From b0fc58eaa310f2a7e3cbaa4cb5ef93a3ee8030b6 Mon Sep 17 00:00:00 2001 From: Jacob Peddicord Date: Wed, 3 Apr 2019 16:29:02 -0700 Subject: [PATCH 0001/1356] Creating initial files from template --- .github/PULL_REQUEST_TEMPLATE.md | 6 ++++ CODE_OF_CONDUCT.md | 4 +++ CONTRIBUTING.md | 61 ++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..6bdaa999 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,6 @@ +*Issue #, if available:* + +*Description of changes:* + + +By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..5b627cfa --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..78300391 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](https://github.com/amazonlinux/PRIVATE-thar/issues), or [recently closed](https://github.com/amazonlinux/PRIVATE-thar/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/amazonlinux/PRIVATE-thar/labels/help%20wanted) issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](https://github.com/amazonlinux/PRIVATE-thar/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. From 33f9b309e83dc346b10317b34bd66dab935fec89 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 20 Feb 2019 13:15:08 -0800 Subject: [PATCH 0002/1356] build SDK Signed-off-by: Ben Cressey --- .gitignore | 3 +++ Makefile | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..05fa7fca --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +/build +*.makevar +*.makepkg diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..5af4e3ca --- /dev/null +++ b/Makefile @@ -0,0 +1,35 @@ +.DEFAULT_GOAL := all + +TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) +SPEC2VAR ?= $(TOPDIR)/bin/spec2var +SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg + +SPECS = $(wildcard packages/*/*.spec) +VARS = $(SPECS:.spec=.makevar) +PKGS = $(SPECS:.spec=.makepkg) + +OUTPUT ?= $(TOPDIR)/build +OUTVAR := $(shell mkdir -p $(OUTPUT)) + +BUILDCTL ?= buildctl --addr tcp://127.0.0.1:1234 +BUILDCTL_ARGS := --progress=plain +BUILDCTL_ARGS += --frontend=dockerfile.v0 +BUILDCTL_ARGS += --local context=. +BUILDCTL_ARGS += --local dockerfile=. 
+ +%.makevar : %.spec $(SPEC2VAR) + @$(SPEC2VAR) $< > $@ + +%.makepkg : %.spec $(SPEC2PKG) + @$(SPEC2PKG) $< > $@ + +-include $(VARS) +-include $(PKGS) + +.PHONY: all +all: $(thar-sdk) + @echo BUILT IT ALL + +.PHONY: clean +clean: + @rm -r $(OUTPUT)/*.rpm From ce9adba1d8743d8ca2309f5301bc81dec7716190 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 21 Feb 2019 23:08:48 +0000 Subject: [PATCH 0003/1356] refactor handling of different architectures Signed-off-by: Ben Cressey --- Makefile | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5af4e3ca..b471e077 100644 --- a/Makefile +++ b/Makefile @@ -11,25 +11,33 @@ PKGS = $(SPECS:.spec=.makepkg) OUTPUT ?= $(TOPDIR)/build OUTVAR := $(shell mkdir -p $(OUTPUT)) +ARCHS := x86_64 aarch64 + BUILDCTL ?= buildctl --addr tcp://127.0.0.1:1234 BUILDCTL_ARGS := --progress=plain BUILDCTL_ARGS += --frontend=dockerfile.v0 BUILDCTL_ARGS += --local context=. BUILDCTL_ARGS += --local dockerfile=. +empty := +space := $(empty) $(empty) +comma := , +list = $(subst $(space),$(comma),$(1)) + %.makevar : %.spec $(SPEC2VAR) - @$(SPEC2VAR) $< > $@ + @set -e; $(SPEC2VAR) --spec=$< --archs=$(call list,$(ARCHS)) > $@ %.makepkg : %.spec $(SPEC2PKG) - @$(SPEC2PKG) $< > $@ + @set -e; $(SPEC2PKG) --spec=$< --archs=$(call list,$(ARCHS)) > $@ -include $(VARS) -include $(PKGS) .PHONY: all -all: $(thar-sdk) +all: $(thar-x86_64-sdk) $(thar-aarch64-sdk) @echo BUILT IT ALL .PHONY: clean clean: - @rm -r $(OUTPUT)/*.rpm + @rm -f $(OUTPUT)/*.rpm + @find $(TOPDIR) -name '*.makevar' -name '*.makepkg' -delete From a7a5e638512f9cbaee5fdc837507b8a4306b4b40 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 21 Feb 2019 23:08:48 +0000 Subject: [PATCH 0004/1356] refactor handling of package dependencies Signed-off-by: Ben Cressey --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index b471e077..830abf66 100644 --- a/Makefile +++ b/Makefile @@ -40,4 +40,3 @@ all: $(thar-x86_64-sdk) $(thar-aarch64-sdk) .PHONY: clean clean: @rm -f $(OUTPUT)/*.rpm - @find $(TOPDIR) -name '*.makevar' -name '*.makepkg' -delete From feb56bc689ed96dba95762c3842e0fd1751e6b77 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 22 Feb 2019 16:33:33 +0000 Subject: [PATCH 0005/1356] build kernel headers Signed-off-by: Ben Cressey --- Makefile | 2 +- packages/kernel/.gitignore | 1 + packages/kernel/config-aarch64 | 5126 ++++++++++++++++++++++++++++++++ packages/kernel/config-x86_64 | 4561 ++++++++++++++++++++++++++++ packages/kernel/kernel.spec | 72 + packages/kernel/sources | 1 + 6 files changed, 9762 insertions(+), 1 deletion(-) create mode 100644 packages/kernel/.gitignore create mode 100644 packages/kernel/config-aarch64 create mode 100644 packages/kernel/config-x86_64 create mode 100644 packages/kernel/kernel.spec create mode 100644 packages/kernel/sources diff --git a/Makefile b/Makefile index 830abf66..1b82d857 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ list = $(subst $(space),$(comma),$(1)) -include $(PKGS) .PHONY: all -all: $(thar-x86_64-sdk) $(thar-aarch64-sdk) +all: $(thar-x86_64-kernel) $(thar-aarch64-kernel) @echo BUILT IT ALL .PHONY: clean diff --git a/packages/kernel/.gitignore b/packages/kernel/.gitignore new file mode 100644 index 00000000..91a1b59d --- /dev/null +++ b/packages/kernel/.gitignore @@ -0,0 +1 @@ +linux-4.14.102.tar.xz diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 new file mode 100644 index 00000000..0d465603 --- /dev/null +++ 
b/packages/kernel/config-aarch64 @@ -0,0 +1,5126 @@ +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SMP=y +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_DEFCONFIG_LIST="/lib/modules//.config" +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +# CONFIG_TICK_CPU_ACCOUNTING is not set +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +# CONFIG_BUILD_BIN2C is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_BLK_CGROUP=y +# 
CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_POSIX_TIMERS=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_USERFAULTFD=y +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR_NONE is not set +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_THIN_ARCHIVES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +# CONFIG_HAVE_ARCH_HASH is not set +# CONFIG_ISA_BUS_API is not set +CONFIG_CLONE_BACKWARDS=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +# CONFIG_REFCOUNT_FULL is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +CONFIG_MODULE_SIG_SHA1=y +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha1" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +CONFIG_ARCH_ALPINE=y +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB 
is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VULCAN is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIE_ECRC=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_KIRIN is not set + +# +# PCI host controller drivers +# +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=4096 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=2 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_HZ_100=y +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=100 +CONFIG_SCHED_HRTICK=y 
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_IDLE_PAGE_TRACKING=y +# CONFIG_PERCPU_STATS is not set +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_MODULE_CMODEL_LARGE=y +# CONFIG_RANDOMIZE_BASE is not set + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# CONFIG_COMPAT is not set + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_OPP=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# CPU 
Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +# CONFIG_QORIQ_CPUFREQ is not set +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +# CONFIG_IPV6_FOU is not set +# CONFIG_IPV6_FOU_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y 
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=m +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +# CONFIG_NFT_RT is not set +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=m +# CONFIG_NFT_SET_RBTREE is not set +# CONFIG_NFT_SET_HASH is not set +# CONFIG_NFT_SET_BITMAP is not set +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_OBJREF is not set +CONFIG_NFT_QUEUE=m +# CONFIG_NFT_QUOTA is not set +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m 
+CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +# CONFIG_NFT_FIB_IPV4 is not set +CONFIG_NF_TABLES_ARP=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m 
+CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +# CONFIG_NFT_FIB_IPV6 is not set +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +CONFIG_NET_SCTPPROBE=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m 
+CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_TCPPROBE is not set +CONFIG_NET_DROP_MONITOR=y +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +# CONFIG_STREAM_PARSER is not set +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# 
CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +CONFIG_ARM_CCI=y +CONFIG_ARM_CCI_PMU=y +CONFIG_ARM_CCI400_COMMON=y +CONFIG_ARM_CCI400_PMU=y +CONFIG_ARM_CCI5xx_PMU=y +CONFIG_ARM_CCN=y +# CONFIG_BRCMSTB_GISB_ARB is not set +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_DYNAMIC=y 
+CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_BLK_DEV_RAM_DAX=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +CONFIG_ATA_OVER_ETH=m +CONFIG_VIRTIO_BLK=m +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +# CONFIG_NVME_FC is not set +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +# CONFIG_NVME_TARGET_FC is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set +# CONFIG_CXL_LIB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m 
+CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +# CONFIG_SCSI_HISI_SAS_PCI is not set +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PCI=m +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +CONFIG_SCSI_HPTIOP=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_IPS is not set +CONFIG_SCSI_INITIO=m +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_STEX=m +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PROMISE=m +CONFIG_SATA_SIL=m +# CONFIG_SATA_SIS is not set +CONFIG_SATA_SVW=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m + +# +# PATA SFF controllers with BMDMA +# +# 
CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# 
Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_AMD_XGBE_HAVE_ECC is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +# CONFIG_BNXT_DCB is not set +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_LIQUIDIO=m +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +# CONFIG_NET_VENDOR_CISCO is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_EXAR is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +# CONFIG_HNS3 is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_I40EVF=m +CONFIG_FM10K=m +# CONFIG_NET_VENDOR_I825XX is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX5_CORE is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_PACKET_ENGINE=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is 
not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +# CONFIG_SMSC911X_ARCH_HOOKS is not set +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_THUNDER=m +CONFIG_MDIO_XGENE=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m 
+CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +# CONFIG_RMI4_I2C is not set +# CONFIG_RMI4_SPI is not set +# CONFIG_RMI4_SMB is not set +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +# CONFIG_SERIO_LIBPS2 is not set +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +# CONFIG_DEVMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y 
+CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_TPM=m +CONFIG_HW_RANDOM_HISI=y +# CONFIG_HW_RANDOM_MSM is not set +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +# CONFIG_HPET is not set +CONFIG_TCG_TPM=m +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +# CONFIG_TCG_CRB is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO 
is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_PXA_PCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y + +# +# Pin controllers +# +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +CONFIG_GPIOLIB=y +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m 
+# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +# CONFIG_SENSORS_I5K_AMB is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16065=m 
+CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +CONFIG_HISI_THERMAL=m +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# + +# +# Qualcomm thermal drivers +# +# 
CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 
is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=y +CONFIG_RC_MAP=y +CONFIG_RC_DECODERS=y +# CONFIG_LIRC is not set +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +# CONFIG_RC_DEVICES is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=m +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_TTM=m +CONFIG_DRM_VM=y + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LEGACY is not set +# CONFIG_DRM_LIB_RANDOM is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y 
+ +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +# CONFIG_SOUND_OSS_CORE is not set +# CONFIG_SND is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m 
+CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=y +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_MSM is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y 
+CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +# CONFIG_USB_SERIAL_UPD78F0730 is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# CONFIG_USB_RIO500 is not set +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_MSM_OTG is not set +# CONFIG_USB_QCOM_8X16_PHY is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_GADGET 
is not set + +# +# USB Power Delivery and Type-C drivers +# +# CONFIG_TYPEC_UCSI is not set +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +# CONFIG_MMC_CAVIUM_THUNDERX is not set +# CONFIG_MMC_DW is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m 
+CONFIG_MLX4_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +# CONFIG_INFINIBAND_HNS is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +# CONFIG_INFINIBAND_BNXT_RE is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +# CONFIG_EDAC_GHES is not set +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_HWMON is not set +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +# CONFIG_RTC_DS1685_PROC_REGS is not set +CONFIG_RTC_DS1685_SYSFS_REGS=y +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices 
+# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_HT16K33 is not set +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +CONFIG_VFIO_AMBA=m +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# CONFIG_VFIO_MDEV is not set +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=m + +# +# Virtio drivers +# +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV_TSCPAGE is not set +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_NXP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_PXA is not set +# CONFIG_COMMON_CLK_PIC32 is not set +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +# CONFIG_HI6220_MBOX is not set +# 
CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_BCM_FLEXRM_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_QCOM_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_GSBI is not set +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_EXTCON=m + +# +# Extcon Device Drivers +# +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ALPINE_MSI=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +# CONFIG_QCOM_IRQ_COMBINER is not set +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_ATH79 is not set +# CONFIG_RESET_BERLIN is not set +# CONFIG_RESET_IMX7 is not set +# CONFIG_RESET_LANTIQ is not set +# CONFIG_RESET_LPC18XX is not set +# CONFIG_RESET_MESON is not set +# CONFIG_RESET_PISTACHIO is not set +# CONFIG_RESET_SOCFPGA is not set +# CONFIG_RESET_STM32 is not set +# CONFIG_RESET_SUNXI is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_ZYNQ is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=m +# CONFIG_RESET_TEGRA_BPMP is not set +CONFIG_FMC=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m +CONFIG_FMC_CHARDEV=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +# CONFIG_QCOM_L2_PMU is not set +# CONFIG_QCOM_L3_PMU is not set +CONFIG_XGENE_PMU=y +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y 
+CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +# CONFIG_QCOM_QFPROM is not set +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set + +# +# FSI support +# +# CONFIG_FSI is not set +# CONFIG_TEE is not set +CONFIG_AMAZON_DRIVER_UPDATES=y +CONFIG_AMAZON_ENA_ETHERNET=m + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_FW_CFG_SYSFS is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +# CONFIG_EFI_DEV_PATH_PARSER is not set +# CONFIG_MESON_SM is not set + +# +# Tegra firmware driver +# +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# 
CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +# CONFIG_OVERLAY_FS_INDEX is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_ZLIB_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EXOFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y 
+CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +CONFIG_CIFS_STATS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB311 is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# 
CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# 
CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +# CONFIG_ARM64_PTDUMP_CORE is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=m +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_INFINIBAND is not set +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_INTEGRITY is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y 
+CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=m +# CONFIG_CRYPTO_SHA3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +# CONFIG_CRYPTO_DEV_QCE is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# 
CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +# CONFIG_DMA_NOOP_OPS is not set +CONFIG_DMA_VIRT_OPS=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 new file mode 100644 index 00000000..a816d2e8 --- /dev/null +++ b/packages/kernel/config-x86_64 @@ -0,0 +1,4561 @@ +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y 
+CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_DEFCONFIG_LIST="/lib/modules//.config" +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set 
+CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_POSIX_TIMERS=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_USERFAULTFD=y +CONFIG_PCI_QUIRKS=y +CONFIG_MEMBARRIER=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_SLUB_MEMCG_SYSFS_ON=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y 
+CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR_NONE is not set +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_THIN_ARCHIVES=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +# CONFIG_HAVE_ARCH_HASH is not set +# CONFIG_ISA_BUS_API is not set +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_COMPRESS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_WBT=y +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +CONFIG_SOLARIS_X86_PARTITION=y +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y 
+# CONFIG_SYSV68_PARTITION is not set +CONFIG_CMDLINE_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=m +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" +CONFIG_MQ_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_FAST_FEATURE_TESTS=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +# CONFIG_INTEL_RDT is not set +# CONFIG_X86_EXTENDED_PLATFORM is not set +# CONFIG_X86_INTEL_LPSS is not set +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_IOSF_MBI=m +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_QUEUED_LOCK_STAT is not set +CONFIG_XEN=y +CONFIG_XEN_PV=y +CONFIG_XEN_PV_SMP=y +CONFIG_XEN_DOM0=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_512GB=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +CONFIG_XEN_PVH=y +CONFIG_KVM_GUEST=y +CONFIG_KVM_DEBUG_FS=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +CONFIG_MAXSMP=y +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +# CONFIG_PERF_EVENTS_INTEL_RAPL is not set +# CONFIG_PERF_EVENTS_INTEL_CSTATE is not set +# CONFIG_PERF_EVENTS_AMD_POWER is not set +# CONFIG_VM86 is not set +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_I8K=m +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_X86_DIRECT_GBPAGES=y 
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +# CONFIG_CMA is not set +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +CONFIG_Z3FOLD=m +CONFIG_ZSMALLOC=m +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_PERCPU_STATS=y +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +# CONFIG_X86_INTEL_MPX is not set +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_EFI is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_VERIFY_SIG=y +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# CONFIG_KEXEC_JUMP is not set +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_LEGACY_VSYSCALL_NATIVE is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_MODIFY_LDT_SYSCALL is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +CONFIG_ARCH_HAS_ADD_PAGES=y 
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +# CONFIG_SUSPEND is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_PROCFS_POWER=y +# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=m +CONFIG_ACPI_BATTERY=m +CONFIG_ACPI_BUTTON=m +# CONFIG_ACPI_VIDEO is not set +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=m +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_X86_PM_TIMER=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +# CONFIG_ACPI_NFIT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_DPTF_POWER is not set +CONFIG_ACPI_EXTLOG=m +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +# CONFIG_X86_ACPI_CPUFREQ_CPB is not set +# CONFIG_X86_POWERNOW_K8 is not set +# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# +# CONFIG_X86_SPEEDSTEP_LIB is not set + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) 
+# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_BUS_ADDR_T_64BIT=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=m +CONFIG_XEN_PCIDEV_FRONTEND=y +CONFIG_HT_IRQ=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_HYPERV is not set +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_CPCI=y +# CONFIG_HOTPLUG_PCI_CPCI_ZT5550 is not set +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m +CONFIG_HOTPLUG_PCI_SHPC=m + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT is not set + +# +# PCI host controller drivers +# +# CONFIG_VMD is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Executable file formats / Emulations +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +CONFIG_IA32_EMULATION=y +CONFIG_IA32_AOUT=m +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m 
+CONFIG_TCP_CONG_VEGAS=m +# CONFIG_TCP_CONG_NV is not set +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=m +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_RT=m +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=m +CONFIG_NFT_SET_RBTREE=m +CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +# CONFIG_NFT_QUOTA is not set +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NF_DUP_NETDEV is not set +# CONFIG_NFT_DUP_NETDEV is not set +# CONFIG_NFT_FWD_NETDEV is not set +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m 
+ +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m 
+CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_TFRC_LIB=y + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +CONFIG_NET_DCCPPROBE=m +CONFIG_IP_SCTP=m +# CONFIG_NET_SCTPPROBE is not set +# CONFIG_SCTP_DBG_OBJCNT is not set +# 
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +# CONFIG_RDS_RDMA is not set +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_UDP=y +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +# CONFIG_NETLINK_DIAG is not set +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +# CONFIG_NET_NSH is not set +CONFIG_HSR=m +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_TCPPROBE is not set +# 
CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_SERDEV=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_NOKIA is not set +CONFIG_BT_HCIUART_BCSP=y +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_BCM=y +# CONFIG_BT_HCIUART_QCA is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_HCIBFUSB is not set +CONFIG_BT_HCIVHCI=m +# CONFIG_BT_MRVL is not set +# CONFIG_BT_ATH3K is not set +CONFIG_AF_RXRPC=m +# CONFIG_AF_RXRPC_IPV6 is not set +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_DEBUG is not set +# CONFIG_RXKAD is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +CONFIG_DEBUG_DEVRES=y +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set 
+CONFIG_ATA_OVER_ETH=m +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_BLKDEV_BACKEND=m +CONFIG_VIRTIO_BLK=m +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_RDMA is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set +# CONFIG_CXL_LIB is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=m +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# 
CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +CONFIG_SCSI_BUSLOGIC=m +# CONFIG_SCSI_FLASHPOINT is not set +CONFIG_VMWARE_PVSCSI=m +CONFIG_XEN_SCSI_FRONTEND=m +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_FCOE_FNIC is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_EATA is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_FC is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_DH is not set +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set +CONFIG_ATA=m +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# 
CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_ERA is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +# CONFIG_TCM_PSCSI is not set +# CONFIG_TCM_USER2 is not set +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +# CONFIG_NET_FC is not set +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +# CONFIG_NET_TEAM_MODE_RANDOM is not set +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +CONFIG_NETCONSOLE=m +# CONFIG_NETCONSOLE_DYNAMIC is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# 
CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +# CONFIG_E1000E_HWTS is not set +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +CONFIG_IXGBEVF=m +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_FM10K is not set +# CONFIG_NET_VENDOR_I825XX is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +# CONFIG_8139TOO_8129 is not set +# CONFIG_8139_OLD_RX_RESET is not set +# CONFIG_R8169 is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=m + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m 
+CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set + +# +# Host-side USB support is needed for USB Network Adapter support +# +CONFIG_USB_NET_DRIVERS=m +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_XEN_NETDEV_FRONTEND=y +CONFIG_XEN_NETDEV_BACKEND=m +CONFIG_VMXNET3=m +# CONFIG_FUJITSU_ES is not set +CONFIG_HYPERV_NET=m +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=m +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_LIFEBOOK is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set +# CONFIG_MOUSE_PS2_VMMOUSE is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_ELAN_I2C is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_ATLAS_BTNS is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# 
CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=m +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_FSL is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_LPSS is not set +# CONFIG_SERIAL_8250_MID is not set +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_VIRTIO_CONSOLE=m +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=m +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_TPM=m +CONFIG_NVRAM=m +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=m +CONFIG_MAX_RAW_DEVS=256 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_NSC is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_XEN=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# 
CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=m +CONFIG_PPS_DEBUG=y + +# +# PPS clients support +# +CONFIG_PPS_CLIENT_KTIMER=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +# CONFIG_DP83640_PHY is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=m +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +CONFIG_SENSORS_DELL_SMM=m +# CONFIG_SENSORS_I5K_AMB is not set +# 
CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# 
CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# CONFIG_INTEL_PCH_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +# CONFIG_SSB_B43_PCI_BRIDGE is not set +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_AGP is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_VGA_SWITCHEROO is not set +CONFIG_DRM=m +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y 
+CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +CONFIG_DRM_TTM=m + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_RADEON is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_NOUVEAU is not set +# CONFIG_DRM_I915 is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VMWGFX is not set +# CONFIG_DRM_GMA500 is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_LEGACY is not set +# CONFIG_DRM_LIB_RANDOM is not set + +# +# Frame buffer Devices +# +CONFIG_FB=m +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=m +CONFIG_FB_CFB_COPYAREA=m +CONFIG_FB_CFB_IMAGEBLIT=m +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_HYPERV is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 
is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_VGASTATE is not set +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_VGACON_SOFT_SCROLLBACK is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=m + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +CONFIG_HID_HYPERV_MOUSE=m +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=m +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# USB HID Boot Protocol drivers +# +# CONFIG_USB_KBD is not set +# CONFIG_USB_MOUSE is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=m +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=m +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_MON=m +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller 
Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=m +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=m +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_OHCI_HCD_PCI=m +# CONFIG_USB_OHCI_HCD_SSB is not set +CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB_UHCI_HCD=m +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +CONFIG_USBIP_CORE=m +CONFIG_USBIP_VHCI_HCD=m +CONFIG_USBIP_VHCI_HC_PORTS=8 +CONFIG_USBIP_VHCI_NR_HCS=1 +CONFIG_USBIP_HOST=m +# CONFIG_USBIP_DEBUG is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_GADGET is not set + +# +# USB Power Delivery and Type-C drivers +# +# CONFIG_TYPEC_UCSI is not set +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +# CONFIG_INFINIBAND_USER_MAD is not set +CONFIG_INFINIBAND_USER_ACCESS=m +# 
CONFIG_INFINIBAND_EXP_USER_ACCESS is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +# CONFIG_INFINIBAND_VMWARE_PVRDMA is not set +# CONFIG_INFINIBAND_USNIC is not set +# CONFIG_INFINIBAND_IPOIB is not set +# CONFIG_INFINIBAND_SRP is not set +# CONFIG_INFINIBAND_SRPT is not set +# CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_ISERT is not set +# CONFIG_INFINIBAND_OPA_VNIC is not set +# CONFIG_INFINIBAND_RDMAVT is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_INFINIBAND_BNXT_RE is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=y +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +# CONFIG_RTC_NVMEM is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# 
+CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_INTEL_IDMA64 is not set +CONFIG_INTEL_IOATDMA=m +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_DCA=m +CONFIG_AUXDISPLAY=y +# CONFIG_IMG_ASCII_LCD is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +# CONFIG_VFIO_MDEV is not set +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO=m + +# +# Virtio drivers +# +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +CONFIG_HYPERV_TSCPAGE=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m + +# +# Xen driver support +# +# CONFIG_XEN_BALLOON is not set +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_BACKEND=y +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=m +CONFIG_XEN_GRANT_DEV_ALLOC=m +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_TMEM=m +CONFIG_XEN_PCIDEV_BACKEND=m +# CONFIG_XEN_PVCALLS_BACKEND is not set +# CONFIG_XEN_SCSI_BACKEND is not set +CONFIG_XEN_PRIVCMD=m +# CONFIG_XEN_ACPI_PROCESSOR is not set +# CONFIG_XEN_MCE_LOG is not set +CONFIG_XEN_HAVE_PVMMU=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_SYMS is not set +CONFIG_XEN_HAVE_VPMU=y +CONFIG_STAGING=y +# CONFIG_IRDA is not set +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# CONFIG_LTE_GDM724X is not set +CONFIG_LNET=m +CONFIG_LNET_MAX_PAYLOAD=1048576 +# CONFIG_LNET_SELFTEST is not set +# CONFIG_LNET_XPRT_IB is not set +CONFIG_LUSTRE_FS=m +# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_CRYPTO_SKEIN is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_MOST is not set +# CONFIG_GREYBUS is not set + +# +# USB Power Delivery and Type-C drivers +# +# CONFIG_TYPEC_TCPM is not set +CONFIG_DRM_VBOXVIDEO=m +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACERHDF is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_DELL_LAPTOP is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_FUJITSU_LAPTOP is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_HP_ACCEL is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_THINKPAD_ACPI is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_EEEPC_LAPTOP is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# 
CONFIG_ACPI_CMPC is not set +# CONFIG_INTEL_CHT_INT33FE is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_INTEL_IPS is not set +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_LAPTOP is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_APPLE_GMUX is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set +# CONFIG_PVPANIC is not set +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_MLX_PLATFORM is not set +# CONFIG_MLX_CPLD_PLATFORM is not set +CONFIG_INTEL_TURBO_MAX_3=y +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_NXP is not set +# CONFIG_COMMON_CLK_PXA is not set +# CONFIG_COMMON_CLK_PIC32 is not set +CONFIG_HWSPINLOCK=m + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IOVA=y +# CONFIG_AMD_IOMMU is not set +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=m +# CONFIG_DEV_DAX is not set +# CONFIG_NVMEM is not set +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set + +# +# FSI support +# +CONFIG_FSI=m +CONFIG_FSI_MASTER_HUB=m +CONFIG_FSI_SCOM=m +CONFIG_AMAZON_DRIVER_UPDATES=y +CONFIG_AMAZON_ENA_ETHERNET=m + +# +# Firmware Drivers +# +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT_FIND is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set +CONFIG_UEFI_CPER=y +# CONFIG_EFI_DEV_PATH_PARSER is not set + +# +# Tegra firmware driver +# + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y 
+CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=m +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=m +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=m +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=m +CONFIG_JBD2_DEBUG=y +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +CONFIG_JFS_STATISTICS=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +CONFIG_NILFS2_FS=m +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=m +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +# CONFIG_OVERLAY_FS_INDEX is not set + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +CONFIG_ECRYPT_FS=m +# CONFIG_ECRYPT_FS_MESSAGING is not set +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +# CONFIG_HFSPLUS_FS_POSIX_ACL is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set 
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=y +CONFIG_PSTORE_ZLIB_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +# CONFIG_EXOFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +# CONFIG_SUNRPC_XPRT_RDMA is not set +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +CONFIG_CIFS_STATS=y +CONFIG_CIFS_STATS2=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB311 is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m 
+CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_UNUSED_SYMBOLS=y +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_WQ_WATCHDOG=y +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=59 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +CONFIG_TRACE_EVAL_MAP_FILE=y +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_DMA_API_DEBUG is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +CONFIG_INTERVAL_TREE_TEST=m +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# 
CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +CONFIG_BUG_ON_DATA_CORRUPTION=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=m +# CONFIG_KGDB_TESTS is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_EARLY_PRINTK_DBGP is not set +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP_CORE is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +# CONFIG_IOMMU_STRESS is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# CONFIG_UNWINDER_GUESS is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +# CONFIG_BIG_KEYS is not set +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=m +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +# CONFIG_IMA_APPRAISE is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m 
+CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECDH=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_MCRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ABLK_HELPER=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_GLUE_HELPER_X86=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_SHA1_MB=m +CONFIG_CRYPTO_SHA256_MB=m +CONFIG_CRYPTO_SHA512_MB=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=y +CONFIG_CRYPTO_AES_X86_64=m +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CHACHA20_X86_64=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m + +# +# Random Number 
Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_MMU_AUDIT=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=m +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +CONFIG_CRC32_SELFTEST=y +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC4=m +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +CONFIG_XXHASH=m +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# 
CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_OID_REGISTRY=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec new file mode 100644 index 00000000..43dbbad9 --- /dev/null +++ b/packages/kernel/kernel.spec @@ -0,0 +1,72 @@ +%global debug_package %{nil} + +Name: %{_cross_os}kernel +Version: 4.14.102 +Release: 1%{?dist} +Summary: The Linux kernel +Group: System Environment/Kernel +License: GPLv2 and Redistributable, no modification permitted +URL: https://www.kernel.org/ +Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz +Source100: config-%{_cross_arch} +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildRequires: gcc-%{_cross_target} + +%description +%{summary}. + +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System + +%description headers +%{summary}. + +%prep +%setup -q -n linux-%{version} +cp %{SOURCE100} "arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" + +%build +make -s \ + ARCH="%{_cross_karch}" \ + CROSS_COMPILE="%{_cross_target}-" \ + %{_cross_vendor}_defconfig + +%install +make -s \ + ARCH="%{_cross_karch}" \ + CROSS_COMPILE="%{_cross_target}-" \ + INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}" \ + headers_install + +find %{buildroot}%{_cross_prefix} \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +%files + +%files headers +%dir %{_cross_includedir}/asm +%dir %{_cross_includedir}/asm-generic +%dir %{_cross_includedir}/drm +%dir %{_cross_includedir}/linux +%dir %{_cross_includedir}/misc +%dir %{_cross_includedir}/mtd +%dir %{_cross_includedir}/rdma +%dir %{_cross_includedir}/scsi +%dir %{_cross_includedir}/sound +%dir %{_cross_includedir}/video +%dir %{_cross_includedir}/xen +%{_cross_includedir}/asm/* +%{_cross_includedir}/asm-generic/* +%{_cross_includedir}/drm/* +%{_cross_includedir}/linux/* +%{_cross_includedir}/misc/* +%{_cross_includedir}/mtd/* +%{_cross_includedir}/rdma/* +%{_cross_includedir}/scsi/* +%{_cross_includedir}/sound/* +%{_cross_includedir}/video/* +%{_cross_includedir}/xen/* + +%changelog diff --git a/packages/kernel/sources b/packages/kernel/sources new file mode 100644 index 00000000..1bdd97c8 --- /dev/null +++ b/packages/kernel/sources @@ -0,0 +1 @@ +sha256 d8a982cfa2804edc2ae9d20792ab0e3897f3976ced5632f2392c2e1918562501 linux-4.14.102.tar.xz From 56532b6e6ebff39526875b803b2ea86701641f19 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 24 Feb 2019 01:34:30 +0000 Subject: [PATCH 0006/1356] build glibc Signed-off-by: Ben Cressey --- Makefile | 2 +- packages/kernel/kernel.spec | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 1b82d857..6cb8b130 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ list = $(subst $(space),$(comma),$(1)) -include $(PKGS) .PHONY: all -all: $(thar-x86_64-kernel) $(thar-aarch64-kernel) +all: $(thar-x86_64-glibc) $(thar-aarch64-glibc) @echo BUILT IT ALL .PHONY: clean diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 43dbbad9..c833565d 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -4,12 +4,10 @@ Name: 
%{_cross_os}kernel Version: 4.14.102 Release: 1%{?dist} Summary: The Linux kernel -Group: System Environment/Kernel License: GPLv2 and Redistributable, no modification permitted URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz Source100: config-%{_cross_arch} -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: gcc-%{_cross_target} %description @@ -17,7 +15,6 @@ BuildRequires: gcc-%{_cross_target} %package headers Summary: Header files for the Linux kernel for use by glibc -Group: Development/System %description headers %{summary}. From feeb42b59fe14dcde1b44cabc35c0a6b6baa9db9 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 24 Feb 2019 23:44:06 +0000 Subject: [PATCH 0007/1356] build ncurses Signed-off-by: Ben Cressey --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6cb8b130..4a1f47f5 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ list = $(subst $(space),$(comma),$(1)) -include $(PKGS) .PHONY: all -all: $(thar-x86_64-glibc) $(thar-aarch64-glibc) +all: $(thar-x86_64-ncurses) $(thar-aarch64-glibc) @echo BUILT IT ALL .PHONY: clean From 727474d54eb7e9b98e1207913570fc5ee0fa9b53 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 25 Feb 2019 00:08:00 +0000 Subject: [PATCH 0008/1356] build readline Signed-off-by: Ben Cressey --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4a1f47f5..37faabca 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ list = $(subst $(space),$(comma),$(1)) -include $(PKGS) .PHONY: all -all: $(thar-x86_64-ncurses) $(thar-aarch64-glibc) +all: $(thar-x86_64-readline) $(thar-aarch64-readline) @echo BUILT IT ALL .PHONY: clean From 43183998d9de14872b6e0a4d2e08758b1eee1a38 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 25 Feb 2019 17:06:34 +0000 Subject: [PATCH 0009/1356] build bash Signed-off-by: Ben Cressey --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 37faabca..a820be16 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ list = $(subst $(space),$(comma),$(1)) -include $(PKGS) .PHONY: all -all: $(thar-x86_64-readline) $(thar-aarch64-readline) +all: $(thar-x86_64-bash) $(thar-aarch64-bash) @echo BUILT IT ALL .PHONY: clean From a484595bfb00b132e3b703ee3793718e984b4e77 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 26 Feb 2019 00:19:53 +0000 Subject: [PATCH 0010/1356] add release package and target Signed-off-by: Ben Cressey --- Makefile | 45 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index a820be16..fb61fa6c 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ .DEFAULT_GOAL := all +OS := thar TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg @@ -10,6 +11,7 @@ PKGS = $(SPECS:.spec=.makepkg) OUTPUT ?= $(TOPDIR)/build OUTVAR := $(shell mkdir -p $(OUTPUT)) +DATE := $(shell date --rfc-3339=date) ARCHS := x86_64 aarch64 @@ -19,6 +21,38 @@ BUILDCTL_ARGS += --frontend=dockerfile.v0 BUILDCTL_ARGS += --local context=. BUILDCTL_ARGS += --local dockerfile=. 
+DOCKER ?= docker + +define build_rpm + $(eval HASH:= $(shell sha1sum $3 /dev/null | sha1sum - | awk '{printf $$1}')) + $(eval RPMS:= $(shell echo $3 | tr ' ' '\n' | awk '/.rpm$$/' | tr '\n' ' ')) + @$(BUILDCTL) build \ + --frontend-opt target=rpm \ + --frontend-opt build-arg:PACKAGE=$(1) \ + --frontend-opt build-arg:ARCH=$(2) \ + --frontend-opt build-arg:HASH=$(HASH) \ + --frontend-opt build-arg:RPMS="$(RPMS)" \ + --frontend-opt build-arg:DATE=$(DATE) \ + --exporter=local \ + --exporter-opt output=$(OUTPUT) \ + $(BUILDCTL_ARGS) +endef + +define build_fs + $(eval HASH:= $(shell sha1sum $(2) /dev/null | sha1sum - | awk '{print $$1}')) + @$(BUILDCTL) build \ + --frontend-opt target=fs \ + --frontend-opt build-arg:PACKAGE=$(OS)-$(1)-release \ + --frontend-opt build-arg:ARCH=$(1) \ + --frontend-opt build-arg:HASH=$(HASH) \ + --frontend-opt build-arg:DATE=$(DATE) \ + --exporter=docker \ + --exporter-opt name=$(OS):$(1) \ + --exporter-opt output=build/$(OS)-$(1).tar \ + $(BUILDCTL_ARGS) ; \ + $(DOCKER) load < build/$(OS)-$(1).tar +endef + empty := space := $(empty) $(empty) comma := , @@ -33,9 +67,14 @@ list = $(subst $(space),$(comma),$(1)) -include $(VARS) -include $(PKGS) -.PHONY: all -all: $(thar-x86_64-bash) $(thar-aarch64-bash) - @echo BUILT IT ALL +.PHONY: all $(ARCHS) + +.SECONDEXPANSION: +$(ARCHS): $$($(OS)-$$(@)-release) + $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(@)-*.rpm)) + $(call build_fs,$@,$(PKGS)) + +all: $(ARCHS) .PHONY: clean clean: From f4210c0a0d969b6d43537a9c267b1813d4bc1026 Mon Sep 17 00:00:00 2001 From: Jacob Vallejo Date: Wed, 13 Mar 2019 16:37:18 -0700 Subject: [PATCH 0011/1356] Add a target to run the buildkitd daemon Signed-off-by: Jacob Vallejo --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fb61fa6c..bad53099 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ DATE := $(shell date --rfc-3339=date) ARCHS := x86_64 aarch64 -BUILDCTL ?= buildctl --addr tcp://127.0.0.1:1234 +BUILDKITD_ADDR ?= tcp://127.0.0.1:1234 +BUILDCTL ?= buildctl --addr $(BUILDKITD_ADDR) BUILDCTL_ARGS := --progress=plain BUILDCTL_ARGS += --frontend=dockerfile.v0 BUILDCTL_ARGS += --local context=. @@ -79,3 +80,5 @@ all: $(ARCHS) .PHONY: clean clean: @rm -f $(OUTPUT)/*.rpm + +include $(TOPDIR)/hack/rules.mk From 977357ec129a6155d1d599dbd06d5a4c8e996ad2 Mon Sep 17 00:00:00 2001 From: Jacob Vallejo Date: Wed, 13 Mar 2019 16:37:19 -0700 Subject: [PATCH 0012/1356] Run buildctl from a docker container Signed-off-by: Jacob Vallejo --- Makefile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index bad53099..7b12fb1e 100644 --- a/Makefile +++ b/Makefile @@ -15,15 +15,17 @@ DATE := $(shell date --rfc-3339=date) ARCHS := x86_64 aarch64 +DOCKER ?= docker + +BUILDKIT_VER = v0.3.3 BUILDKITD_ADDR ?= tcp://127.0.0.1:1234 -BUILDCTL ?= buildctl --addr $(BUILDKITD_ADDR) +BUILDCTL_DOCKER_RUN = $(DOCKER) run --rm -ti --entrypoint /usr/bin/buildctl --user $(shell id -u):$(shell id -g) --volume $(TOPDIR):$(TOPDIR) --workdir $(TOPDIR) --network host moby/buildkit:$(BUILDKIT_VER) +BUILDCTL ?= $(BUILDCTL_DOCKER_RUN) --addr $(BUILDKITD_ADDR) BUILDCTL_ARGS := --progress=plain BUILDCTL_ARGS += --frontend=dockerfile.v0 BUILDCTL_ARGS += --local context=. BUILDCTL_ARGS += --local dockerfile=. 
-DOCKER ?= docker - define build_rpm $(eval HASH:= $(shell sha1sum $3 /dev/null | sha1sum - | awk '{printf $$1}')) $(eval RPMS:= $(shell echo $3 | tr ' ' '\n' | awk '/.rpm$$/' | tr '\n' ' ')) From d5cbf379458a86eedab0336e2b53b1ce7b2164e0 Mon Sep 17 00:00:00 2001 From: Jacob Vallejo Date: Wed, 13 Mar 2019 16:37:20 -0700 Subject: [PATCH 0013/1356] Arches Signed-off-by: Jacob Vallejo Signed-off-by: Ben Cressey --- Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 7b12fb1e..2c7aaa1e 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ OUTPUT ?= $(TOPDIR)/build OUTVAR := $(shell mkdir -p $(OUTPUT)) DATE := $(shell date --rfc-3339=date) -ARCHS := x86_64 aarch64 +ARCHES := x86_64 aarch64 DOCKER ?= docker @@ -62,22 +62,22 @@ comma := , list = $(subst $(space),$(comma),$(1)) %.makevar : %.spec $(SPEC2VAR) - @set -e; $(SPEC2VAR) --spec=$< --archs=$(call list,$(ARCHS)) > $@ + @set -e; $(SPEC2VAR) --spec=$< --arches=$(call list,$(ARCHES)) > $@ %.makepkg : %.spec $(SPEC2PKG) - @set -e; $(SPEC2PKG) --spec=$< --archs=$(call list,$(ARCHS)) > $@ + @set -e; $(SPEC2PKG) --spec=$< --arches=$(call list,$(ARCHES)) > $@ -include $(VARS) -include $(PKGS) -.PHONY: all $(ARCHS) +.PHONY: all $(ARCHES) .SECONDEXPANSION: -$(ARCHS): $$($(OS)-$$(@)-release) +$(ARCHES): $$($(OS)-$$(@)-release) $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(@)-*.rpm)) $(call build_fs,$@,$(PKGS)) -all: $(ARCHS) +all: $(ARCHES) .PHONY: clean clean: From f2e6a8c92026590315d8fd1305fb5895273ccd87 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 14 Mar 2019 20:22:21 +0000 Subject: [PATCH 0014/1356] Use BSD-style checksums "BSD-style checksums" (as coreutils puts it) identify the checksum type while still being readable by sha512sum(1) and its related tools. Additionally, remove empty sources files. sha512sum(1) errors when the input is empty, and we'd like to use it to verify files after downloading them. Signed-off-by: iliana destroyer of worlds --- packages/kernel/sources | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kernel/sources b/packages/kernel/sources index 1bdd97c8..2f90cb91 100644 --- a/packages/kernel/sources +++ b/packages/kernel/sources @@ -1 +1 @@ -sha256 d8a982cfa2804edc2ae9d20792ab0e3897f3976ced5632f2392c2e1918562501 linux-4.14.102.tar.xz +SHA512 (linux-4.14.102.tar.xz) = bdc387dcaa6a585ca01cfc2bf04bf93024d8512dce1a5921c6ce6b55847d663b0d1bf24cd18e87ae200d9713eefd0ea2f866577b1a236e928ca0bfbc49589a53 From 39f6a9df272dc9f919d70ab6d7ad5d4447a0830f Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 15 Mar 2019 22:34:20 +0000 Subject: [PATCH 0015/1356] build kernel image Signed-off-by: Ben Cressey --- packages/kernel/kernel.spec | 46 +++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index c833565d..d11c12c0 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -8,11 +8,21 @@ License: GPLv2 and Redistributable, no modification permitted URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz Source100: config-%{_cross_arch} +BuildRequires: bc +BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} +BuildRequires: hostname +BuildRequires: openssl-devel %description %{summary}. +%package modules +Summary: Modules for the Linux kernel + +%description modules +%{summary}. 
+ %package headers Summary: Header files for the Linux kernel for use by glibc @@ -23,24 +33,42 @@ Summary: Header files for the Linux kernel for use by glibc %setup -q -n linux-%{version} cp %{SOURCE100} "arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" +%global kmake \ +make -s\\\ + ARCH="%{_cross_karch}"\\\ + CROSS_COMPILE="%{_cross_target}-"\\\ + INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_STRIP=1\\\ +%{nil} + %build -make -s \ - ARCH="%{_cross_karch}" \ - CROSS_COMPILE="%{_cross_target}-" \ - %{_cross_vendor}_defconfig +%kmake mrproper +%kmake %{_cross_vendor}_defconfig +%kmake %{?_smp_mflags} %{_cross_kimage} +%kmake %{?_smp_mflags} modules %install -make -s \ - ARCH="%{_cross_karch}" \ - CROSS_COMPILE="%{_cross_target}-" \ - INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}" \ - headers_install +%kmake headers_install +%kmake modules_install + +install -d %{buildroot}/boot +install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz +install -m 0644 .config %{buildroot}/boot/config +install -m 0644 System.map %{buildroot}/boot/System.map find %{buildroot}%{_cross_prefix} \ \( -name .install -o -name .check -o \ -name ..install.cmd -o -name ..check.cmd \) -delete %files +/boot/vmlinuz +/boot/config +/boot/System.map + +%files modules +%dir %{_cross_libdir}/modules +%{_cross_libdir}/modules/* %files headers %dir %{_cross_includedir}/asm From 8e221bd6e9ab65b8dc8b751aa49700e66f7c0d43 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 16 Mar 2019 22:21:53 +0000 Subject: [PATCH 0016/1356] build grub Signed-off-by: Ben Cressey --- packages/grub/.gitignore | 1 + ...reat-R_X86_64_PLT32-as-R_X86_64_PC32.patch | 74 ++++++++++ packages/grub/core.cfg | 9 ++ packages/grub/grub.spec | 126 ++++++++++++++++++ packages/grub/sources | 1 + 5 files changed, 211 insertions(+) create mode 100644 packages/grub/.gitignore create mode 100644 packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch create mode 100644 packages/grub/core.cfg create mode 100644 packages/grub/grub.spec create mode 100644 packages/grub/sources diff --git a/packages/grub/.gitignore b/packages/grub/.gitignore new file mode 100644 index 00000000..b619e359 --- /dev/null +++ b/packages/grub/.gitignore @@ -0,0 +1 @@ +grub-2.02.tar.xz diff --git a/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch b/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch new file mode 100644 index 00000000..cd8b5e73 --- /dev/null +++ b/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch @@ -0,0 +1,74 @@ +From 842c390469e2c2e10b5aa36700324cd3bde25875 Mon Sep 17 00:00:00 2001 +From: "H.J. Lu" +Date: Sat, 17 Feb 2018 06:47:28 -0800 +Subject: [PATCH] x86-64: Treat R_X86_64_PLT32 as R_X86_64_PC32 + +Starting from binutils commit bd7ab16b4537788ad53521c45469a1bdae84ad4a: + +https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=bd7ab16b4537788ad53521c45469a1bdae84ad4a + +x86-64 assembler generates R_X86_64_PLT32, instead of R_X86_64_PC32, for +32-bit PC-relative branches. Grub2 should treat R_X86_64_PLT32 as +R_X86_64_PC32. + +Signed-off-by: H.J. 
Lu +Reviewed-by: Daniel Kiper +Signed-off-by: Romain Naour +--- + grub-core/efiemu/i386/loadcore64.c | 1 + + grub-core/kern/x86_64/dl.c | 1 + + util/grub-mkimagexx.c | 1 + + util/grub-module-verifier.c | 1 + + 4 files changed, 4 insertions(+) + +diff --git a/grub-core/efiemu/i386/loadcore64.c b/grub-core/efiemu/i386/loadcore64.c +index e49d0b6..18facf4 100644 +--- a/grub-core/efiemu/i386/loadcore64.c ++++ b/grub-core/efiemu/i386/loadcore64.c +@@ -98,6 +98,7 @@ grub_arch_efiemu_relocate_symbols64 (grub_efiemu_segment_t segs, + break; + + case R_X86_64_PC32: ++ case R_X86_64_PLT32: + err = grub_efiemu_write_value (addr, + *addr32 + rel->r_addend + + sym.off +diff --git a/grub-core/kern/x86_64/dl.c b/grub-core/kern/x86_64/dl.c +index 4406906..3a73e6e 100644 +--- a/grub-core/kern/x86_64/dl.c ++++ b/grub-core/kern/x86_64/dl.c +@@ -70,6 +70,7 @@ grub_arch_dl_relocate_symbols (grub_dl_t mod, void *ehdr, + break; + + case R_X86_64_PC32: ++ case R_X86_64_PLT32: + { + grub_int64_t value; + value = ((grub_int32_t) *addr32) + rel->r_addend + sym->st_value - +diff --git a/util/grub-mkimagexx.c b/util/grub-mkimagexx.c +index a2bb054..39d7efb 100644 +--- a/util/grub-mkimagexx.c ++++ b/util/grub-mkimagexx.c +@@ -841,6 +841,7 @@ SUFFIX (relocate_addresses) (Elf_Ehdr *e, Elf_Shdr *sections, + break; + + case R_X86_64_PC32: ++ case R_X86_64_PLT32: + { + grub_uint32_t *t32 = (grub_uint32_t *) target; + *t32 = grub_host_to_target64 (grub_target_to_host32 (*t32) +diff --git a/util/grub-module-verifier.c b/util/grub-module-verifier.c +index 9179285..a79271f 100644 +--- a/util/grub-module-verifier.c ++++ b/util/grub-module-verifier.c +@@ -19,6 +19,7 @@ struct grub_module_verifier_arch archs[] = { + -1 + }, (int[]){ + R_X86_64_PC32, ++ R_X86_64_PLT32, + -1 + } + }, +-- +2.7.4 + diff --git a/packages/grub/core.cfg b/packages/grub/core.cfg new file mode 100644 index 00000000..12ac5ceb --- /dev/null +++ b/packages/grub/core.cfg @@ -0,0 +1,9 @@ +set boot_uuid="2415fbea-3fdc-4260-9560-28418ee80c45" +set root_uuid="501efeae-456a-4162-8784-01209628c6ae" + +export boot_uuid +export root_uuid + +search.fs_uuid $boot_uuid root +set prefix=($root)/grub +configfile /grub/grub.cfg diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec new file mode 100644 index 00000000..a580d915 --- /dev/null +++ b/packages/grub/grub.spec @@ -0,0 +1,126 @@ +%global debug_package %{nil} + +Name: %{_cross_os}grub +Version: 2.02 +Release: 1%{?dist} +Summary: Bootloader with support for Linux and more +License: GPLv3+ +URL: https://www.gnu.org/software/grub/ +Source0: https://ftp.gnu.org/gnu/grub/grub-%{version}.tar.xz +Source1: core.cfg +Patch1: 0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch + +BuildRequires: bison +BuildRequires: flex +BuildRequires: gcc-%{_cross_target} +BuildRequires: grub2-tools +BuildRequires: %{_cross_os}glibc-devel + +%description +%{summary}. + +%package modules +Summary: Modules for the bootloader with support for Linux and more +BuildArch: noarch + +%description modules +%{summary}. + +%package tools +Summary: Tools for the bootloader with support for Linux and more + +%description tools +%{summary}. 
+ +%prep +%autosetup -n grub-%{version} -p1 + +%global grub_cflags -pipe -fno-stack-protector -fno-strict-aliasing +%global grub_ldflags -static + +%build +install -T -m0644 %{SOURCE1} core.cfg + +export \ + CPP="%{_cross_target}-gcc -E" \ + TARGET_CC="%{_cross_target}-gcc" \ + TARGET_CFLAGS="%{grub_cflags}" \ + TARGET_CPPFLAGS="%{grub_cflags}" \ + TARGET_LDFLAGS="%{grub_ldflags}" \ + TARGET_NM="%{_cross_target}-nm" \ + TARGET_OBJCOPY="%{_cross_target}-objcopy" \ + TARGET_STRIP="%{_cross_target}-strip" \ + +%cross_configure \ + CFLAGS="" \ + LDFLAGS="" \ + --target="%{_cross_grub_target}" \ + --with-platform="%{_cross_grub_platform}" \ + --disable-grub-mkfont \ + --enable-efiemu=no \ + --enable-device-mapper=no \ + --enable-libzfs=no \ + --disable-werror \ + +%make_build + +%install +%make_install + +mkdir -p %{buildroot}%{_cross_grubdir} + +grub2-mkimage \ + -c core.cfg \ + -d ./grub-core/ \ + -O "%{_cross_grub_tuple}" \ + -o "%{buildroot}%{_cross_grubdir}/%{_cross_grub_image}" \ + -p "%{_cross_grub_prefix}" \ + biosdisk configfile ext2 linux normal part_gpt search_fs_uuid + +install -m 0644 ./grub-core/boot.img \ + %{buildroot}%{_cross_grubdir}/boot.img + +%files +%dir %{_cross_grubdir} +%{_cross_grubdir}/boot.img +%{_cross_grubdir}/%{_cross_grub_image} +%exclude %{_cross_infodir} +%exclude %{_cross_localedir} +%exclude %{_cross_sysconfdir} + +%files modules +%dir %{_cross_libdir}/grub +%dir %{_cross_libdir}/grub/%{_cross_grub_tuple} +%{_cross_libdir}/grub/%{_cross_grub_tuple}/* + +%files tools +%{_cross_bindir}/grub-editenv +%{_cross_bindir}/grub-file +%{_cross_bindir}/grub-fstest +%{_cross_bindir}/grub-glue-efi +%{_cross_bindir}/grub-kbdcomp +%{_cross_bindir}/grub-menulst2cfg +%{_cross_bindir}/grub-mkimage +%{_cross_bindir}/grub-mklayout +%{_cross_bindir}/grub-mknetdir +%{_cross_bindir}/grub-mkpasswd-pbkdf2 +%{_cross_bindir}/grub-mkrelpath +%{_cross_bindir}/grub-mkrescue +%{_cross_bindir}/grub-mkstandalone +%{_cross_bindir}/grub-render-label +%{_cross_bindir}/grub-script-check +%{_cross_bindir}/grub-syslinux2cfg +%{_cross_sbindir}/grub-bios-setup +%{_cross_sbindir}/grub-install +%{_cross_sbindir}/grub-macbless +%{_cross_sbindir}/grub-mkconfig +%{_cross_sbindir}/grub-ofpathname +%{_cross_sbindir}/grub-probe +%{_cross_sbindir}/grub-reboot +%{_cross_sbindir}/grub-set-default +%{_cross_sbindir}/grub-sparc64-setup + +%dir %{_cross_datadir}/grub +%{_cross_datadir}/grub/grub-mkconfig_lib + +%changelog diff --git a/packages/grub/sources b/packages/grub/sources new file mode 100644 index 00000000..d0fbe4ba --- /dev/null +++ b/packages/grub/sources @@ -0,0 +1 @@ +sha256 810b3798d316394f94096ec2797909dbf23c858e48f7b3830826b8daa06b7b0f grub-2.02.tar.xz From 3298e8c22a286bfe728fd3f4a354a9fa4a50ead5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 17 Mar 2019 22:00:59 +0000 Subject: [PATCH 0017/1356] enable disk image builds Signed-off-by: Ben Cressey --- Makefile | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 2c7aaa1e..2c60168d 100644 --- a/Makefile +++ b/Makefile @@ -41,19 +41,24 @@ define build_rpm $(BUILDCTL_ARGS) endef -define build_fs +define build_image $(eval HASH:= $(shell sha1sum $(2) /dev/null | sha1sum - | awk '{print $$1}')) @$(BUILDCTL) build \ - --frontend-opt target=fs \ + --frontend-opt target=builder \ --frontend-opt build-arg:PACKAGE=$(OS)-$(1)-release \ --frontend-opt build-arg:ARCH=$(1) \ --frontend-opt build-arg:HASH=$(HASH) \ --frontend-opt build-arg:DATE=$(DATE) \ --exporter=docker \ - --exporter-opt 
name=$(OS):$(1) \ - --exporter-opt output=build/$(OS)-$(1).tar \ - $(BUILDCTL_ARGS) ; \ - $(DOCKER) load < build/$(OS)-$(1).tar + --exporter-opt name=$(OS)-builder:$(1) \ + --exporter-opt output=build/$(OS)-$(1)-builder.tar \ + $(BUILDCTL_ARGS) + @$(DOCKER) load < build/$(OS)-$(1)-builder.tar + @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output --privileged \ + $(OS)-builder:$(1) \ + --image-name=$(OS)-$(1).img \ + --package-dir=/local/rpms \ + --output-dir=/local/output endef empty := @@ -75,7 +80,7 @@ list = $(subst $(space),$(comma),$(1)) .SECONDEXPANSION: $(ARCHES): $$($(OS)-$$(@)-release) $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(@)-*.rpm)) - $(call build_fs,$@,$(PKGS)) + $(call build_image,$@,$(PKGS)) all: $(ARCHES) From 8b193f281415beadf4cba331a4431e2a0ed36741 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 18 Mar 2019 00:54:54 +0000 Subject: [PATCH 0018/1356] adjust x86 kernel config for KVM Signed-off-by: Ben Cressey --- packages/kernel/config-x86_64 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index a816d2e8..508e9e44 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -1645,7 +1645,7 @@ CONFIG_CDROM_PKTCDVD_BUFFERS=8 CONFIG_ATA_OVER_ETH=m CONFIG_XEN_BLKDEV_FRONTEND=m CONFIG_XEN_BLKDEV_BACKEND=m -CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_BLK=y # CONFIG_VIRTIO_BLK_SCSI is not set CONFIG_BLK_DEV_RBD=m # CONFIG_BLK_DEV_RSXX is not set @@ -1845,7 +1845,7 @@ CONFIG_SCSI_DEBUG=m # CONFIG_SCSI_PMCRAID is not set # CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_VIRTIO=y CONFIG_SCSI_CHELSIO_FCOE=m # CONFIG_SCSI_DH is not set CONFIG_SCSI_OSD_INITIATOR=m @@ -2038,7 +2038,7 @@ CONFIG_TUN=m CONFIG_TAP=m # CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=m -CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_NET=y CONFIG_NLMON=m # CONFIG_VSOCKMON is not set # CONFIG_ARCNET is not set @@ -2389,14 +2389,14 @@ CONFIG_HVC_DRIVER=y CONFIG_HVC_IRQ=y CONFIG_HVC_XEN=y CONFIG_HVC_XEN_FRONTEND=y -CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_CONSOLE=y # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=m # CONFIG_HW_RANDOM_TIMERIOMEM is not set CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_VIRTIO=y CONFIG_HW_RANDOM_TPM=m CONFIG_NVRAM=m # CONFIG_R3964 is not set @@ -3370,12 +3370,12 @@ CONFIG_VFIO_PCI_IGD=y # CONFIG_VFIO_MDEV is not set CONFIG_IRQ_BYPASS_MANAGER=m CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=m +CONFIG_VIRTIO=y # # Virtio drivers # -CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_PCI_LEGACY=y # CONFIG_VIRTIO_BALLOON is not set # CONFIG_VIRTIO_INPUT is not set @@ -3647,7 +3647,7 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=m CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=m +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y From 75246dcbd05e6e66531d4d0f677c7db6ec100251 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 21 Mar 2019 10:01:26 -0700 Subject: [PATCH 0019/1356] Fix packages/grub/sources to be consistent Signed-off-by: iliana destroyer of worlds --- packages/grub/sources | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/grub/sources b/packages/grub/sources index d0fbe4ba..ee84926c 100644 --- a/packages/grub/sources +++ b/packages/grub/sources @@ -1 +1 @@ -sha256 810b3798d316394f94096ec2797909dbf23c858e48f7b3830826b8daa06b7b0f grub-2.02.tar.xz 
+SHA512 (grub-2.02.tar.xz) = cc6eb0a42b5c8df2f671cc128ff725afb3ff1f8832a196022e433cf0d3b75decfca2316d0aa5fabea75747d55e88f3d021dd93508563f8ca80fd7b9e7fe1f088 From 035ca66706ad22e7ac7fbaddf6735e22f7994807 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 21 Mar 2019 19:28:22 +0000 Subject: [PATCH 0020/1356] Update buildkit to 0.4.0 Signed-off-by: iliana destroyer of worlds --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2c60168d..0105e40a 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ ARCHES := x86_64 aarch64 DOCKER ?= docker -BUILDKIT_VER = v0.3.3 +BUILDKIT_VER = v0.4.0 BUILDKITD_ADDR ?= tcp://127.0.0.1:1234 BUILDCTL_DOCKER_RUN = $(DOCKER) run --rm -ti --entrypoint /usr/bin/buildctl --user $(shell id -u):$(shell id -g) --volume $(TOPDIR):$(TOPDIR) --workdir $(TOPDIR) --network host moby/buildkit:$(BUILDKIT_VER) BUILDCTL ?= $(BUILDCTL_DOCKER_RUN) --addr $(BUILDKITD_ADDR) From 50d4a5f3b2de1a6f023f7684d3f3d3821f12bcd5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 25 Mar 2019 21:11:56 +0000 Subject: [PATCH 0021/1356] allow buildctl to run without a tty Signed-off-by: Ben Cressey --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0105e40a..114871eb 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ DOCKER ?= docker BUILDKIT_VER = v0.4.0 BUILDKITD_ADDR ?= tcp://127.0.0.1:1234 -BUILDCTL_DOCKER_RUN = $(DOCKER) run --rm -ti --entrypoint /usr/bin/buildctl --user $(shell id -u):$(shell id -g) --volume $(TOPDIR):$(TOPDIR) --workdir $(TOPDIR) --network host moby/buildkit:$(BUILDKIT_VER) +BUILDCTL_DOCKER_RUN = $(DOCKER) run --rm -t --entrypoint /usr/bin/buildctl --user $(shell id -u):$(shell id -g) --volume $(TOPDIR):$(TOPDIR) --workdir $(TOPDIR) --network host moby/buildkit:$(BUILDKIT_VER) BUILDCTL ?= $(BUILDCTL_DOCKER_RUN) --addr $(BUILDKITD_ADDR) BUILDCTL_ARGS := --progress=plain BUILDCTL_ARGS += --frontend=dockerfile.v0 From e3d90a6412b61c20d2e243f056869f82f02f7e96 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 25 Mar 2019 21:12:53 +0000 Subject: [PATCH 0022/1356] migrate to new buildctl options Signed-off-by: Ben Cressey --- Makefile | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 114871eb..b3ca46d0 100644 --- a/Makefile +++ b/Makefile @@ -30,28 +30,25 @@ define build_rpm $(eval HASH:= $(shell sha1sum $3 /dev/null | sha1sum - | awk '{printf $$1}')) $(eval RPMS:= $(shell echo $3 | tr ' ' '\n' | awk '/.rpm$$/' | tr '\n' ' ')) @$(BUILDCTL) build \ - --frontend-opt target=rpm \ - --frontend-opt build-arg:PACKAGE=$(1) \ - --frontend-opt build-arg:ARCH=$(2) \ - --frontend-opt build-arg:HASH=$(HASH) \ - --frontend-opt build-arg:RPMS="$(RPMS)" \ - --frontend-opt build-arg:DATE=$(DATE) \ - --exporter=local \ - --exporter-opt output=$(OUTPUT) \ + --opt target=rpm \ + --opt build-arg:PACKAGE=$(1) \ + --opt build-arg:ARCH=$(2) \ + --opt build-arg:HASH=$(HASH) \ + --opt build-arg:RPMS="$(RPMS)" \ + --opt build-arg:DATE=$(DATE) \ + --output type=local,dest=$(OUTPUT) \ $(BUILDCTL_ARGS) endef define build_image $(eval HASH:= $(shell sha1sum $(2) /dev/null | sha1sum - | awk '{print $$1}')) @$(BUILDCTL) build \ - --frontend-opt target=builder \ - --frontend-opt build-arg:PACKAGE=$(OS)-$(1)-release \ - --frontend-opt build-arg:ARCH=$(1) \ - --frontend-opt build-arg:HASH=$(HASH) \ - --frontend-opt build-arg:DATE=$(DATE) \ - --exporter=docker \ - --exporter-opt name=$(OS)-builder:$(1) \ - 
--exporter-opt output=build/$(OS)-$(1)-builder.tar \ + --opt target=builder \ + --opt build-arg:PACKAGE=$(OS)-$(1)-release \ + --opt build-arg:ARCH=$(1) \ + --opt build-arg:HASH=$(HASH) \ + --opt build-arg:DATE=$(DATE) \ + --output type=docker,name=$(OS)-builder:$(1),dest=build/$(OS)-$(1)-builder.tar \ $(BUILDCTL_ARGS) @$(DOCKER) load < build/$(OS)-$(1)-builder.tar @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output --privileged \ From 805228d10263cf40d22c0829e048760fcee3cdb9 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 28 Mar 2019 23:31:12 +0000 Subject: [PATCH 0023/1356] Add gptprio to grub Signed-off-by: iliana destroyer of worlds --- packages/grub/core.cfg | 10 +- packages/grub/gpt.patch | 5725 +++++++++++++++++++++++++++++++++++++++ packages/grub/grub.spec | 10 +- 3 files changed, 5734 insertions(+), 11 deletions(-) create mode 100644 packages/grub/gpt.patch diff --git a/packages/grub/core.cfg b/packages/grub/core.cfg index 12ac5ceb..9cfa4448 100644 --- a/packages/grub/core.cfg +++ b/packages/grub/core.cfg @@ -1,9 +1,5 @@ -set boot_uuid="2415fbea-3fdc-4260-9560-28418ee80c45" -set root_uuid="501efeae-456a-4162-8784-01209628c6ae" - -export boot_uuid -export root_uuid - -search.fs_uuid $boot_uuid root +gptprio.next -d boot_dev -u boot_uuid +set root=$boot_dev set prefix=($root)/grub +export boot_uuid configfile /grub/grub.cfg diff --git a/packages/grub/gpt.patch b/packages/grub/gpt.patch new file mode 100644 index 00000000..b2d506e9 --- /dev/null +++ b/packages/grub/gpt.patch @@ -0,0 +1,5725 @@ +From 9300d0044c1d9e1b8df2784d50a3c26250639ca3 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sun, 28 Sep 2014 21:26:21 -0700 +Subject: [PATCH] gpt: start new GPT module + +This module is a new implementation for reading GUID Partition Tables +which is much stricter than the existing part_gpt module and exports GPT +data directly instead of the generic grub_partition structure. It will +be the basis for modules that need to read/write/update GPT data. + +The current code does nothing more than read and verify the table. 
+--- + Makefile.util.def | 16 ++ + grub-core/Makefile.core.def | 5 + + grub-core/lib/gpt.c | 288 ++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 60 ++++++ + tests/gpt_unit_test.c | 467 +++++++++++++++++++++++++++++++++++++++++++ + 5 files changed, 836 insertions(+) + create mode 100644 grub-core/lib/gpt.c + create mode 100644 tests/gpt_unit_test.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index f9caccb97..48448c28d 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1254,6 +1254,22 @@ program = { + ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; + }; + ++program = { ++ testcase; ++ name = gpt_unit_test; ++ common = tests/gpt_unit_test.c; ++ common = tests/lib/unit_test.c; ++ common = grub-core/disk/host.c; ++ common = grub-core/kern/emu/hostfs.c; ++ common = grub-core/lib/gpt.c; ++ common = grub-core/tests/lib/test.c; ++ ldadd = libgrubmods.a; ++ ldadd = libgrubgcry.a; ++ ldadd = libgrubkern.a; ++ ldadd = grub-core/gnulib/libgnu.a; ++ ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; ++}; ++ + program = { + name = grub-menulst2cfg; + mansection = 1; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 2dfa22a92..d3bcdbe2f 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -821,6 +821,11 @@ module = { + common = commands/gptsync.c; + }; + ++module = { ++ name = gpt; ++ common = lib/gpt.c; ++}; ++ + module = { + name = halt; + nopc = commands/halt.c; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +new file mode 100644 +index 000000000..a308e8537 +--- /dev/null ++++ b/grub-core/lib/gpt.c +@@ -0,0 +1,288 @@ ++/* gpt.c - Read/Verify/Write GUID Partition Tables (GPT). */ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2002,2005,2006,2007,2008 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; ++ ++ ++static grub_err_t ++grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) ++{ ++ grub_uint8_t *crc32_context; ++ grub_uint32_t old; ++ ++ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); ++ if (!crc32_context) ++ return grub_errno; ++ ++ /* crc32 must be computed with the field cleared. */ ++ old = gpt->crc32; ++ gpt->crc32 = 0; ++ GRUB_MD_CRC32->init (crc32_context); ++ GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); ++ GRUB_MD_CRC32->final (crc32_context); ++ gpt->crc32 = old; ++ ++ /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ ++ *crc = grub_swap_bytes32 (*(grub_uint32_t *) ++ GRUB_MD_CRC32->read (crc32_context)); ++ ++ grub_free (crc32_context); ++ ++ return GRUB_ERR_NONE; ++} ++ ++/* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ ++grub_err_t ++grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) ++{ ++ unsigned int i; ++ ++ if (mbr->signature != ++ grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid MBR signature"); ++ ++ for (i = 0; i < sizeof (mbr->entries); i++) ++ if (mbr->entries[i].type == GRUB_PC_PARTITION_TYPE_GPT_DISK) ++ return GRUB_ERR_NONE; ++ ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); ++} ++ ++grub_err_t ++grub_gpt_header_check (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size) ++{ ++ grub_uint32_t crc = 0, size; ++ ++ if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); ++ ++ if (gpt->version != GRUB_GPT_HEADER_VERSION) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); ++ ++ if (grub_gpt_header_crc32 (gpt, &crc)) ++ return grub_errno; ++ ++ if (gpt->crc32 != crc) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); ++ ++ /* The header size must be between 92 and the sector size. */ ++ size = grub_le_to_cpu32 (gpt->headersize); ++ if (size < 92U || size > (1U << log_sector_size)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); ++ ++ /* The partition entry size must be a multiple of 128. */ ++ size = grub_le_to_cpu32 (gpt->partentry_size); ++ if (size < 128 || size % 128) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); ++ ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_disk_addr_t addr; ++ ++ /* TODO: The gpt partmap module searches for the primary header instead ++ * of relying on the disk's sector size. For now trust the disk driver ++ * but eventually this code should match the existing behavior. */ ++ gpt->log_sector_size = disk->log_sector_size; ++ ++ addr = grub_gpt_sector_to_addr (gpt, 1); ++ if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) ++ return grub_errno; ++ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_uint64_t sector; ++ grub_disk_addr_t addr; ++ ++ /* Assumes gpt->log_sector_size == disk->log_sector_size */ ++ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ sector = disk->total_sectors - 1; ++ else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ sector = grub_le_to_cpu64 (gpt->primary.backup); ++ else ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "Unable to locate backup GPT"); ++ ++ addr = grub_gpt_sector_to_addr (gpt, sector); ++ if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) ++ return grub_errno; ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static struct grub_gpt_partentry * ++grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header) ++{ ++ struct grub_gpt_partentry *entries = NULL; ++ grub_uint8_t *crc32_context = NULL; ++ grub_uint32_t count, size, crc; ++ grub_disk_addr_t addr; ++ grub_size_t entries_size; ++ ++ /* Grub doesn't include calloc, hence the manual overflow check. 
*/ ++ count = grub_le_to_cpu32 (header->maxpart); ++ size = grub_le_to_cpu32 (header->partentry_size); ++ entries_size = count *size; ++ if (size && entries_size / size != count) ++ { ++ grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("out of memory")); ++ goto fail; ++ } ++ ++ entries = grub_malloc (entries_size); ++ if (!entries) ++ goto fail; ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (grub_disk_read (disk, addr, 0, entries_size, entries)) ++ goto fail; ++ ++ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); ++ if (!crc32_context) ++ goto fail; ++ ++ GRUB_MD_CRC32->init (crc32_context); ++ GRUB_MD_CRC32->write (crc32_context, entries, entries_size); ++ GRUB_MD_CRC32->final (crc32_context); ++ ++ crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); ++ if (grub_swap_bytes32 (crc) != header->partentry_crc32) ++ { ++ grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); ++ goto fail; ++ } ++ ++ grub_free (crc32_context); ++ return entries; ++ ++fail: ++ grub_free (entries); ++ grub_free (crc32_context); ++ return NULL; ++} ++ ++grub_gpt_t ++grub_gpt_read (grub_disk_t disk) ++{ ++ grub_gpt_t gpt; ++ struct grub_gpt_partentry *backup_entries; ++ ++ gpt = grub_zalloc (sizeof (*gpt)); ++ if (!gpt) ++ goto fail; ++ ++ if (grub_disk_read (disk, 0, 0, sizeof (gpt->mbr), &gpt->mbr)) ++ goto fail; ++ ++ /* Check the MBR but errors aren't reported beyond the status bit. */ ++ if (grub_gpt_pmbr_check (&gpt->mbr)) ++ grub_errno = GRUB_ERR_NONE; ++ else ++ gpt->status |= GRUB_GPT_PROTECTIVE_MBR; ++ ++ /* If both the primary and backup fail report the primary's error. */ ++ if (grub_gpt_read_primary (disk, gpt)) ++ { ++ grub_error_push (); ++ grub_gpt_read_backup (disk, gpt); ++ grub_error_pop (); ++ } ++ else ++ grub_gpt_read_backup (disk, gpt); ++ ++ /* If either succeeded clear any possible error from the other. */ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || ++ gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ grub_errno = GRUB_ERR_NONE; ++ else ++ goto fail; ++ ++ /* Same error handling scheme for the entry tables. 
*/ ++ gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); ++ if (!gpt->entries) ++ { ++ grub_error_push (); ++ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); ++ grub_error_pop (); ++ } ++ else ++ { ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); ++ } ++ ++ if (backup_entries) ++ { ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) ++ grub_free (backup_entries); ++ else ++ gpt->entries = backup_entries; ++ } ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) ++ { ++ grub_errno = GRUB_ERR_NONE; ++ return gpt; ++ } ++ ++fail: ++ grub_gpt_free (gpt); ++ return NULL; ++} ++ ++void ++grub_gpt_free (grub_gpt_t gpt) ++{ ++ if (!gpt) ++ return; ++ ++ grub_free (gpt->entries); ++ grub_free (gpt); ++} +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 1b32f6725..04ed2d7f1 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -21,6 +21,7 @@ + + #include + #include ++#include + + struct grub_gpt_part_type + { +@@ -50,6 +51,12 @@ typedef struct grub_gpt_part_type grub_gpt_part_type_t; + { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ + } + ++#define GRUB_GPT_HEADER_MAGIC \ ++ { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } ++ ++#define GRUB_GPT_HEADER_VERSION \ ++ grub_cpu_to_le32_compile_time (0x00010000U) ++ + struct grub_gpt_header + { + grub_uint8_t magic[8]; +@@ -78,10 +85,63 @@ struct grub_gpt_partentry + char name[72]; + } GRUB_PACKED; + ++/* Basic GPT partmap module. */ + grub_err_t + grub_gpt_partition_map_iterate (grub_disk_t disk, + grub_partition_iterate_hook_t hook, + void *hook_data); + ++/* Advanced GPT library. */ ++typedef enum grub_gpt_status ++ { ++ GRUB_GPT_PROTECTIVE_MBR = 0x01, ++ GRUB_GPT_HYBRID_MBR = 0x02, ++ GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, ++ GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, ++ GRUB_GPT_BACKUP_HEADER_VALID = 0x10, ++ GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, ++ } grub_gpt_status_t; ++ ++#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) ++ ++/* UEFI requires the entries table to be at least 16384 bytes for a ++ * total of 128 entries given the standard 128 byte entry size. */ ++#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 ++ ++struct grub_gpt ++{ ++ /* Bit field indicating which structures on disk are valid. */ ++ grub_gpt_status_t status; ++ ++ /* Protective or hybrid MBR. */ ++ struct grub_msdos_partition_mbr mbr; ++ ++ /* Each of the two GPT headers. */ ++ struct grub_gpt_header primary; ++ struct grub_gpt_header backup; ++ ++ /* Only need one entries table, on disk both copies are identical. */ ++ struct grub_gpt_partentry *entries; ++ ++ /* Logarithm of sector size, in case GPT and disk driver disagree. */ ++ unsigned int log_sector_size; ++}; ++typedef struct grub_gpt *grub_gpt_t; ++ ++/* Translate GPT sectors to GRUB's 512 byte block addresses. */ ++static inline grub_disk_addr_t ++grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) ++{ ++ return (sector << (gpt->log_sector_size - GRUB_DISK_SECTOR_BITS)); ++} ++ ++/* Allocates and fills new grub_gpt structure, free with grub_gpt_free. 
*/ ++grub_gpt_t grub_gpt_read (grub_disk_t disk); ++ ++void grub_gpt_free (grub_gpt_t gpt); ++ ++grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); ++grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size); + + #endif /* ! GRUB_GPT_PARTITION_HEADER */ +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +new file mode 100644 +index 000000000..a824cd967 +--- /dev/null ++++ b/tests/gpt_unit_test.c +@@ -0,0 +1,467 @@ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* from gnulib */ ++#include ++ ++ ++/* GPT section sizes. */ ++#define HEADER_SIZE (sizeof (struct grub_gpt_header)) ++#define HEADER_PAD (GRUB_DISK_SECTOR_SIZE - HEADER_SIZE) ++#define ENTRY_SIZE (sizeof (struct grub_gpt_partentry)) ++#define TABLE_ENTRIES 0x80 ++#define TABLE_SIZE (TABLE_ENTRIES * ENTRY_SIZE) ++#define TABLE_SECTORS (TABLE_SIZE / GRUB_DISK_SECTOR_SIZE) ++ ++/* Double check that the table size calculation was valid. */ ++verify (TABLE_SECTORS * GRUB_DISK_SECTOR_SIZE == TABLE_SIZE); ++ ++/* GPT section locations for a 1MiB disk. */ ++#define DISK_SECTORS 0x800 ++#define DISK_SIZE (GRUB_DISK_SECTOR_SIZE * DISK_SECTORS) ++#define PRIMARY_HEADER_SECTOR 0x1 ++#define PRIMARY_TABLE_SECTOR 0x2 ++#define BACKUP_HEADER_SECTOR (DISK_SECTORS - 0x1) ++#define BACKUP_TABLE_SECTOR (BACKUP_HEADER_SECTOR - TABLE_SECTORS) ++ ++#define DATA_START_SECTOR (PRIMARY_TABLE_SECTOR + TABLE_SECTORS) ++#define DATA_END_SECTOR (BACKUP_TABLE_SECTOR - 0x1) ++#define DATA_SECTORS (BACKUP_TABLE_SECTOR - DATA_START_SECTOR) ++#define DATA_SIZE (GRUB_DISK_SECTOR_SIZE * DATA_SECTORS) ++ ++struct test_disk ++{ ++ struct grub_msdos_partition_mbr mbr; ++ ++ struct grub_gpt_header primary_header; ++ grub_uint8_t primary_header_pad[HEADER_PAD]; ++ struct grub_gpt_partentry primary_entries[TABLE_ENTRIES]; ++ ++ grub_uint8_t data[DATA_SIZE]; ++ ++ struct grub_gpt_partentry backup_entries[TABLE_ENTRIES]; ++ struct grub_gpt_header backup_header; ++ grub_uint8_t backup_header_pad[HEADER_PAD]; ++} GRUB_PACKED; ++ ++/* Sanity check that all the above ugly math was correct. */ ++verify (sizeof (struct test_disk) == DISK_SIZE); ++ ++struct test_data ++{ ++ int fd; ++ grub_device_t dev; ++ struct test_disk *raw; ++}; ++ ++ ++/* Sample primary GPT header for an empty 1MB disk. 
*/ ++static const struct grub_gpt_header example_primary = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), ++ .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++}; ++ ++/* And the backup header. */ ++static const struct grub_gpt_header example_backup = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), ++ .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++}; ++ ++/* Sample protective MBR for the same 1MB disk. Note, this matches ++ * parted and fdisk behavior. The UEFI spec uses different values. */ ++static const struct grub_msdos_partition_mbr example_pmbr = { ++ .entries = {{.flag = 0x00, ++ .start_head = 0x00, ++ .start_sector = 0x01, ++ .start_cylinder = 0x00, ++ .type = 0xee, ++ .end_head = 0xfe, ++ .end_sector = 0xff, ++ .end_cylinder = 0xff, ++ .start = grub_cpu_to_le32_compile_time (0x1), ++ .length = grub_cpu_to_le32_compile_time (DISK_SECTORS - 0x1), ++ }}, ++ .signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE), ++}; ++ ++/* If errors are left in grub's error stack things can get confused. */ ++static void ++assert_error_stack_empty (void) ++{ ++ do ++ { ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "error on stack: %s", grub_errmsg); ++ } ++ while (grub_error_pop ()); ++} ++ ++static grub_err_t ++execute_command2 (const char *name, const char *arg1, const char *arg2) ++{ ++ grub_command_t cmd; ++ grub_err_t err; ++ char *argv[2]; ++ ++ cmd = grub_command_find (name); ++ if (!cmd) ++ grub_fatal ("can't find command %s", name); ++ ++ argv[0] = strdup (arg1); ++ argv[1] = strdup (arg2); ++ err = (cmd->func) (cmd, 2, argv); ++ free (argv[0]); ++ free (argv[1]); ++ ++ return err; ++} ++ ++static void ++sync_disk (struct test_data *data) ++{ ++ if (msync (data->raw, DISK_SIZE, MS_SYNC | MS_INVALIDATE) < 0) ++ grub_fatal ("Syncing disk failed: %s", strerror (errno)); ++ ++ grub_disk_cache_invalidate_all (); ++} ++ ++static void ++reset_disk (struct test_data *data) ++{ ++ memset (data->raw, 0, DISK_SIZE); ++ ++ /* Initialize image with valid example tables. 
*/ ++ memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); ++ memcpy (&data->raw->primary_header, &example_primary, ++ sizeof (data->raw->primary_header)); ++ memcpy (&data->raw->backup_header, &example_backup, ++ sizeof (data->raw->backup_header)); ++ ++ sync_disk (data); ++} ++ ++static void ++open_disk (struct test_data *data) ++{ ++ const char *loop = "loop0"; ++ char template[] = "/tmp/grub_gpt_test.XXXXXX"; ++ char host[sizeof ("(host)") + sizeof (template)]; ++ ++ data->fd = mkstemp (template); ++ if (data->fd < 0) ++ grub_fatal ("Creating %s failed: %s", template, strerror (errno)); ++ ++ if (ftruncate (data->fd, DISK_SIZE) < 0) ++ { ++ int err = errno; ++ unlink (template); ++ grub_fatal ("Resizing %s failed: %s", template, strerror (err)); ++ } ++ ++ data->raw = mmap (NULL, DISK_SIZE, PROT_READ | PROT_WRITE, ++ MAP_SHARED, data->fd, 0); ++ if (data->raw == MAP_FAILED) ++ { ++ int err = errno; ++ unlink (template); ++ grub_fatal ("Maping %s failed: %s", template, strerror (err)); ++ } ++ ++ snprintf (host, sizeof (host), "(host)%s", template); ++ if (execute_command2 ("loopback", loop, host) != GRUB_ERR_NONE) ++ { ++ unlink (template); ++ grub_fatal ("loopback %s %s failed: %s", loop, host, grub_errmsg); ++ } ++ ++ if (unlink (template) < 0) ++ grub_fatal ("Unlinking %s failed: %s", template, strerror (errno)); ++ ++ reset_disk (data); ++ ++ data->dev = grub_device_open (loop); ++ if (!data->dev) ++ grub_fatal ("Opening %s failed: %s", loop, grub_errmsg); ++} ++ ++static void ++close_disk (struct test_data *data) ++{ ++ char *loop; ++ ++ assert_error_stack_empty (); ++ ++ if (munmap (data->raw, DISK_SIZE) || close (data->fd)) ++ grub_fatal ("Closing disk image failed: %s", strerror (errno)); ++ ++ loop = strdup (data->dev->disk->name); ++ grub_test_assert (grub_device_close (data->dev) == GRUB_ERR_NONE, ++ "Closing disk device failed: %s", grub_errmsg); ++ ++ grub_test_assert (execute_command2 ("loopback", "-d", loop) == ++ GRUB_ERR_NONE, "loopback -d %s failed: %s", loop, ++ grub_errmsg); ++ ++ free (loop); ++} ++ ++static grub_gpt_t ++read_disk (struct test_data *data) ++{ ++ grub_gpt_t gpt; ++ ++ gpt = grub_gpt_read (data->dev->disk); ++ if (gpt == NULL) ++ { ++ grub_print_error (); ++ grub_fatal ("grub_gpt_read failed"); ++ } ++ ++ ++ return gpt; ++} ++ ++static void ++pmbr_test (void) ++{ ++ struct grub_msdos_partition_mbr mbr; ++ ++ memset (&mbr, 0, sizeof (mbr)); ++ ++ /* Empty is invalid. */ ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* A table without a protective partition is invalid. */ ++ mbr.signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE); ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* A table with a protective type is ok. */ ++ memcpy (&mbr, &example_pmbr, sizeof (mbr)); ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ ++static void ++header_test (void) ++{ ++ struct grub_gpt_header primary, backup; ++ ++ /* Example headers should be valid. 
*/ ++ memcpy (&primary, &example_primary, sizeof (primary)); ++ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ memcpy (&backup, &example_backup, sizeof (backup)); ++ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* Twiddle the GUID to invalidate the CRC. */ ++ primary.guid[0] = 0; ++ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ backup.guid[0] = 0; ++ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ ++static void ++read_valid_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | ++ GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID), ++ "unexpected status: 0x%02x", gpt->status); ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ ++static void ++read_invalid_entries_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Corrupt the first entry in both tables. */ ++ memset (&data.raw->primary_entries[0], 0x55, ++ sizeof (data.raw->primary_entries[0])); ++ memset (&data.raw->backup_entries[0], 0x55, ++ sizeof (data.raw->backup_entries[0])); ++ sync_disk (&data); ++ ++ gpt = grub_gpt_read (data.dev->disk); ++ grub_test_assert (gpt == NULL, "no error reported for corrupt entries"); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++static void ++read_fallback_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Corrupt the primary header. */ ++ memset (&data.raw->primary_header.guid, 0x55, ++ sizeof (data.raw->primary_header.guid)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) == 0, ++ "unreported corrupt primary header"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the backup header. */ ++ memset (&data.raw->backup_header.guid, 0x55, ++ sizeof (data.raw->backup_header.guid)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, ++ "unreported corrupt backup header"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the primary entry table. */ ++ memset (&data.raw->primary_entries[0], 0x55, ++ sizeof (data.raw->primary_entries[0])); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) == 0, ++ "unreported corrupt primary entries table"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the backup entry table. 
*/ ++ memset (&data.raw->backup_entries[0], 0x55, ++ sizeof (data.raw->backup_entries[0])); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) == 0, ++ "unreported corrupt backup entries table"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* If primary is corrupt and disk size is unknown fallback fails. */ ++ memset (&data.raw->primary_header.guid, 0x55, ++ sizeof (data.raw->primary_header.guid)); ++ sync_disk (&data); ++ data.dev->disk->total_sectors = GRUB_DISK_SIZE_UNKNOWN; ++ gpt = grub_gpt_read (data.dev->disk); ++ grub_test_assert (gpt == NULL, "no error reported"); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++void ++grub_unit_test_init (void) ++{ ++ grub_init_all (); ++ grub_hostfs_init (); ++ grub_host_init (); ++ grub_test_register ("gpt_pmbr_test", pmbr_test); ++ grub_test_register ("gpt_header_test", header_test); ++ grub_test_register ("gpt_read_valid_test", read_valid_test); ++ grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); ++ grub_test_register ("gpt_read_fallback_test", read_fallback_test); ++} ++ ++void ++grub_unit_test_fini (void) ++{ ++ grub_test_unregister ("gpt_pmbr_test"); ++ grub_test_unregister ("gpt_header_test"); ++ grub_test_unregister ("gpt_read_valid_test"); ++ grub_test_unregister ("gpt_read_invalid_test"); ++ grub_test_unregister ("gpt_read_fallback_test"); ++ grub_fini_all (); ++} +From 91a8986e53926bc0a94f251a9b4fe8974af75020 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 15:39:13 -0700 +Subject: [PATCH] gpt: rename misnamed header location fields + +The header location fields refer to 'this header' and 'alternate header' +respectively, not 'primary header' and 'backup header'. The previous +field names are backwards for the backup header. 
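For reference, a minimal sketch of the relationship the new names express, using the sector constants from tests/gpt_unit_test.c (illustration only, not part of the diff below):

    /* Primary header, stored at LBA 1. */
    primary.header_lba    = grub_cpu_to_le64 (PRIMARY_HEADER_SECTOR); /* itself   */
    primary.alternate_lba = grub_cpu_to_le64 (BACKUP_HEADER_SECTOR);  /* its twin */

    /* Backup header, stored at the last LBA.  Under the old names this
       initializer read ".primary = BACKUP_HEADER_SECTOR", which is the
       "backwards" naming the message refers to. */
    backup.header_lba     = grub_cpu_to_le64 (BACKUP_HEADER_SECTOR);  /* itself   */
    backup.alternate_lba  = grub_cpu_to_le64 (PRIMARY_HEADER_SECTOR); /* its twin */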
+--- + grub-core/lib/gpt.c | 2 +- + include/grub/gpt_partition.h | 4 ++-- + tests/gpt_unit_test.c | 8 ++++---- + 3 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index a308e8537..705bd77f9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -137,7 +137,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) + sector = disk->total_sectors - 1; + else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- sector = grub_le_to_cpu64 (gpt->primary.backup); ++ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); + else + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "Unable to locate backup GPT"); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 04ed2d7f1..a7ef61875 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -64,8 +64,8 @@ struct grub_gpt_header + grub_uint32_t headersize; + grub_uint32_t crc32; + grub_uint32_t unused1; +- grub_uint64_t primary; +- grub_uint64_t backup; ++ grub_uint64_t header_lba; ++ grub_uint64_t alternate_lba; + grub_uint64_t start; + grub_uint64_t end; + grub_uint8_t guid[16]; +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index a824cd967..4d70868af 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -94,8 +94,8 @@ static const struct grub_gpt_header example_primary = { + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), + .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), +- .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), +- .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), + .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +@@ -112,8 +112,8 @@ static const struct grub_gpt_header example_backup = { + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), + .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), +- .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), +- .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), + .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +From 23e0197ea4561fc3a9e59c1af9bf2357d21e1b52 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 16:46:17 -0700 +Subject: [PATCH] gpt: record size of of the entries table + +The size of the entries table will be needed later when writing it back +to disk. Restructure the entries reading code to flow a little better. 
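The size being recorded is the product of two little-endian header fields, checked for multiplication overflow by hand since GRUB has no calloc. Condensed from grub_gpt_read_entries, the computation that feeds the new entries_size field is:

    grub_uint32_t count = grub_le_to_cpu32 (header->maxpart);
    grub_uint32_t size  = grub_le_to_cpu32 (header->partentry_size);
    grub_size_t entries_size = count * size;

    /* Detect overflow of count * size before allocating the table. */
    if (size && entries_size / size != count)
      return grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("out of memory"));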
+--- + grub-core/lib/gpt.c | 53 +++++++++++++++++++------------------------- + include/grub/gpt_partition.h | 5 ++++- + 2 files changed, 27 insertions(+), 31 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 705bd77f9..01df7f3e8 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -153,7 +153,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return GRUB_ERR_NONE; + } + +-static struct grub_gpt_partentry * ++static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { +@@ -173,6 +173,10 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + goto fail; + } + ++ /* Double check that the header was validated properly. */ ++ if (entries_size < GRUB_GPT_DEFAULT_ENTRIES_SIZE) ++ return grub_error (GRUB_ERR_BUG, "invalid GPT entries table size"); ++ + entries = grub_malloc (entries_size); + if (!entries) + goto fail; +@@ -197,19 +201,21 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + } + + grub_free (crc32_context); +- return entries; ++ grub_free (gpt->entries); ++ gpt->entries = entries; ++ gpt->entries_size = entries_size; ++ return GRUB_ERR_NONE; + + fail: + grub_free (entries); + grub_free (crc32_context); +- return NULL; ++ return grub_errno; + } + + grub_gpt_t + grub_gpt_read (grub_disk_t disk) + { + grub_gpt_t gpt; +- struct grub_gpt_partentry *backup_entries; + + gpt = grub_zalloc (sizeof (*gpt)); + if (!gpt) +@@ -241,36 +247,23 @@ grub_gpt_read (grub_disk_t disk) + else + goto fail; + +- /* Same error handling scheme for the entry tables. */ +- gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); +- if (!gpt->entries) +- { +- grub_error_push (); +- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); +- grub_error_pop (); +- } +- else +- { +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; +- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); +- } ++ /* Similarly, favor the value or error from the primary table. */ ++ if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && ++ !grub_gpt_read_entries (disk, gpt, &gpt->backup)) ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; + +- if (backup_entries) +- { +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; +- +- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) +- grub_free (backup_entries); +- else +- gpt->entries = backup_entries; +- } ++ grub_errno = GRUB_ERR_NONE; ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && ++ !grub_gpt_read_entries (disk, gpt, &gpt->primary)) ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; + + if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || + gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) +- { +- grub_errno = GRUB_ERR_NONE; +- return gpt; +- } ++ grub_errno = GRUB_ERR_NONE; ++ else ++ goto fail; ++ ++ return gpt; + + fail: + grub_gpt_free (gpt); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index a7ef61875..7f41e22dd 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -106,7 +106,9 @@ typedef enum grub_gpt_status + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +-#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 ++#define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 ++#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH \ ++ (GRUB_GPT_DEFAULT_ENTRIES_SIZE / sizeof (struct grub_gpt_partentry)) + + struct grub_gpt + { +@@ -122,6 +124,7 @@ struct grub_gpt + + /* Only need one entries table, on disk both copies are identical. 
*/ + struct grub_gpt_partentry *entries; ++ grub_size_t entries_size; + + /* Logarithm of sector size, in case GPT and disk driver disagree. */ + unsigned int log_sector_size; +From 187c377743b26d8fcf44ea3e5ac1ae6edf92ab23 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 18:18:17 -0700 +Subject: [PATCH] gpt: consolidate crc32 computation code + +The gcrypt API is overly verbose, wrap it up in a helper function to +keep this rather common operation easy to use. +--- + grub-core/lib/gpt.c | 43 ++++++++++++++++++++++++------------------- + 1 file changed, 24 insertions(+), 19 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 01df7f3e8..43a150942 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -32,22 +32,17 @@ static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + + static grub_err_t +-grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) ++grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) + { + grub_uint8_t *crc32_context; +- grub_uint32_t old; + + crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); + if (!crc32_context) + return grub_errno; + +- /* crc32 must be computed with the field cleared. */ +- old = gpt->crc32; +- gpt->crc32 = 0; + GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); ++ GRUB_MD_CRC32->write (crc32_context, data, len); + GRUB_MD_CRC32->final (crc32_context); +- gpt->crc32 = old; + + /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ + *crc = grub_swap_bytes32 (*(grub_uint32_t *) +@@ -58,6 +53,25 @@ grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) + return GRUB_ERR_NONE; + } + ++static grub_err_t ++grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) ++{ ++ grub_uint32_t old, new; ++ grub_err_t err; ++ ++ /* crc32 must be computed with the field cleared. */ ++ old = header->crc32; ++ header->crc32 = 0; ++ err = grub_gpt_lecrc32 (header, sizeof (*header), &new); ++ header->crc32 = old; ++ ++ if (err) ++ return err; ++ ++ *crc = new; ++ return GRUB_ERR_NONE; ++} ++ + /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ + grub_err_t + grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) +@@ -87,7 +101,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->version != GRUB_GPT_HEADER_VERSION) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); + +- if (grub_gpt_header_crc32 (gpt, &crc)) ++ if (grub_gpt_header_lecrc32 (gpt, &crc)) + return grub_errno; + + if (gpt->crc32 != crc) +@@ -158,7 +172,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { + struct grub_gpt_partentry *entries = NULL; +- grub_uint8_t *crc32_context = NULL; + grub_uint32_t count, size, crc; + grub_disk_addr_t addr; + grub_size_t entries_size; +@@ -185,22 +198,15 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); +- if (!crc32_context) ++ if (grub_gpt_lecrc32 (entries, entries_size, &crc)) + goto fail; + +- GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, entries, entries_size); +- GRUB_MD_CRC32->final (crc32_context); +- +- crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); +- if (grub_swap_bytes32 (crc) != header->partentry_crc32) ++ if (crc != header->partentry_crc32) + { + grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); + goto fail; + } + +- grub_free (crc32_context); + grub_free (gpt->entries); + gpt->entries = entries; + gpt->entries_size = entries_size; +@@ -208,7 +214,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + + fail: + grub_free (entries); +- grub_free (crc32_context); + return grub_errno; + } + +From f6e8fc02aa5f5ed02e529e2b30a94b0589d30e31 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 18:21:07 -0700 +Subject: [PATCH] gpt: add new repair function to sync up primary and backup + tables. 
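A minimal sketch of a caller, along the lines of the gptrepair command added later in this series (the disk variable is assumed to be an already-open grub_disk_t):

    grub_gpt_t gpt = grub_gpt_read (disk);
    if (!gpt)
      return grub_errno;

    /* Rebuild whichever copy is stale or corrupt from the valid one and
       recompute both header and entry-table CRCs; on success the four
       header/entry *_VALID status bits are all set. */
    if (grub_gpt_repair (disk, gpt))
      {
        grub_gpt_free (gpt);
        return grub_errno;
      }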
+ +--- + grub-core/lib/gpt.c | 90 ++++++++++++++++++++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 3 ++ + tests/gpt_unit_test.c | 49 ++++++++++++++++++++++++ + 3 files changed, 142 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 43a150942..2d61df488 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -31,6 +31,20 @@ GRUB_MOD_LICENSE ("GPLv3+"); + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + ++static grub_uint64_t ++grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) ++{ ++ unsigned int sector_size; ++ grub_uint64_t sectors; ++ ++ sector_size = 1U << gpt->log_sector_size; ++ sectors = size / sector_size; ++ if (size % sector_size) ++ sectors++; ++ ++ return sectors; ++} ++ + static grub_err_t + grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) + { +@@ -275,6 +289,82 @@ fail: + return NULL; + } + ++grub_err_t ++grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_uint64_t backup_header, backup_entries; ++ grub_uint32_t crc; ++ ++ if (disk->log_sector_size != gpt->log_sector_size) ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "GPT sector size must match disk sector size"); ++ ++ if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) ++ return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ { ++ backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); ++ } ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ { ++ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); ++ } ++ else ++ return grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ ++ /* Relocate backup to end if disk whenever possible. */ ++ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ backup_header = disk->total_sectors - 1; ++ ++ backup_entries = backup_header - ++ grub_gpt_size_to_sectors (gpt, gpt->entries_size); ++ ++ /* Update/fixup header and partition table locations. */ ++ gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); ++ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); ++ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); ++ gpt->backup.header_lba = gpt->primary.alternate_lba; ++ gpt->backup.alternate_lba = gpt->primary.header_lba; ++ gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); ++ ++ /* Writing headers larger than our header structure are unsupported. */ ++ gpt->primary.headersize = ++ grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); ++ gpt->backup.headersize = ++ grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); ++ ++ /* Recompute checksums. */ ++ if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) ++ return grub_errno; ++ ++ gpt->primary.partentry_crc32 = crc; ++ gpt->backup.partentry_crc32 = crc; ++ ++ if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) ++ return grub_errno; ++ ++ if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) ++ return grub_errno; ++ ++ /* Sanity check. 
*/ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ ++ return GRUB_ERR_NONE; ++} ++ + void + grub_gpt_free (grub_gpt_t gpt) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 7f41e22dd..62d027e4e 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -141,6 +141,9 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) + /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ + grub_gpt_t grub_gpt_read (grub_disk_t disk); + ++/* Sync up primary and backup headers, recompute checksums. */ ++grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); ++ + void grub_gpt_free (grub_gpt_t gpt); + + grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 4d70868af..83198bebf 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -442,6 +443,52 @@ read_fallback_test (void) + close_disk (&data); + } + ++static void ++repair_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Erase/Repair primary. */ ++ memset (&data.raw->primary_header, 0, sizeof (data.raw->primary_header)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ if (memcmp (&gpt->primary, &example_primary, sizeof (gpt->primary))) ++ { ++ printf ("Invalid restored primary header:\n"); ++ hexdump (16, (char*)&gpt->primary, sizeof (gpt->primary)); ++ printf ("Expected primary header:\n"); ++ hexdump (16, (char*)&example_primary, sizeof (example_primary)); ++ grub_test_assert (0, "repair did not restore primary header"); ++ } ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Erase/Repair backup. 
*/ ++ memset (&data.raw->backup_header, 0, sizeof (data.raw->backup_header)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ if (memcmp (&gpt->backup, &example_backup, sizeof (gpt->backup))) ++ { ++ printf ("Invalid restored backup header:\n"); ++ hexdump (16, (char*)&gpt->backup, sizeof (gpt->backup)); ++ printf ("Expected backup header:\n"); ++ hexdump (16, (char*)&example_backup, sizeof (example_backup)); ++ grub_test_assert (0, "repair did not restore backup header"); ++ } ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ close_disk (&data); ++} + void + grub_unit_test_init (void) + { +@@ -453,6 +500,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_valid_test", read_valid_test); + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); ++ grub_test_register ("gpt_repair_test", repair_test); + } + + void +@@ -463,5 +511,6 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_valid_test"); + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); ++ grub_test_unregister ("gpt_repair_test"); + grub_fini_all (); + } +From c9041ec4e40315f2734f2b6a38a75ba17cbba0ca Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sun, 19 Oct 2014 14:21:29 -0700 +Subject: [PATCH] gpt: add write function and gptrepair command + +The first hint of something practical, a command that can restore any of +the GPT structures from the alternate location. New test case must run +under QEMU because the loopback device used by the other unit tests does +not support writing. +--- + Makefile.util.def | 6 +++ + grub-core/Makefile.core.def | 5 ++ + grub-core/commands/gptrepair.c | 116 +++++++++++++++++++++++++++++++++++++++++ + grub-core/lib/gpt.c | 44 ++++++++++++++-- + include/grub/gpt_partition.h | 8 +++ + tests/gptrepair_test.in | 102 ++++++++++++++++++++++++++++++++++++ + 6 files changed, 277 insertions(+), 4 deletions(-) + create mode 100644 grub-core/commands/gptrepair.c + create mode 100644 tests/gptrepair_test.in + +diff --git a/Makefile.util.def b/Makefile.util.def +index 48448c28d..8156fca5f 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1159,6 +1159,12 @@ script = { + common = tests/grub_cmd_tr.in; + }; + ++script = { ++ testcase; ++ name = gptrepair_test; ++ common = tests/gptrepair_test.in; ++}; ++ + script = { + testcase; + name = file_filter_test; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index d3bcdbe2f..23a047632 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -821,6 +821,11 @@ module = { + common = commands/gptsync.c; + }; + ++module = { ++ name = gptrepair; ++ common = commands/gptrepair.c; ++}; ++ + module = { + name = gpt; + common = lib/gpt.c; +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +new file mode 100644 +index 000000000..38392fd8f +--- /dev/null ++++ b/grub-core/commands/gptrepair.c +@@ -0,0 +1,116 @@ ++/* gptrepair.c - verify and restore GPT info from alternate location. */ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2009 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. 
++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static char * ++trim_dev_name (char *name) ++{ ++ grub_size_t len = grub_strlen (name); ++ if (len && name[0] == '(' && name[len - 1] == ')') ++ { ++ name[len - 1] = '\0'; ++ name = name + 1; ++ } ++ return name; ++} ++ ++static grub_err_t ++grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), ++ int argc, char **args) ++{ ++ grub_device_t dev = NULL; ++ grub_gpt_t gpt = NULL; ++ char *dev_name; ++ grub_uint32_t primary_crc, backup_crc; ++ enum grub_gpt_status old_status; ++ ++ if (argc != 1 || !grub_strlen(args[0])) ++ return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); ++ ++ dev_name = trim_dev_name (args[0]); ++ dev = grub_device_open (dev_name); ++ if (!dev) ++ goto done; ++ ++ if (!dev->disk) ++ { ++ grub_error (GRUB_ERR_BAD_ARGUMENT, "not a disk"); ++ goto done; ++ } ++ ++ gpt = grub_gpt_read (dev->disk); ++ if (!gpt) ++ goto done; ++ ++ primary_crc = gpt->primary.crc32; ++ backup_crc = gpt->backup.crc32; ++ old_status = gpt->status; ++ ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; ++ ++ if (primary_crc == gpt->primary.crc32 && ++ backup_crc == gpt->backup.crc32 && ++ old_status && gpt->status) ++ { ++ grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); ++ goto done; ++ } ++ ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; ++ ++ if (!(old_status & GRUB_GPT_PRIMARY_VALID)) ++ grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); ++ ++ if (!(old_status & GRUB_GPT_BACKUP_VALID)) ++ grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); ++ ++done: ++ if (gpt) ++ grub_gpt_free (gpt); ++ ++ if (dev) ++ grub_device_close (dev); ++ ++ return grub_errno; ++} ++ ++static grub_command_t cmd; ++ ++GRUB_MOD_INIT(gptrepair) ++{ ++ cmd = grub_register_command ("gptrepair", grub_cmd_gptrepair, ++ N_("DEVICE"), ++ N_("Verify and repair GPT on drive DEVICE.")); ++} ++ ++GRUB_MOD_FINI(gptrepair) ++{ ++ grub_unregister_command (cmd); ++} +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 2d61df488..67ffdf703 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -357,10 +357,46 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + +- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | +- GRUB_GPT_PRIMARY_ENTRIES_VALID | +- GRUB_GPT_BACKUP_HEADER_VALID | +- GRUB_GPT_BACKUP_ENTRIES_VALID); ++ gpt->status |= GRUB_GPT_BOTH_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header) ++{ ++ grub_disk_addr_t addr; ++ ++ if (grub_le_to_cpu32 (header->headersize) != sizeof (*header)) ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "Header size is %u, must be %u", ++ 
grub_le_to_cpu32 (header->headersize), ++ sizeof (*header)); ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); ++ if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) ++ return grub_errno; ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ /* TODO: update/repair protective MBRs too. */ ++ ++ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); ++ ++ if (grub_gpt_write_table (disk, gpt, &gpt->primary)) ++ return grub_errno; ++ ++ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) ++ return grub_errno; + + return GRUB_ERR_NONE; + } +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 62d027e4e..3cac6df32 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -103,6 +103,11 @@ typedef enum grub_gpt_status + } grub_gpt_status_t; + + #define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) ++#define GRUB_GPT_PRIMARY_VALID \ ++ (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) ++#define GRUB_GPT_BACKUP_VALID \ ++ (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) ++#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +@@ -144,6 +149,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + ++/* Write headers and entry tables back to disk. */ ++grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); ++ + void grub_gpt_free (grub_gpt_t gpt); + + grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); +diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in +new file mode 100644 +index 000000000..80b2de633 +--- /dev/null ++++ b/tests/gptrepair_test.in +@@ -0,0 +1,102 @@ ++#! /bin/sh ++set -e ++ ++# Copyright (C) 2010 Free Software Foundation, Inc. ++# Copyright (C) 2014 CoreOS, Inc. ++# ++# GRUB is free software: you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation, either version 3 of the License, or ++# (at your option) any later version. ++# ++# GRUB is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GRUB. If not, see . ++ ++parted=parted ++grubshell=@builddir@/grub-shell ++ ++. "@builddir@/grub-core/modinfo.sh" ++ ++case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in ++ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) ++ disk=ata0 ++ ;; ++ powerpc-ieee1275) ++ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. 
++ exit 0 ++ ;; ++ sparc64-ieee1275) ++ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ i386-ieee1275) ++ disk=ieee1275/d ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ mips-arc) ++ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. ++ exit 0 ;; ++ mipsel-arc) ++ disk=arc/scsi0/disk0/rdisk0 ++ ;; ++ *) ++ disk=hd0 ++ ;; ++esac ++img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++trap "rm -f '${img1}' '${ing2}'" EXIT ++ ++create_disk_image () { ++ size=$1 ++ rm -f "${img1}" ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none ++ ${parted} -a none -s "${img1}" mklabel gpt ++ cp "${img1}" "${img2}" ++} ++ ++wipe_disk_area () { ++ sector=$1 ++ size=$2 ++ dd if=/dev/zero of="${img2}" bs=512 count=${size} seek=${sector} conv=notrunc status=none ++} ++ ++do_repair () { ++ output="`echo "gptrepair ($disk)" | "${grubshell}" --disk="${img2}"`" ++ if echo "${output}" | grep ^error; then ++ return 1 ++ fi ++ if echo "${output}" | grep -v GPT; then ++ echo "Unexpected output ${output}" ++ return 1 ++ fi ++ echo "${output}" ++} ++ ++echo "Nothing to repair:" ++create_disk_image 100 ++do_repair ++cmp "${img1}" "${img2}" ++echo ++ ++echo "Repair primary (MBR left intact)" ++create_disk_image 100 ++wipe_disk_area 1 1 ++do_repair ++cmp "${img1}" "${img2}" ++echo ++ ++echo "Repair backup" ++create_disk_image 100 ++wipe_disk_area 99 1 ++do_repair ++cmp "${img1}" "${img2}" ++echo +From 24341bb2904a4f2b40d69dbd633789cf49e9616b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sun, 19 Oct 2014 20:44:34 -0700 +Subject: [PATCH] tests: fix path to words file on Gentoo/CoreOS + +By default there isn't a linux.words file, but there is words. +--- + tests/util/grub-fs-tester.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/tests/util/grub-fs-tester.in b/tests/util/grub-fs-tester.in +index 2337771a1..d768d66d1 100644 +--- a/tests/util/grub-fs-tester.in ++++ b/tests/util/grub-fs-tester.in +@@ -241,8 +241,10 @@ for ((LOGSECSIZE=MINLOGSECSIZE;LOGSECSIZE<=MAXLOGSECSIZE;LOGSECSIZE=LOGSECSIZE + + CFILESN=1 + if test -f /usr/share/dict/american-english; then + CFILESSRC[0]="/usr/share/dict/american-english" +- else ++ elif test -f /usr/share/dict/linux.words; then + CFILESSRC[0]="/usr/share/dict/linux.words" ++ else ++ CFILESSRC[0]="/usr/share/dict/words" + fi + case x"$fs" in + # FS LIMITATION: 8.3 names +From 059ae5370a9d5f7fe19c928bf5000751ece28ccd Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 30 Oct 2014 20:55:21 -0700 +Subject: [PATCH] gpt: add a new generic GUID type + +In order to do anything with partition GUIDs they need to be stored in a +proper structure like the partition type GUIDs. Additionally add an +initializer macro to simplify defining both GUID types. 
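The macro byte-swaps the first three fields to little endian at compile time, so a GUID can be written in its usual text form rather than as a raw byte array. For example, the unit-test disk GUID 69c131ad-67d6-46c6-93c4-124c755256ac, previously spelled out as the byte array {0xad, 0x31, 0xc1, 0x69, ...}, could be defined as (sketch only, the name test_disk_guid is hypothetical):

    static const grub_gpt_guid_t test_disk_guid =
      GRUB_GPT_GUID_INIT (0x69c131ad, 0x67d6, 0x46c6,
                          0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac);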
+--- + include/grub/gpt_partition.h | 36 +++++++++++++++++++----------------- + tests/gpt_unit_test.c | 12 ++++++------ + 2 files changed, 25 insertions(+), 23 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 3cac6df32..df076ca64 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -23,33 +23,35 @@ + #include + #include + +-struct grub_gpt_part_type ++struct grub_gpt_guid + { + grub_uint32_t data1; + grub_uint16_t data2; + grub_uint16_t data3; + grub_uint8_t data4[8]; + } __attribute__ ((aligned(8))); +-typedef struct grub_gpt_part_type grub_gpt_part_type_t; ++typedef struct grub_gpt_guid grub_gpt_guid_t; ++typedef struct grub_gpt_guid grub_gpt_part_type_t; ++ ++#define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ ++ { \ ++ grub_cpu_to_le32_compile_time (a), \ ++ grub_cpu_to_le16_compile_time (b), \ ++ grub_cpu_to_le16_compile_time (c), \ ++ { d1, d2, d3, d4, d5, d6, d7, d8 } \ ++ } + + #define GRUB_GPT_PARTITION_TYPE_EMPTY \ +- { 0x0, 0x0, 0x0, \ +- { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 } \ +- } ++ GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ ++ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) + + #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ +- { grub_cpu_to_le32_compile_time (0x21686148), \ +- grub_cpu_to_le16_compile_time (0x6449), \ +- grub_cpu_to_le16_compile_time (0x6e6f), \ +- { 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49 } \ +- } ++ GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ ++ 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) + + #define GRUB_GPT_PARTITION_TYPE_LDM \ +- { grub_cpu_to_le32_compile_time (0x5808C8AAU),\ +- grub_cpu_to_le16_compile_time (0x7E8F), \ +- grub_cpu_to_le16_compile_time (0x42E0), \ +- { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ +- } ++ GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ ++ 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } +@@ -68,7 +70,7 @@ struct grub_gpt_header + grub_uint64_t alternate_lba; + grub_uint64_t start; + grub_uint64_t end; +- grub_uint8_t guid[16]; ++ grub_gpt_guid_t guid; + grub_uint64_t partitions; + grub_uint32_t maxpart; + grub_uint32_t partentry_size; +@@ -78,7 +80,7 @@ struct grub_gpt_header + struct grub_gpt_partentry + { + grub_gpt_part_type_t type; +- grub_uint8_t guid[16]; ++ grub_gpt_guid_t guid; + grub_uint64_t start; + grub_uint64_t end; + grub_uint64_t attrib; +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 83198bebf..86e4364a5 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -99,8 +99,8 @@ static const struct grub_gpt_header example_primary = { + .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), +- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), + .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +@@ -117,8 +117,8 @@ static const struct grub_gpt_header example_backup = { + .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time 
(DATA_END_SECTOR), +- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), + .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +@@ -326,13 +326,13 @@ header_test (void) + grub_errno = GRUB_ERR_NONE; + + /* Twiddle the GUID to invalidate the CRC. */ +- primary.guid[0] = 0; ++ primary.guid.data1 = 0; + grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); + grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, + "unexpected error: %s", grub_errmsg); + grub_errno = GRUB_ERR_NONE; + +- backup.guid[0] = 0; ++ backup.guid.data1 = 0; + grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); + grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, + "unexpected error: %s", grub_errmsg); +From 6cf94a34ca4f605aa353d2717561d608bc13472d Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 3 Nov 2014 17:14:37 -0800 +Subject: [PATCH] gpt: new gptprio.next command for selecting priority based + partitions + +Basic usage would look something like this: + + gptprio.next -d usr_dev -u usr_uuid + linuxefi ($usr_dev)/boot/vmlinuz mount.usr=PARTUUID=$usr_uuid + +After booting the system should set the 'successful' bit on the +partition that was used. +--- + Makefile.util.def | 6 ++ + grub-core/Makefile.core.def | 5 + + grub-core/commands/gptprio.c | 238 +++++++++++++++++++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 49 +++++++++ + tests/gptprio_test.in | 150 +++++++++++++++++++++++++++ + 5 files changed, 448 insertions(+) + create mode 100644 grub-core/commands/gptprio.c + create mode 100644 tests/gptprio_test.in + +diff --git a/Makefile.util.def b/Makefile.util.def +index 8156fca5f..9249f77be 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1165,6 +1165,12 @@ script = { + common = tests/gptrepair_test.in; + }; + ++script = { ++ testcase; ++ name = gptprio_test; ++ common = tests/gptprio_test.in; ++}; ++ + script = { + testcase; + name = file_filter_test; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 23a047632..4620138cb 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -826,6 +826,11 @@ module = { + common = commands/gptrepair.c; + }; + ++module = { ++ name = gptprio; ++ common = commands/gptprio.c; ++}; ++ + module = { + name = gpt; + common = lib/gpt.c; +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +new file mode 100644 +index 000000000..29bd11d68 +--- /dev/null ++++ b/grub-core/commands/gptprio.c +@@ -0,0 +1,238 @@ ++/* gptprio.c - manage priority based partition selection. */ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2009 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static const struct grub_arg_option options_next[] = { ++ {"set-device", 'd', 0, ++ N_("Set a variable to the name of selected partition."), ++ N_("VARNAME"), ARG_TYPE_STRING}, ++ {"set-uuid", 'u', 0, ++ N_("Set a variable to the GPT UUID of selected partition."), ++ N_("VARNAME"), ARG_TYPE_STRING}, ++ {0, 0, 0, 0, 0, 0} ++}; ++ ++enum options_next ++{ ++ NEXT_SET_DEVICE, ++ NEXT_SET_UUID, ++}; ++ ++static unsigned int ++grub_gptprio_priority (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY, 4); ++} ++ ++static unsigned int ++grub_gptprio_tries_left (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); ++} ++ ++static void ++grub_gptprio_set_tries_left (struct grub_gpt_partentry *entry, ++ unsigned int tries_left) ++{ ++ grub_gpt_entry_set_attribute ++ (entry, tries_left, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); ++} ++ ++static unsigned int ++grub_gptprio_successful (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL, 1); ++} ++ ++static grub_err_t ++grub_find_next (const char *disk_name, ++ const grub_gpt_part_type_t *part_type, ++ char **part_name, char **part_guid) ++{ ++ struct grub_gpt_partentry *part_found = NULL; ++ grub_device_t dev = NULL; ++ grub_gpt_t gpt = NULL; ++ grub_uint32_t i, part_index; ++ ++ dev = grub_device_open (disk_name); ++ if (!dev) ++ goto done; ++ ++ gpt = grub_gpt_read (dev->disk); ++ if (!gpt) ++ goto done; ++ ++ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; ++ ++ for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) ++ { ++ struct grub_gpt_partentry *part = &gpt->entries[i]; ++ ++ if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) ++ { ++ unsigned int priority, tries_left, successful, old_priority = 0; ++ ++ priority = grub_gptprio_priority (part); ++ tries_left = grub_gptprio_tries_left (part); ++ successful = grub_gptprio_successful (part); ++ ++ if (part_found) ++ old_priority = grub_gptprio_priority (part_found); ++ ++ if ((tries_left || successful) && priority > old_priority) ++ { ++ part_index = i; ++ part_found = part; ++ } ++ } ++ } ++ ++ if (!part_found) ++ { ++ grub_error (GRUB_ERR_UNKNOWN_DEVICE, N_("no such partition")); ++ goto done; ++ } ++ ++ if (grub_gptprio_tries_left (part_found)) ++ { ++ unsigned int tries_left = grub_gptprio_tries_left (part_found); ++ ++ grub_gptprio_set_tries_left (part_found, tries_left - 1); ++ ++ if (grub_gpt_update_checksums (gpt)) ++ goto done; ++ ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; ++ } ++ ++ *part_name = grub_xasprintf ("%s,gpt%u", disk_name, part_index + 1); ++ if (!*part_name) ++ goto done; ++ ++ *part_guid = ++ grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", ++ grub_le_to_cpu32 (part_found->guid.data1), ++ grub_le_to_cpu16 (part_found->guid.data2), ++ grub_le_to_cpu16 (part_found->guid.data3), ++ part_found->guid.data4[0], ++ part_found->guid.data4[1], ++ part_found->guid.data4[2], ++ part_found->guid.data4[3], ++ part_found->guid.data4[4], ++ part_found->guid.data4[5], ++ 
part_found->guid.data4[6], ++ part_found->guid.data4[7]); ++ if (!*part_name) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++done: ++ grub_gpt_free (gpt); ++ ++ if (dev) ++ grub_device_close (dev); ++ ++ return grub_errno; ++} ++ ++ ++ ++static grub_err_t ++grub_cmd_next (grub_extcmd_context_t ctxt, int argc, char **args) ++{ ++ struct grub_arg_list *state = ctxt->state; ++ char *p, *root = NULL, *part_name = NULL, *part_guid = NULL; ++ ++ /* TODO: Add a uuid parser and a command line flag for providing type. */ ++ grub_gpt_part_type_t part_type = GRUB_GPT_PARTITION_TYPE_USR_X86_64; ++ ++ if (!state[NEXT_SET_DEVICE].set || !state[NEXT_SET_UUID].set) ++ { ++ grub_error (GRUB_ERR_INVALID_COMMAND, N_("-d and -u are required")); ++ goto done; ++ } ++ ++ if (argc == 0) ++ root = grub_strdup (grub_env_get ("root")); ++ else if (argc == 1) ++ root = grub_strdup (args[0]); ++ else ++ { ++ grub_error (GRUB_ERR_BAD_ARGUMENT, N_("unexpected arguments")); ++ goto done; ++ } ++ ++ if (!root) ++ goto done; ++ ++ /* To make using $root practical strip off the partition name. */ ++ p = grub_strchr (root, ','); ++ if (p) ++ *p = '\0'; ++ ++ if (grub_find_next (root, &part_type, &part_name, &part_guid)) ++ goto done; ++ ++ if (grub_env_set (state[NEXT_SET_DEVICE].arg, part_name)) ++ goto done; ++ ++ if (grub_env_set (state[NEXT_SET_UUID].arg, part_guid)) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++done: ++ grub_free (root); ++ grub_free (part_name); ++ grub_free (part_guid); ++ ++ return grub_errno; ++} ++ ++static grub_extcmd_t cmd_next; ++ ++GRUB_MOD_INIT(gptprio) ++{ ++ cmd_next = grub_register_extcmd ("gptprio.next", grub_cmd_next, 0, ++ N_("-d VARNAME -u VARNAME [DEVICE]"), ++ N_("Select next partition to boot."), ++ options_next); ++} ++ ++GRUB_MOD_FINI(gptprio) ++{ ++ grub_unregister_extcmd (cmd_next); ++} +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index df076ca64..e41c66539 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -53,6 +53,10 @@ typedef struct grub_gpt_guid grub_gpt_part_type_t; + GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ + 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + ++#define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ ++ GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ ++ 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) ++ + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } + +@@ -87,6 +91,51 @@ struct grub_gpt_partentry + char name[72]; + } GRUB_PACKED; + ++enum grub_gpt_part_attr_offset ++{ ++ /* Standard partition attribute bits defined by UEFI. */ ++ GRUB_GPT_PART_ATTR_OFFSET_REQUIRED = 0, ++ GRUB_GPT_PART_ATTR_OFFSET_NO_BLOCK_IO_PROTOCOL = 1, ++ GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE = 2, ++ ++ /* De facto standard attribute bits defined by Microsoft and reused by ++ * http://www.freedesktop.org/wiki/Specifications/DiscoverablePartitionsSpec */ ++ GRUB_GPT_PART_ATTR_OFFSET_READ_ONLY = 60, ++ GRUB_GPT_PART_ATTR_OFFSET_NO_AUTO = 63, ++ ++ /* Partition attributes for priority based selection, ++ * Currently only valid for PARTITION_TYPE_USR_X86_64. ++ * TRIES_LEFT and PRIORITY are 4 bit wide fields. */ ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY = 48, ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT = 52, ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL = 56, ++}; ++ ++/* Helpers for reading/writing partition attributes. 
*/ ++static inline grub_uint64_t ++grub_gpt_entry_attribute (struct grub_gpt_partentry *entry, ++ enum grub_gpt_part_attr_offset offset, ++ unsigned int bits) ++{ ++ grub_uint64_t attrib = grub_le_to_cpu64 (entry->attrib); ++ ++ return (attrib >> offset) & ((1ULL << bits) - 1); ++} ++ ++static inline void ++grub_gpt_entry_set_attribute (struct grub_gpt_partentry *entry, ++ grub_uint64_t value, ++ enum grub_gpt_part_attr_offset offset, ++ unsigned int bits) ++{ ++ grub_uint64_t attrib, mask; ++ ++ mask = (((1ULL << bits) - 1) << offset); ++ attrib = grub_le_to_cpu64 (entry->attrib) & ~mask; ++ attrib |= ((value << offset) & mask); ++ entry->attrib = grub_cpu_to_le64 (attrib); ++} ++ + /* Basic GPT partmap module. */ + grub_err_t + grub_gpt_partition_map_iterate (grub_disk_t disk, +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +new file mode 100644 +index 000000000..f4aea0dc9 +--- /dev/null ++++ b/tests/gptprio_test.in +@@ -0,0 +1,150 @@ ++#! /bin/bash ++set -e ++ ++# Copyright (C) 2010 Free Software Foundation, Inc. ++# Copyright (C) 2014 CoreOS, Inc. ++# ++# GRUB is free software: you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation, either version 3 of the License, or ++# (at your option) any later version. ++# ++# GRUB is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GRUB. If not, see . ++ ++sgdisk=sgdisk ++grubshell=@builddir@/grub-shell ++ ++if ! which "${sgdisk}" >/dev/null 2>&1; then ++ echo "sgdisk not installed; cannot test gptprio." ++ exit 77 ++fi ++ ++. "@builddir@/grub-core/modinfo.sh" ++ ++case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in ++ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) ++ disk=ata0 ++ ;; ++ powerpc-ieee1275) ++ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ sparc64-ieee1275) ++ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ i386-ieee1275) ++ disk=ieee1275/d ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ mips-arc) ++ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. 
++ exit 0 ;; ++ mipsel-arc) ++ disk=arc/scsi0/disk0/rdisk0 ++ ;; ++ *) ++ disk=hd0 ++ ;; ++esac ++img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++trap "rm -f '${img1}'" EXIT ++ ++prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" ++declare -a prio_uuid ++prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" ++prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" ++prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" ++ ++create_disk_image () { ++ rm -f "${img1}" ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none ++ ${sgdisk} \ ++ -n 1:0:+1 -c 1:ESP -t 1:ef00 \ ++ -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ ++ -n 3:0:+1 -c 3:B -t 3:"${prio_type}" -u 3:"${prio_uuid[3]}" \ ++ -n 4:0:+1 -c 4:C -t 4:"${prio_type}" -u 4:"${prio_uuid[4]}" \ ++ "${img1}" >/dev/null ++} ++ ++ ++fmt_prio () { ++ priority=$(( ( $1 & 15 ) << 48 )) ++ tries=$(( ( $2 & 15 ) << 52 )) ++ success=$(( ( $3 & 1 ) << 56 )) ++ printf %016x $(( priority | tries | success )) ++} ++ ++set_prio () { ++ part="$1" ++ attr=$(fmt_prio $2 $3 $4) ++ ${sgdisk} -A "${part}:=:${attr}" "${img1}" >/dev/null ++} ++ ++check_prio () { ++ part="$1" ++ expect=$(fmt_prio $2 $3 $4) ++ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ ++ | awk '/^Attribute flags: / {print $3}') ++ if [[ "${expect}" != "${result}" ]]; then ++ echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 ++ exit 1 ++ fi ++} ++ ++run_next() { ++ "${grubshell}" --disk="${img1}" --modules=gptprio < +Date: Sat, 15 Nov 2014 13:27:13 -0800 +Subject: [PATCH] gpt: split out checksum recomputation + +For basic data modifications the full repair function is overkill. +--- + grub-core/lib/gpt.c | 30 ++++++++++++++++++++---------- + include/grub/gpt_partition.h | 3 +++ + 2 files changed, 23 insertions(+), 10 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 67ffdf703..198234071 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -293,7 +293,6 @@ grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; +- grub_uint32_t crc; + + if (disk->log_sector_size != gpt->log_sector_size) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, +@@ -331,13 +330,32 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + gpt->backup.alternate_lba = gpt->primary.header_lba; + gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); + ++ /* Recompute checksums. */ ++ if (grub_gpt_update_checksums (gpt)) ++ return grub_errno; ++ ++ /* Sanity check. */ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= GRUB_GPT_BOTH_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_update_checksums (grub_gpt_t gpt) ++{ ++ grub_uint32_t crc; ++ + /* Writing headers larger than our header structure are unsupported. */ + gpt->primary.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); + gpt->backup.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); + +- /* Recompute checksums. */ + if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) + return grub_errno; + +@@ -350,14 +368,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) + return grub_errno; + +- /* Sanity check. 
*/ +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); +- +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); +- +- gpt->status |= GRUB_GPT_BOTH_VALID; + return GRUB_ERR_NONE; + } + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index e41c66539..50592d6d0 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -200,6 +200,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + ++/* Recompute checksums, must be called after modifying GPT data. */ ++grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); ++ + /* Write headers and entry tables back to disk. */ + grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); + +From 548fe74144c4745f25c6a488f99cf3a7c04aa20b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 12:55:53 -0800 +Subject: [PATCH] gpt: move gpt guid printing function to common library + +--- + grub-core/commands/gptprio.c | 16 ++-------------- + grub-core/lib/gpt.c | 13 +++++++++++++ + include/grub/gpt_partition.h | 4 ++++ + 3 files changed, 19 insertions(+), 14 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 29bd11d68..ce5840b4e 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -141,20 +141,8 @@ grub_find_next (const char *disk_name, + if (!*part_name) + goto done; + +- *part_guid = +- grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", +- grub_le_to_cpu32 (part_found->guid.data1), +- grub_le_to_cpu16 (part_found->guid.data2), +- grub_le_to_cpu16 (part_found->guid.data3), +- part_found->guid.data4[0], +- part_found->guid.data4[1], +- part_found->guid.data4[2], +- part_found->guid.data4[3], +- part_found->guid.data4[4], +- part_found->guid.data4[5], +- part_found->guid.data4[6], +- part_found->guid.data4[7]); +- if (!*part_name) ++ *part_guid = grub_gpt_guid_to_str (&part_found->guid); ++ if (!*part_guid) + goto done; + + grub_errno = GRUB_ERR_NONE; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 198234071..9a1835b84 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -31,6 +31,19 @@ GRUB_MOD_LICENSE ("GPLv3+"); + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + ++char * ++grub_gpt_guid_to_str (grub_gpt_guid_t *guid) ++{ ++ return grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", ++ grub_le_to_cpu32 (guid->data1), ++ grub_le_to_cpu16 (guid->data2), ++ grub_le_to_cpu16 (guid->data3), ++ guid->data4[0], guid->data4[1], ++ guid->data4[2], guid->data4[3], ++ guid->data4[4], guid->data4[5], ++ guid->data4[6], guid->data4[7]); ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 50592d6d0..166fd4b55 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -33,6 +33,10 @@ struct grub_gpt_guid + typedef struct grub_gpt_guid grub_gpt_guid_t; + typedef struct grub_gpt_guid grub_gpt_part_type_t; + ++/* Format the raw little-endian GUID as a newly allocated string. 
*/ ++char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); ++ ++ + #define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ + { \ + grub_cpu_to_le32_compile_time (a), \ +From ef9950304568defc9cc6a674cbad58a3d7947200 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 14:54:27 -0800 +Subject: [PATCH] gpt: switch partition names to a 16 bit type + +In UEFI/GPT strings are UTF-16 so use a uint16 to make dealing with the +string practical. +--- + include/grub/gpt_partition.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 166fd4b55..1142317e3 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -92,7 +92,7 @@ struct grub_gpt_partentry + grub_uint64_t start; + grub_uint64_t end; + grub_uint64_t attrib; +- char name[72]; ++ grub_uint16_t name[36]; + } GRUB_PACKED; + + enum grub_gpt_part_attr_offset +From 65c930d19113f45719a2f2696a7dac5447ec90ed Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 15:49:57 -0800 +Subject: [PATCH] tests: add some partitions to the gpt unit test data + +--- + tests/gpt_unit_test.c | 65 +++++++++++++++++++++++++++++++++++++++++++-------- + 1 file changed, 55 insertions(+), 10 deletions(-) + +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 86e4364a5..5692a5a52 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -89,12 +89,12 @@ struct test_data + }; + + +-/* Sample primary GPT header for an empty 1MB disk. */ ++/* Sample primary GPT header for a 1MB disk. */ + static const struct grub_gpt_header example_primary = { + .magic = GRUB_GPT_HEADER_MAGIC, + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), +- .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), ++ .crc32 = grub_cpu_to_le32_compile_time (0xb985abe0), + .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), +@@ -104,7 +104,52 @@ static const struct grub_gpt_header example_primary = { + .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), ++}; ++ ++static const struct grub_gpt_partentry example_entries[TABLE_ENTRIES] = { ++ { ++ .type = GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM, ++ .guid = GRUB_GPT_GUID_INIT (0xa0f1792e, 0xb4ce, 0x4136, 0xbc, 0xf2, ++ 0x1a, 0xfc, 0x13, 0x3c, 0x28, 0x28), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (0x3f), ++ .attrib = 0x0, ++ .name = { ++ grub_cpu_to_le16_compile_time ('E'), ++ grub_cpu_to_le16_compile_time ('F'), ++ grub_cpu_to_le16_compile_time ('I'), ++ grub_cpu_to_le16_compile_time (' '), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time ('Y'), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time ('T'), ++ grub_cpu_to_le16_compile_time ('E'), ++ grub_cpu_to_le16_compile_time ('M'), ++ 0x0, ++ } ++ }, ++ { ++ .type = GRUB_GPT_PARTITION_TYPE_BIOS_BOOT, ++ .guid = GRUB_GPT_GUID_INIT (0x876c898d, 0x1b40, 0x4727, 0xa1, 0x61, ++ 0xed, 0xf9, 0xb5, 0x48, 0x66, 0x74), ++ .start = grub_cpu_to_le64_compile_time (0x40), ++ .end = 
grub_cpu_to_le64_compile_time (0x7f), ++ .attrib = grub_cpu_to_le64_compile_time ( ++ 1ULL << GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE), ++ .name = { ++ grub_cpu_to_le16_compile_time ('B'), ++ grub_cpu_to_le16_compile_time ('I'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time (' '), ++ grub_cpu_to_le16_compile_time ('B'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('T'), ++ 0x0, ++ } ++ }, + }; + + /* And the backup header. */ +@@ -112,7 +157,7 @@ static const struct grub_gpt_header example_backup = { + .magic = GRUB_GPT_HEADER_MAGIC, + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), +- .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), ++ .crc32 = grub_cpu_to_le32_compile_time (0x0af785eb), + .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), +@@ -122,7 +167,7 @@ static const struct grub_gpt_header example_backup = { + .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), + }; + + /* Sample protective MBR for the same 1MB disk. Note, this matches +@@ -192,6 +237,10 @@ reset_disk (struct test_data *data) + memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); + memcpy (&data->raw->primary_header, &example_primary, + sizeof (data->raw->primary_header)); ++ memcpy (&data->raw->primary_entries, &example_entries, ++ sizeof (data->raw->primary_entries)); ++ memcpy (&data->raw->backup_entries, &example_entries, ++ sizeof (data->raw->backup_entries)); + memcpy (&data->raw->backup_header, &example_backup, + sizeof (data->raw->backup_header)); + +@@ -270,11 +319,7 @@ read_disk (struct test_data *data) + + gpt = grub_gpt_read (data->dev->disk); + if (gpt == NULL) +- { +- grub_print_error (); +- grub_fatal ("grub_gpt_read failed"); +- } +- ++ grub_fatal ("grub_gpt_read failed: %s", grub_errmsg); + + return gpt; + } +From ff730f68d8f9816001d4b4cff974f500f670a992 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 16:34:21 -0800 +Subject: [PATCH] gpt: add search by partition label and uuid commands + +Builds on the existing filesystem search code. Only for GPT right now. 
+--- + Makefile.util.def | 2 + + grub-core/Makefile.core.def | 10 ++++ + grub-core/commands/search.c | 49 ++++++++++++++++++++ + grub-core/commands/search_part_label.c | 5 ++ + grub-core/commands/search_part_uuid.c | 5 ++ + grub-core/commands/search_wrap.c | 12 +++++ + grub-core/lib/gpt.c | 64 ++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 16 +++++++ + include/grub/search.h | 4 ++ + tests/gpt_unit_test.c | 84 ++++++++++++++++++++++++++++++++++ + 10 files changed, 251 insertions(+) + create mode 100644 grub-core/commands/search_part_label.c + create mode 100644 grub-core/commands/search_part_uuid.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index 9249f77be..bc0f178ff 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1271,6 +1271,8 @@ program = { + name = gpt_unit_test; + common = tests/gpt_unit_test.c; + common = tests/lib/unit_test.c; ++ common = grub-core/commands/search_part_label.c; ++ common = grub-core/commands/search_part_uuid.c; + common = grub-core/disk/host.c; + common = grub-core/kern/emu/hostfs.c; + common = grub-core/lib/gpt.c; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 4620138cb..4cce18d6d 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -1013,6 +1013,16 @@ module = { + common = commands/search_label.c; + }; + ++module = { ++ name = search_part_uuid; ++ common = commands/search_part_uuid.c; ++}; ++ ++module = { ++ name = search_part_label; ++ common = commands/search_part_label.c; ++}; ++ + module = { + name = setpci; + common = commands/setpci.c; +diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c +index 7dd32e445..09e165ed3 100644 +--- a/grub-core/commands/search.c ++++ b/grub-core/commands/search.c +@@ -30,6 +30,9 @@ + #include + #include + #include ++#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) ++#include ++#endif + + GRUB_MOD_LICENSE ("GPLv3+"); + +@@ -90,6 +93,44 @@ iterate_device (const char *name, void *data) + } + grub_free (buf); + } ++#elif defined(DO_SEARCH_PART_UUID) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) ++ { ++ if (grub_gpt_part_uuid (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcasecmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ ++ grub_device_close (dev); ++ } ++ } ++#elif defined(DO_SEARCH_PART_LABEL) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) ++ { ++ if (grub_gpt_part_label (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ ++ grub_device_close (dev); ++ } ++ } + #else + { + /* SEARCH_FS_UUID or SEARCH_LABEL */ +@@ -313,6 +354,10 @@ static grub_command_t cmd; + + #ifdef DO_SEARCH_FILE + GRUB_MOD_INIT(search_fs_file) ++#elif defined(DO_SEARCH_PART_UUID) ++GRUB_MOD_INIT(search_part_uuid) ++#elif defined(DO_SEARCH_PART_LABEL) ++GRUB_MOD_INIT(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_INIT(search_fs_uuid) + #else +@@ -327,6 +372,10 @@ GRUB_MOD_INIT(search_label) + + #ifdef DO_SEARCH_FILE + GRUB_MOD_FINI(search_fs_file) ++#elif defined(DO_SEARCH_PART_UUID) ++GRUB_MOD_FINI(search_part_uuid) ++#elif defined(DO_SEARCH_PART_LABEL) ++GRUB_MOD_FINI(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_FINI(search_fs_uuid) + #else +diff --git a/grub-core/commands/search_part_label.c b/grub-core/commands/search_part_label.c +new file mode 100644 +index 000000000..ca906cbd9 +--- /dev/null ++++ 
b/grub-core/commands/search_part_label.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_PART_LABEL 1 ++#define FUNC_NAME grub_search_part_label ++#define COMMAND_NAME "search.part_label" ++#define HELP_MESSAGE N_("Search devices by partition label. If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_part_uuid.c b/grub-core/commands/search_part_uuid.c +new file mode 100644 +index 000000000..2d1d3d0d7 +--- /dev/null ++++ b/grub-core/commands/search_part_uuid.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_PART_UUID 1 ++#define FUNC_NAME grub_search_part_uuid ++#define COMMAND_NAME "search.part_uuid" ++#define HELP_MESSAGE N_("Search devices by partition UUID. If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c +index d7fd26b94..e3ff756df 100644 +--- a/grub-core/commands/search_wrap.c ++++ b/grub-core/commands/search_wrap.c +@@ -36,6 +36,10 @@ static const struct grub_arg_option options[] = + 0, 0}, + {"fs-uuid", 'u', 0, N_("Search devices by a filesystem UUID."), + 0, 0}, ++ {"part-label", 'L', 0, N_("Search devices by a partition label."), ++ 0, 0}, ++ {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), ++ 0, 0}, + {"set", 's', GRUB_ARG_OPTION_OPTIONAL, + N_("Set a variable to the first device found."), N_("VARNAME"), + ARG_TYPE_STRING}, +@@ -71,6 +75,8 @@ enum options + SEARCH_FILE, + SEARCH_LABEL, + SEARCH_FS_UUID, ++ SEARCH_PART_LABEL, ++ SEARCH_PART_UUID, + SEARCH_SET, + SEARCH_NO_FLOPPY, + SEARCH_HINT, +@@ -186,6 +192,12 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + else if (state[SEARCH_FS_UUID].set) + grub_search_fs_uuid (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); ++ else if (state[SEARCH_PART_LABEL].set) ++ grub_search_part_label (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); ++ else if (state[SEARCH_PART_UUID].set) ++ grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); + else if (state[SEARCH_FILE].set) + grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 9a1835b84..10a4b852d 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -18,7 +18,9 @@ + * along with GRUB. If not, see . 
+ */ + ++#include + #include ++#include + #include + #include + #include +@@ -44,6 +46,68 @@ grub_gpt_guid_to_str (grub_gpt_guid_t *guid) + guid->data4[6], guid->data4[7]); + } + ++static grub_err_t ++grub_gpt_device_partentry (grub_device_t device, ++ struct grub_gpt_partentry *entry) ++{ ++ grub_disk_t disk = device->disk; ++ grub_partition_t p; ++ grub_err_t err; ++ ++ if (!disk || !disk->partition) ++ return grub_error (GRUB_ERR_BUG, "not a partition"); ++ ++ if (grub_strcmp (disk->partition->partmap->name, "gpt")) ++ return grub_error (GRUB_ERR_BAD_ARGUMENT, "not a GPT partition"); ++ ++ p = disk->partition; ++ disk->partition = p->parent; ++ err = grub_disk_read (disk, p->offset, p->index, sizeof (*entry), entry); ++ disk->partition = p; ++ ++ return err; ++} ++ ++grub_err_t ++grub_gpt_part_label (grub_device_t device, char **label) ++{ ++ struct grub_gpt_partentry entry; ++ const grub_size_t name_len = ARRAY_SIZE (entry.name); ++ const grub_size_t label_len = name_len * GRUB_MAX_UTF8_PER_UTF16 + 1; ++ grub_size_t i; ++ grub_uint8_t *end; ++ ++ if (grub_gpt_device_partentry (device, &entry)) ++ return grub_errno; ++ ++ *label = grub_malloc (label_len); ++ if (!*label) ++ return grub_errno; ++ ++ for (i = 0; i < name_len; i++) ++ entry.name[i] = grub_le_to_cpu16 (entry.name[i]); ++ ++ end = grub_utf16_to_utf8 ((grub_uint8_t *) *label, entry.name, name_len); ++ *end = '\0'; ++ ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_part_uuid (grub_device_t device, char **uuid) ++{ ++ struct grub_gpt_partentry entry; ++ ++ if (grub_gpt_device_partentry (device, &entry)) ++ return grub_errno; ++ ++ *uuid = grub_gpt_guid_to_str (&entry.guid); ++ if (!*uuid) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 1142317e3..8ff62d67f 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -49,6 +49,10 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); + GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) + ++#define GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM \ ++ GRUB_GPT_GUID_INIT (0xc12a7328, 0xf81f, 0x11d2, \ ++ 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b) ++ + #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ + GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ + 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) +@@ -216,4 +220,16 @@ grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); + grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size); + ++ ++/* Utilities for simple partition data lookups, usage is intended to ++ * be similar to fs->label and fs->uuid functions. */ ++ ++/* Return the partition label of the device DEVICE in LABEL. ++ * The label is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_part_label (grub_device_t device, char **label); ++ ++/* Return the partition uuid of the device DEVICE in UUID. ++ * The label is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); ++ + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ +diff --git a/include/grub/search.h b/include/grub/search.h +index d80347df3..c2f40abe9 100644 +--- a/include/grub/search.h ++++ b/include/grub/search.h +@@ -25,5 +25,9 @@ void grub_search_fs_uuid (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); + void grub_search_label (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); ++void grub_search_part_uuid (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); ++void grub_search_part_label (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); + + #endif +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 5692a5a52..deb55a926 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -21,10 +21,12 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include + + #include +@@ -534,6 +536,84 @@ repair_test (void) + + close_disk (&data); + } ++ ++static void ++search_label_test (void) ++{ ++ struct test_data data; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("EFI SYSTEM", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("BIOS BOOT", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("bogus name", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++static void ++search_uuid_test (void) ++{ ++ struct test_data data; ++ const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; ++ const char gpt2_uuid[] = "876c898d-1b40-4727-a161-edf9b5486674"; ++ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_uuid (gpt1_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_uuid (gpt2_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ 
grub_search_part_uuid (bogus_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ + void + grub_unit_test_init (void) + { +@@ -546,6 +626,8 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); ++ grub_test_register ("gpt_search_label_test", search_label_test); ++ grub_test_register ("gpt_search_uuid_test", search_uuid_test); + } + + void +@@ -557,5 +639,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); ++ grub_test_unregister ("gpt_search_label_test"); ++ grub_test_unregister ("gpt_search_uuid_test"); + grub_fini_all (); + } +From 57d264518a2a01730c1ba14d058d2a5573d6bd15 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Fri, 31 Jul 2015 15:03:11 -0700 +Subject: [PATCH] gpt: clean up little-endian crc32 computation + + - Remove problematic cast from *uint8_t to *uint32_t (alignment issue). + - Remove dynamic allocation and associated error handling paths. + - Match parameter ordering to existing grub_crypto_hash function. +--- + grub-core/lib/gpt.c | 51 +++++++++++++-------------------------------------- + 1 file changed, 13 insertions(+), 38 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 10a4b852d..aedc4f7a1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -122,45 +122,29 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + return sectors; + } + +-static grub_err_t +-grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) ++static void ++grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) + { +- grub_uint8_t *crc32_context; +- +- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); +- if (!crc32_context) +- return grub_errno; ++ grub_uint32_t crc32_val; + +- GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, data, len); +- GRUB_MD_CRC32->final (crc32_context); ++ grub_crypto_hash (GRUB_MD_CRC32, &crc32_val, data, len); + + /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ +- *crc = grub_swap_bytes32 (*(grub_uint32_t *) +- GRUB_MD_CRC32->read (crc32_context)); +- +- grub_free (crc32_context); +- +- return GRUB_ERR_NONE; ++ *crc = grub_swap_bytes32 (crc32_val); + } + +-static grub_err_t +-grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) ++static void ++grub_gpt_header_lecrc32 (grub_uint32_t *crc, struct grub_gpt_header *header) + { + grub_uint32_t old, new; +- grub_err_t err; + + /* crc32 must be computed with the field cleared. */ + old = header->crc32; + header->crc32 = 0; +- err = grub_gpt_lecrc32 (header, sizeof (*header), &new); ++ grub_gpt_lecrc32 (&new, header, sizeof (*header)); + header->crc32 = old; + +- if (err) +- return err; +- + *crc = new; +- return GRUB_ERR_NONE; + } + + /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ +@@ -192,9 +176,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->version != GRUB_GPT_HEADER_VERSION) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); + +- if (grub_gpt_header_lecrc32 (gpt, &crc)) +- return grub_errno; +- ++ grub_gpt_header_lecrc32 (&crc, gpt); + if (gpt->crc32 != crc) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); + +@@ -289,9 +271,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +- if (grub_gpt_lecrc32 (entries, entries_size, &crc)) +- goto fail; +- ++ grub_gpt_lecrc32 (&crc, entries, entries_size); + if (crc != header->partentry_crc32) + { + grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); +@@ -433,17 +413,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + gpt->backup.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); + +- if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) +- return grub_errno; +- ++ grub_gpt_lecrc32 (&crc, gpt->entries, gpt->entries_size); + gpt->primary.partentry_crc32 = crc; + gpt->backup.partentry_crc32 = crc; + +- if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) +- return grub_errno; +- +- if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) +- return grub_errno; ++ grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); ++ grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + + return GRUB_ERR_NONE; + } +From 8ec39207e9c81685459a9901ee2c057924eb8ec0 Mon Sep 17 00:00:00 2001 +From: Alex Crawford +Date: Mon, 31 Aug 2015 15:23:39 -0700 +Subject: [PATCH] gpt: minor cleanup + +--- + include/grub/gpt_partition.h | 2 +- + tests/gpt_unit_test.c | 12 ++++++------ + 2 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 8ff62d67f..21359f08a 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -229,7 +229,7 @@ grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, + grub_err_t grub_gpt_part_label (grub_device_t device, char **label); + + /* Return the partition uuid of the device DEVICE in UUID. +- * The label is in a new buffer and should be freed by the caller. */ ++ * The uuid is in a new buffer and should be freed by the caller. */ + grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); + + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index deb55a926..7a1af46e1 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -538,7 +538,7 @@ repair_test (void) + } + + static void +-search_label_test (void) ++search_part_label_test (void) + { + struct test_data data; + const char *test_result; +@@ -575,7 +575,7 @@ search_label_test (void) + } + + static void +-search_uuid_test (void) ++search_part_uuid_test (void) + { + struct test_data data; + const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; +@@ -626,8 +626,8 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); +- grub_test_register ("gpt_search_label_test", search_label_test); +- grub_test_register ("gpt_search_uuid_test", search_uuid_test); ++ grub_test_register ("gpt_search_part_label_test", search_part_label_test); ++ grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + } + + void +@@ -639,7 +639,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); +- grub_test_unregister ("gpt_search_label_test"); +- grub_test_unregister ("gpt_search_uuid_test"); ++ grub_test_unregister ("gpt_search_part_label_test"); ++ grub_test_unregister ("gpt_search_part_uuid_test"); + grub_fini_all (); + } +From 786e2f7da69a2a238390cb56f394c102c3938f09 Mon Sep 17 00:00:00 2001 +From: Alex Crawford +Date: Mon, 31 Aug 2015 15:15:48 -0700 +Subject: [PATCH] gpt: add search by disk uuid command + +--- + Makefile.util.def | 1 + + grub-core/Makefile.core.def | 5 +++++ + grub-core/commands/search.c | 28 ++++++++++++++++++++++++++-- + grub-core/commands/search_disk_uuid.c | 5 +++++ + grub-core/commands/search_wrap.c | 6 ++++++ + grub-core/lib/gpt.c | 21 +++++++++++++++++++++ + include/grub/gpt_partition.h | 4 ++++ + include/grub/search.h | 2 ++ + tests/gpt_unit_test.c | 33 +++++++++++++++++++++++++++++++++ + 9 files changed, 103 insertions(+), 2 deletions(-) + create mode 100644 grub-core/commands/search_disk_uuid.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index bc0f178ff..4b1b4c410 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1273,6 +1273,7 @@ program = { + common = tests/lib/unit_test.c; + common = grub-core/commands/search_part_label.c; + common = grub-core/commands/search_part_uuid.c; ++ common = grub-core/commands/search_disk_uuid.c; + common = grub-core/disk/host.c; + common = grub-core/kern/emu/hostfs.c; + common = grub-core/lib/gpt.c; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 4cce18d6d..ae68b75a8 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -1023,6 +1023,11 @@ module = { + common = commands/search_part_label.c; + }; + ++module = { ++ name = search_disk_uuid; ++ common = commands/search_disk_uuid.c; ++}; ++ + module = { + name = setpci; + common = commands/setpci.c; +diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c +index 09e165ed3..83837b564 100644 +--- a/grub-core/commands/search.c ++++ b/grub-core/commands/search.c +@@ -30,7 +30,8 @@ + #include + #include + #include +-#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) ++#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) || \ ++ defined(DO_SEARCH_DISK_UUID) + #include + 
#endif + +@@ -69,7 +70,7 @@ iterate_device (const char *name, void *data) + name[0] == 'f' && name[1] == 'd' && name[2] >= '0' && name[2] <= '9') + return 1; + +-#ifdef DO_SEARCH_FS_UUID ++#if defined(DO_SEARCH_FS_UUID) || defined(DO_SEARCH_DISK_UUID) + #define compare_fn grub_strcasecmp + #else + #define compare_fn grub_strcmp +@@ -128,6 +129,25 @@ iterate_device (const char *name, void *data) + grub_free (quid); + } + ++ grub_device_close (dev); ++ } ++ } ++#elif defined(DO_SEARCH_DISK_UUID) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) ++ { ++ if (grub_gpt_disk_uuid (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ + grub_device_close (dev); + } + } +@@ -360,6 +380,8 @@ GRUB_MOD_INIT(search_part_uuid) + GRUB_MOD_INIT(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_INIT(search_fs_uuid) ++#elif defined (DO_SEARCH_DISK_UUID) ++GRUB_MOD_INIT(search_disk_uuid) + #else + GRUB_MOD_INIT(search_label) + #endif +@@ -378,6 +400,8 @@ GRUB_MOD_FINI(search_part_uuid) + GRUB_MOD_FINI(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_FINI(search_fs_uuid) ++#elif defined (DO_SEARCH_DISK_UUID) ++GRUB_MOD_FINI(search_disk_uuid) + #else + GRUB_MOD_FINI(search_label) + #endif +diff --git a/grub-core/commands/search_disk_uuid.c b/grub-core/commands/search_disk_uuid.c +new file mode 100644 +index 000000000..fba96f6b8 +--- /dev/null ++++ b/grub-core/commands/search_disk_uuid.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_DISK_UUID 1 ++#define FUNC_NAME grub_search_disk_uuid ++#define COMMAND_NAME "search.disk_uuid" ++#define HELP_MESSAGE N_("Search devices by disk UUID. If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c +index e3ff756df..d931c56c5 100644 +--- a/grub-core/commands/search_wrap.c ++++ b/grub-core/commands/search_wrap.c +@@ -40,6 +40,8 @@ static const struct grub_arg_option options[] = + 0, 0}, + {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), + 0, 0}, ++ {"disk-uuid", 'U', 0, N_("Search devices by a disk UUID."), ++ 0, 0}, + {"set", 's', GRUB_ARG_OPTION_OPTIONAL, + N_("Set a variable to the first device found."), N_("VARNAME"), + ARG_TYPE_STRING}, +@@ -77,6 +79,7 @@ enum options + SEARCH_FS_UUID, + SEARCH_PART_LABEL, + SEARCH_PART_UUID, ++ SEARCH_DISK_UUID, + SEARCH_SET, + SEARCH_NO_FLOPPY, + SEARCH_HINT, +@@ -198,6 +201,9 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + else if (state[SEARCH_PART_UUID].set) + grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); ++ else if (state[SEARCH_DISK_UUID].set) ++ grub_search_disk_uuid (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); + else if (state[SEARCH_FILE].set) + grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index aedc4f7a1..e162bafd3 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -108,6 +108,27 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) + return GRUB_ERR_NONE; + } + ++grub_err_t ++grub_gpt_disk_uuid (grub_device_t device, char **uuid) ++{ ++ grub_gpt_t gpt = grub_gpt_read (device->disk); ++ if (!gpt) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); ++ else if (gpt->status & 
GRUB_GPT_BACKUP_HEADER_VALID) ++ *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); ++ else ++ grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ ++done: ++ grub_gpt_free (gpt); ++ return grub_errno; ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 21359f08a..4a6ed25b3 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -232,4 +232,8 @@ grub_err_t grub_gpt_part_label (grub_device_t device, char **label); + * The uuid is in a new buffer and should be freed by the caller. */ + grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); + ++/* Return the disk uuid of the device DEVICE in UUID. ++ * The uuid is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_disk_uuid (grub_device_t device, char **uuid); ++ + #endif /* ! GRUB_GPT_PARTITION_HEADER */ +diff --git a/include/grub/search.h b/include/grub/search.h +index c2f40abe9..7f69d25d1 100644 +--- a/include/grub/search.h ++++ b/include/grub/search.h +@@ -29,5 +29,7 @@ void grub_search_part_uuid (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); + void grub_search_part_label (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); ++void grub_search_disk_uuid (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); + + #endif +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 7a1af46e1..60f601729 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -614,6 +614,37 @@ search_part_uuid_test (void) + close_disk (&data); + } + ++static void ++search_disk_uuid_test (void) ++{ ++ struct test_data data; ++ const char disk_uuid[] = "69c131ad-67d6-46c6-93c4-124c755256ac"; ++ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_disk_uuid (disk_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ grub_search_disk_uuid (bogus_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ + void + grub_unit_test_init (void) + { +@@ -628,6 +659,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_repair_test", repair_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); ++ grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); + } + + void +@@ -641,5 +673,6 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_repair_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); ++ grub_test_unregister ("gpt_search_disk_uuid_test"); + grub_fini_all (); + } +From b993a3cec81275809b5a9b55b10c170a8f862bfe Mon Sep 17 00:00:00 2001 +From: Michael Marineau 
+Date: Mon, 25 Jul 2016 14:59:29 -0700 +Subject: [PATCH] gpt: do not use disk sizes GRUB will reject as invalid later + on + +GRUB assumes that no disk is ever larger than 1EiB and rejects +reads/writes to such locations. Unfortunately this is not conveyed in +the usual way with the special GRUB_DISK_SIZE_UNKNOWN value. +--- + grub-core/lib/gpt.c | 26 ++++++++++++++++++++++++-- + 1 file changed, 24 insertions(+), 2 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index e162bafd3..3e17f2771 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -143,6 +143,28 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + return sectors; + } + ++/* Copied from grub-core/kern/disk_common.c grub_disk_adjust_range so we can ++ * avoid attempting to use disk->total_sectors when GRUB won't let us. ++ * TODO: Why is disk->total_sectors not set to GRUB_DISK_SIZE_UNKNOWN? */ ++static int ++grub_gpt_disk_size_valid (grub_disk_t disk) ++{ ++ grub_disk_addr_t total_sectors; ++ ++ /* Transform total_sectors to number of 512B blocks. */ ++ total_sectors = disk->total_sectors << (disk->log_sector_size - GRUB_DISK_SECTOR_BITS); ++ ++ /* Some drivers have problems with disks above reasonable. ++ Treat unknown as 1EiB disk. While on it, clamp the size to 1EiB. ++ Just one condition is enough since GRUB_DISK_UNKNOWN_SIZE << ls is always ++ above 9EiB. ++ */ ++ if (total_sectors > (1ULL << 51)) ++ return 0; ++ ++ return 1; ++} ++ + static void + grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) + { +@@ -242,7 +264,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + grub_disk_addr_t addr; + + /* Assumes gpt->log_sector_size == disk->log_sector_size */ +- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ if (grub_gpt_disk_size_valid(disk)) + sector = disk->total_sectors - 1; + else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) + sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); +@@ -394,7 +416,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_BUG, "No valid GPT header"); + + /* Relocate backup to end if disk whenever possible. 
*/ +- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ if (grub_gpt_disk_size_valid(disk)) + backup_header = disk->total_sectors - 1; + + backup_entries = backup_header - +From ebc7bbaa181bcfc381b1ddad236e0ce68655de68 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 10 Aug 2016 18:26:03 -0700 +Subject: [PATCH] gpt: add verbose debug logging + +--- + grub-core/lib/gpt.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 109 insertions(+), 8 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3e17f2771..c2821b563 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -207,6 +207,18 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); + } + ++static grub_uint64_t ++grub_gpt_entries_sectors (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size) ++{ ++ grub_uint64_t sector_bytes, entries_bytes; ++ ++ sector_bytes = 1ULL << log_sector_size; ++ entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * ++ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++ return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); ++} ++ + grub_err_t + grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -236,6 +248,64 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + return GRUB_ERR_NONE; + } + ++static grub_err_t ++grub_gpt_check_primary (grub_gpt_t gpt) ++{ ++ grub_uint64_t backup, primary, entries, entries_len, start, end; ++ ++ primary = grub_le_to_cpu64 (gpt->primary.header_lba); ++ backup = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ entries = grub_le_to_cpu64 (gpt->primary.partitions); ++ entries_len = grub_gpt_entries_sectors(&gpt->primary, gpt->log_sector_size); ++ start = grub_le_to_cpu64 (gpt->primary.start); ++ end = grub_le_to_cpu64 (gpt->primary.end); ++ ++ grub_dprintf ("gpt", "Primary GPT layout:\n" ++ "primary header = 0x%llx backup header = 0x%llx\n" ++ "entries location = 0x%llx length = 0x%llx\n" ++ "first usable = 0x%llx last usable = 0x%llx\n", ++ (unsigned long long) primary, ++ (unsigned long long) backup, ++ (unsigned long long) entries, ++ (unsigned long long) entries_len, ++ (unsigned long long) start, ++ (unsigned long long) end); ++ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_check_backup (grub_gpt_t gpt) ++{ ++ grub_uint64_t backup, primary, entries, entries_len, start, end; ++ ++ backup = grub_le_to_cpu64 (gpt->backup.header_lba); ++ primary = grub_le_to_cpu64 (gpt->backup.alternate_lba); ++ entries = grub_le_to_cpu64 (gpt->backup.partitions); ++ entries_len = grub_gpt_entries_sectors(&gpt->backup, gpt->log_sector_size); ++ start = grub_le_to_cpu64 (gpt->backup.start); ++ end = grub_le_to_cpu64 (gpt->backup.end); ++ ++ grub_dprintf ("gpt", "Backup GPT layout:\n" ++ "primary header = 0x%llx backup header = 0x%llx\n" ++ "entries location = 0x%llx length = 0x%llx\n" ++ "first usable = 0x%llx last usable = 0x%llx\n", ++ (unsigned long long) primary, ++ (unsigned long long) backup, ++ (unsigned long long) entries, ++ (unsigned long long) entries_len, ++ (unsigned long long) start, ++ (unsigned long long) end); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ + static grub_err_t + grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + { +@@ -246,11 
+316,13 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + * but eventually this code should match the existing behavior. */ + gpt->log_sector_size = disk->log_sector_size; + ++ grub_dprintf ("gpt", "reading primary GPT from sector 0x1\n"); ++ + addr = grub_gpt_sector_to_addr (gpt, 1); + if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) + return grub_errno; + +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ if (grub_gpt_check_primary (gpt)) + return grub_errno; + + gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; +@@ -272,11 +344,14 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "Unable to locate backup GPT"); + ++ grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", ++ (unsigned long long) sector); ++ + addr = grub_gpt_sector_to_addr (gpt, sector); + if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) + return grub_errno; + +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ if (grub_gpt_check_backup (gpt)) + return grub_errno; + + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; +@@ -289,6 +364,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + { + struct grub_gpt_partentry *entries = NULL; + grub_uint32_t count, size, crc; ++ grub_uint64_t sector; + grub_disk_addr_t addr; + grub_size_t entries_size; + +@@ -310,7 +386,12 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (!entries) + goto fail; + +- addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ sector = grub_le_to_cpu64 (header->partitions); ++ grub_dprintf ("gpt", "reading GPT %lu entries from sector 0x%llx\n", ++ (unsigned long) count, ++ (unsigned long long) sector); ++ ++ addr = grub_gpt_sector_to_addr (gpt, sector); + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +@@ -336,6 +417,8 @@ grub_gpt_read (grub_disk_t disk) + { + grub_gpt_t gpt; + ++ grub_dprintf ("gpt", "reading GPT from %s\n", disk->name); ++ + gpt = grub_zalloc (sizeof (*gpt)); + if (!gpt) + goto fail; +@@ -369,12 +452,18 @@ grub_gpt_read (grub_disk_t disk) + /* Similarly, favor the value or error from the primary table. 
*/ + if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && + !grub_gpt_read_entries (disk, gpt, &gpt->backup)) +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ { ++ grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ } + + grub_errno = GRUB_ERR_NONE; + if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && + !grub_gpt_read_entries (disk, gpt, &gpt->primary)) +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ { ++ grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ } + + if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || + gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) +@@ -394,21 +483,25 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; + ++ grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); ++ + if (disk->log_sector_size != gpt->log_sector_size) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "GPT sector size must match disk sector size"); + + if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) + return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); + + if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) + { ++ grub_dprintf ("gpt", "primary GPT header is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } + else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) + { ++ grub_dprintf ("gpt", "backup GPT header is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); + } +@@ -418,9 +511,13 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + /* Relocate backup to end if disk whenever possible. */ + if (grub_gpt_disk_size_valid(disk)) + backup_header = disk->total_sectors - 1; ++ grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", ++ (unsigned long long) backup_header); + + backup_entries = backup_header - + grub_gpt_size_to_sectors (gpt, gpt->entries_size); ++ grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", ++ (unsigned long long) backup_entries); + + /* Update/fixup header and partition table locations. */ + gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); +@@ -435,13 +532,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_errno; + + /* Sanity check. 
*/ +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ if (grub_gpt_check_primary (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); + +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ if (grub_gpt_check_backup (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + + gpt->status |= GRUB_GPT_BOTH_VALID; ++ grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); ++ + return GRUB_ERR_NONE; + } + +@@ -497,9 +596,11 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + if (!(gpt->status & GRUB_GPT_BOTH_VALID)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + ++ grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->primary)) + return grub_errno; + ++ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->backup)) + return grub_errno; + +From e7ae87c15b57c8bac757df45a83351c5c7a9cd10 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 10 Aug 2016 18:26:03 -0700 +Subject: [PATCH] gpt: improve validation of GPT headers + +Adds basic validation of all the disk locations in the headers, reducing +the chance of corrupting weird locations on disk. +--- + grub-core/lib/gpt.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 48 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index c2821b563..f83fe29ac 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -224,6 +224,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size) + { + grub_uint32_t crc = 0, size; ++ grub_uint64_t start, end; + + if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); +@@ -245,9 +246,35 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (size < 128 || size % 128) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); + ++ /* And of course there better be some space for partitions! */ ++ start = grub_le_to_cpu64 (gpt->start); ++ end = grub_le_to_cpu64 (gpt->end); ++ if (start > end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid usable sectors"); ++ + return GRUB_ERR_NONE; + } + ++static int ++grub_gpt_headers_equal (grub_gpt_t gpt) ++{ ++ /* Assume headers passed grub_gpt_header_check so skip magic and version. ++ * Individual fields must be checked instead of just using memcmp because ++ * crc32, header, alternate, and partitions will all normally differ. 
*/ ++ ++ if (gpt->primary.headersize != gpt->backup.headersize || ++ gpt->primary.header_lba != gpt->backup.alternate_lba || ++ gpt->primary.start != gpt->backup.start || ++ gpt->primary.end != gpt->backup.end || ++ gpt->primary.maxpart != gpt->backup.maxpart || ++ gpt->primary.partentry_size != gpt->backup.partentry_size || ++ gpt->primary.partentry_crc32 != gpt->backup.partentry_crc32) ++ return 0; ++ ++ return grub_memcmp(&gpt->primary.guid, &gpt->backup.guid, ++ sizeof(grub_gpt_guid_t)) == 0; ++} ++ + static grub_err_t + grub_gpt_check_primary (grub_gpt_t gpt) + { +@@ -273,6 +300,12 @@ grub_gpt_check_primary (grub_gpt_t gpt) + + if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) + return grub_errno; ++ if (primary != 1) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); ++ if (entries <= 1 || entries+entries_len > start) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); ++ if (backup <= end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + return GRUB_ERR_NONE; + } +@@ -302,6 +335,12 @@ grub_gpt_check_backup (grub_gpt_t gpt) + + if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) + return grub_errno; ++ if (primary != 1) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); ++ if (entries <= end || entries+entries_len > backup) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); ++ if (backup <= end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + return GRUB_ERR_NONE; + } +@@ -354,6 +393,15 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_check_backup (gpt)) + return grub_errno; + ++ /* Ensure the backup header thinks it is located where we found it. */ ++ if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); ++ ++ /* If both primary and backup are valid but differ prefer the primary. 
*/ ++ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ !grub_gpt_headers_equal(gpt)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); ++ + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; + return GRUB_ERR_NONE; + } +From dfa966dcac4ff53e83f68be4643d33db6c9d98ef Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 11 Aug 2016 15:02:21 -0700 +Subject: [PATCH] gpt: refuse to write to sector 0 + +--- + grub-core/lib/gpt.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f83fe29ac..b7449911a 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -626,10 +626,17 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, + sizeof (*header)); + + addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); ++ if (addr == 0) ++ return grub_error (GRUB_ERR_BUG, ++ "Refusing to write GPT header to address 0x0"); + if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) + return grub_errno; + + addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (addr < 2) ++ return grub_error (GRUB_ERR_BUG, ++ "Refusing to write GPT entries to address 0x%llx", ++ (unsigned long long) addr); + if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) + return grub_errno; + +From ac69188a7031e0255012900519f406baea7d9278 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 20 Aug 2016 17:42:12 -0700 +Subject: [PATCH] gpt: properly detect and repair invalid tables + +GPT_BOTH_VALID is 4 bits so simple a boolean check is not sufficient. +This broken condition allowed gptprio to trust bogus disk locations in +headers that were marked invalid causing arbitrary disk corruption. +--- + grub-core/commands/gptprio.c | 2 +- + grub-core/lib/gpt.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index ce5840b4e..6b61bb56d 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index b7449911a..0daf3f8de 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -648,7 +648,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { + /* TODO: update/repair protective MBRs too. 
*/ + +- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); +From 2ec96d73aec713dc3f8b45f27fae4008fb9bb516 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 22 Aug 2016 16:44:30 -0700 +Subject: [PATCH] gptrepair_test: fix typo in cleanup trap + +--- + tests/gptrepair_test.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in +index 80b2de633..805dc171a 100644 +--- a/tests/gptrepair_test.in ++++ b/tests/gptrepair_test.in +@@ -53,7 +53,7 @@ case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in + esac + img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 + img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 +-trap "rm -f '${img1}' '${ing2}'" EXIT ++trap "rm -f '${img1}' '${img2}'" EXIT + + create_disk_image () { + size=$1 +From 384c0976b3bb8fca413bd3b0af22604104824511 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 22 Aug 2016 16:45:10 -0700 +Subject: [PATCH] gptprio_test: check GPT is repaired when appropriate + +--- + tests/gptprio_test.in | 63 ++++++++++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 60 insertions(+), 3 deletions(-) + +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +index f4aea0dc9..c5cf0f3b7 100644 +--- a/tests/gptprio_test.in ++++ b/tests/gptprio_test.in +@@ -66,8 +66,9 @@ prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" + prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" + + create_disk_image () { ++ size=$1 + rm -f "${img1}" +- dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none + ${sgdisk} \ + -n 1:0:+1 -c 1:ESP -t 1:ef00 \ + -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ +@@ -76,6 +77,35 @@ create_disk_image () { + "${img1}" >/dev/null + } + ++wipe_disk_area () { ++ sector=$1 ++ size=$2 ++ dd if=/dev/zero of="${img1}" bs=512 count=${size} seek=${sector} conv=notrunc status=none ++} ++ ++is_zero () { ++ sector=$1 ++ size=$2 ++ cmp -s -i $((sector * 512)) -n $((size * 512)) /dev/zero "${img1}" ++} ++ ++check_is_zero () { ++ sector=$1 ++ size=$2 ++ if ! is_zero "$@"; then ++ echo "$size sector(s) starting at $sector should be all zero" ++ exit 1 ++ fi ++} ++ ++check_not_zero () { ++ sector=$1 ++ size=$2 ++ if is_zero "$@"; then ++ echo "$size sector(s) starting at $sector should not be all zero" ++ exit 1 ++ fi ++} + + fmt_prio () { + priority=$(( ( $1 & 15 ) << 48 )) +@@ -93,10 +123,10 @@ set_prio () { + check_prio () { + part="$1" + expect=$(fmt_prio $2 $3 $4) +- result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ ++ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" 2>&1 \ + | awk '/^Attribute flags: / {print $3}') + if [[ "${expect}" != "${result}" ]]; then +- echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 ++ echo "Partition ${part} has attributes ${result:-??}, not ${expect}" + exit 1 + fi + } +@@ -133,6 +163,33 @@ create_disk_image 100 + set_prio 2 3 2 1 + check_prio 2 3 2 1 + ++# Check gptprio works without modifying the disk when no update is required. ++# Leaves any existing corruption as is, repairing in the OS is better. 
++create_disk_image 100 ++set_prio 2 1 0 1 ++wipe_disk_area 99 1 ++check_next 2 1 0 1 ++check_is_zero 99 1 ++ ++create_disk_image 100 ++set_prio 2 1 0 1 ++wipe_disk_area 1 1 ++check_next 2 1 0 1 ++check_is_zero 1 1 ++ ++# When writes do need to be made go ahead and perform the repair. ++create_disk_image 100 ++set_prio 2 1 1 0 ++wipe_disk_area 99 1 ++check_next 2 1 0 0 ++check_not_zero 99 1 ++ ++create_disk_image 100 ++set_prio 2 1 1 0 ++wipe_disk_area 1 1 ++check_next 2 1 0 0 ++check_not_zero 1 1 ++ + # Try two partitions before falling before falling back to a third + create_disk_image 100 + set_prio 2 3 3 0 +From 96d36c4ecdb8b3dfe0ecd81dff3c1cd6665e73c3 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 24 Aug 2016 16:14:20 -0700 +Subject: [PATCH] gpt: fix partition table indexing and validation + +Portions of the code attempted to handle the fact that GPT entries on +disk may be larger than the currently defined struct while others +assumed the data could be indexed by the struct size directly. This +never came up because no utility uses a size larger than 128 bytes but +for the sake of safety we need to do this by the spec. +--- + grub-core/commands/gptprio.c | 6 +-- + grub-core/lib/gpt.c | 51 +++++++++++++++--- + include/grub/gpt_partition.h | 11 +++- + tests/gpt_unit_test.c | 120 +++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 176 insertions(+), 12 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 6b61bb56d..548925a08 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -78,7 +78,7 @@ grub_find_next (const char *disk_name, + const grub_gpt_part_type_t *part_type, + char **part_name, char **part_guid) + { +- struct grub_gpt_partentry *part_found = NULL; ++ struct grub_gpt_partentry *part, *part_found = NULL; + grub_device_t dev = NULL; + grub_gpt_t gpt = NULL; + grub_uint32_t i, part_index; +@@ -95,10 +95,8 @@ grub_find_next (const char *disk_name, + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +- for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) ++ for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) + { +- struct grub_gpt_partentry *part = &gpt->entries[i]; +- + if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) + { + unsigned int priority, tries_left, successful, old_priority = 0; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 0daf3f8de..205779192 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -207,6 +207,13 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); + } + ++static grub_uint64_t ++grub_gpt_entries_size (struct grub_gpt_header *gpt) ++{ ++ return (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * ++ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++} ++ + static grub_uint64_t + grub_gpt_entries_sectors (struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -214,11 +221,16 @@ grub_gpt_entries_sectors (struct grub_gpt_header *gpt, + grub_uint64_t sector_bytes, entries_bytes; + + sector_bytes = 1ULL << log_sector_size; +- entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * +- (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++ entries_bytes = grub_gpt_entries_size (gpt); + return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); + } + ++static int ++is_pow2 (grub_uint32_t n) ++{ ++ return (n & (n - 1)) == 0; ++} ++ + grub_err_t + grub_gpt_header_check 
(struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -236,16 +248,23 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->crc32 != crc) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); + +- /* The header size must be between 92 and the sector size. */ ++ /* The header size "must be greater than or equal to 92 and must be less ++ * than or equal to the logical block size." */ + size = grub_le_to_cpu32 (gpt->headersize); + if (size < 92U || size > (1U << log_sector_size)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); + +- /* The partition entry size must be a multiple of 128. */ ++ /* The partition entry size must be "a value of 128*(2^n) where n is an ++ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ + size = grub_le_to_cpu32 (gpt->partentry_size); +- if (size < 128 || size % 128) ++ if (size < 128U || size % 128U || !is_pow2 (size / 128U)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); + ++ /* The minimum entries table size is specified in terms of bytes, ++ * regardless of how large the individual entry size is. */ ++ if (grub_gpt_entries_size (gpt) < GRUB_GPT_DEFAULT_ENTRIES_SIZE) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry table size"); ++ + /* And of course there better be some space for partitions! */ + start = grub_le_to_cpu64 (gpt->start); + end = grub_le_to_cpu64 (gpt->end); +@@ -410,7 +429,7 @@ static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { +- struct grub_gpt_partentry *entries = NULL; ++ void *entries = NULL; + grub_uint32_t count, size, crc; + grub_uint64_t sector; + grub_disk_addr_t addr; +@@ -526,6 +545,26 @@ fail: + return NULL; + } + ++struct grub_gpt_partentry * ++grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) ++{ ++ struct grub_gpt_header *header; ++ grub_size_t offset; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ header = &gpt->primary; ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ header = &gpt->backup; ++ else ++ return NULL; ++ ++ if (n >= grub_le_to_cpu32 (header->maxpart)) ++ return NULL; ++ ++ offset = (grub_size_t) grub_le_to_cpu32 (header->partentry_size) * n; ++ return (struct grub_gpt_partentry *) ((char *) gpt->entries + offset); ++} ++ + grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 4a6ed25b3..cc3a201a5 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -186,8 +186,10 @@ struct grub_gpt + struct grub_gpt_header primary; + struct grub_gpt_header backup; + +- /* Only need one entries table, on disk both copies are identical. */ +- struct grub_gpt_partentry *entries; ++ /* Only need one entries table, on disk both copies are identical. ++ * The on disk entry size may be larger than our partentry struct so ++ * the table cannot be indexed directly. */ ++ void *entries; + grub_size_t entries_size; + + /* Logarithm of sector size, in case GPT and disk driver disagree. */ +@@ -205,6 +207,11 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) + /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ + grub_gpt_t grub_gpt_read (grub_disk_t disk); + ++/* Helper for indexing into the entries table. ++ * Returns NULL when the end of the table has been reached. 
*/ ++struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, ++ grub_uint32_t n); ++ + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 60f601729..9cf3414c2 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -40,6 +40,13 @@ + /* from gnulib */ + #include + ++/* Confirm that the GPT structures conform to the sizes in the spec: ++ * The header size "must be greater than or equal to 92 and must be less ++ * than or equal to the logical block size." ++ * The partition entry size must be "a value of 128*(2^n) where n is an ++ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ ++verify (sizeof (struct grub_gpt_header) == 92); ++verify (sizeof (struct grub_gpt_partentry) == 128); + + /* GPT section sizes. */ + #define HEADER_SIZE (sizeof (struct grub_gpt_header)) +@@ -537,6 +544,113 @@ repair_test (void) + close_disk (&data); + } + ++static void ++iterate_partitions_test (void) ++{ ++ struct test_data data; ++ struct grub_gpt_partentry *p; ++ grub_gpt_t gpt; ++ grub_uint32_t n; ++ ++ open_disk (&data); ++ gpt = read_disk (&data); ++ ++ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) ++ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, ++ "unexpected partition %d data", n); ++ ++ grub_test_assert (n == TABLE_ENTRIES, "unexpected partition limit: %d", n); ++ ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ ++static void ++large_partitions_test (void) ++{ ++ struct test_data data; ++ struct grub_gpt_partentry *p; ++ grub_gpt_t gpt; ++ grub_uint32_t n; ++ ++ open_disk (&data); ++ ++ /* Double the entry size, cut the number of entries in half. */ ++ data.raw->primary_header.maxpart = ++ data.raw->backup_header.maxpart = ++ grub_cpu_to_le32_compile_time (TABLE_ENTRIES/2); ++ data.raw->primary_header.partentry_size = ++ data.raw->backup_header.partentry_size = ++ grub_cpu_to_le32_compile_time (ENTRY_SIZE*2); ++ data.raw->primary_header.partentry_crc32 = ++ data.raw->backup_header.partentry_crc32 = ++ grub_cpu_to_le32_compile_time (0xf2c45af8); ++ data.raw->primary_header.crc32 = grub_cpu_to_le32_compile_time (0xde00cc8f); ++ data.raw->backup_header.crc32 = grub_cpu_to_le32_compile_time (0x6d72e284); ++ ++ memset (&data.raw->primary_entries, 0, ++ sizeof (data.raw->primary_entries)); ++ for (n = 0; n < TABLE_ENTRIES/2; n++) ++ memcpy (&data.raw->primary_entries[n*2], &example_entries[n], ++ sizeof (data.raw->primary_entries[0])); ++ memcpy (&data.raw->backup_entries, &data.raw->primary_entries, ++ sizeof (data.raw->backup_entries)); ++ ++ sync_disk(&data); ++ gpt = read_disk (&data); ++ ++ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) ++ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, ++ "unexpected partition %d data", n); ++ ++ grub_test_assert (n == TABLE_ENTRIES/2, "unexpected partition limit: %d", n); ++ ++ grub_gpt_free (gpt); ++ ++ /* Editing memory beyond the entry structure should still change the crc. 
*/ ++ data.raw->primary_entries[1].attrib = 0xff; ++ ++ sync_disk(&data); ++ gpt = read_disk (&data); ++ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | ++ GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID), ++ "unexpected status: 0x%02x", gpt->status); ++ grub_gpt_free (gpt); ++ ++ close_disk (&data); ++} ++ ++static void ++invalid_partsize_test (void) ++{ ++ struct grub_gpt_header header = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0x1ff2a054), ++ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), ++ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ /* Triple the entry size, which is not valid. */ ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE*3), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), ++ }; ++ ++ grub_gpt_header_check(&header, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_test_assert (strcmp(grub_errmsg, "invalid GPT entry size") == 0, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ + static void + search_part_label_test (void) + { +@@ -657,6 +771,9 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); ++ grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); ++ grub_test_register ("gpt_large_partitions_test", large_partitions_test); ++ grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); +@@ -671,6 +788,9 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); ++ grub_test_unregister ("gpt_iterate_partitions_test"); ++ grub_test_unregister ("gpt_large_partitions_test"); ++ grub_test_unregister ("gpt_invalid_partsize_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); + grub_test_unregister ("gpt_search_disk_uuid_test"); +From 3ef7f041f419f8c62f6d7b5c60dc1da45862c074 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 23 Aug 2016 13:09:14 -0700 +Subject: [PATCH] gpt: prefer disk size from header over firmware + +The firmware and the OS may disagree on the disk configuration and size. +Although such a setup should be avoided users are unlikely to know about +the problem, assuming everything behaves like the OS. Tolerate this as +best we can and trust the reported on-disk location over the firmware +when looking for the backup GPT. If the location is inaccessible report +the error as best we can and move on. 
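+
+To make the lookup order concrete: a standalone sketch of the decision,
+with plain integers standing in for the real grub_disk_t and grub_gpt_t
+state (an illustration only, not the hunk applied below).
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Prefer the location recorded by a valid primary header, bounded by
+       whatever size the firmware reports; fall back to the last firmware
+       sector only when no valid primary header exists.  */
+    static int
+    locate_backup_sector (int primary_valid, uint64_t alternate_lba,
+                          int disk_size_trusted, uint64_t total_sectors,
+                          uint64_t *out_sector)
+    {
+      if (primary_valid)
+        {
+          if (disk_size_trusted && alternate_lba >= total_sectors)
+            return -1;   /* backup lies beyond the last reachable sector */
+          *out_sector = alternate_lba;
+          return 0;
+        }
+      if (disk_size_trusted)
+        {
+          *out_sector = total_sectors - 1;
+          return 0;
+        }
+      return -1;         /* disk size unknown, cannot locate backup GPT */
+    }
+
+    int
+    main (void)
+    {
+      uint64_t sector;
+      /* Primary header points at LBA 8191 of a disk the firmware also
+         reports as 8192 sectors, so the on-disk value wins.  */
+      if (locate_backup_sector (1, 8191, 1, 8192, &sector) == 0)
+        printf ("backup GPT at LBA %llu\n", (unsigned long long) sector);
+      return 0;
+    }
+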
+--- + grub-core/lib/gpt.c | 18 +++++++++++++----- + tests/gpt_unit_test.c | 42 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 55 insertions(+), 5 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 205779192..f0c71bde1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -394,13 +394,21 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + grub_disk_addr_t addr; + + /* Assumes gpt->log_sector_size == disk->log_sector_size */ +- if (grub_gpt_disk_size_valid(disk)) ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ { ++ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ if (grub_gpt_disk_size_valid (disk) && sector >= disk->total_sectors) ++ return grub_error (GRUB_ERR_OUT_OF_RANGE, ++ "backup GPT located at 0x%llx, " ++ "beyond last disk sector at 0x%llx", ++ (unsigned long long) sector, ++ (unsigned long long) disk->total_sectors - 1); ++ } ++ else if (grub_gpt_disk_size_valid (disk)) + sector = disk->total_sectors - 1; +- else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); + else +- return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, +- "Unable to locate backup GPT"); ++ return grub_error (GRUB_ERR_OUT_OF_RANGE, ++ "size of disk unknown, cannot locate backup GPT"); + + grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", + (unsigned long long) sector); +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 9cf3414c2..218b18697 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -544,6 +544,46 @@ repair_test (void) + close_disk (&data); + } + ++/* Finding/reading/writing the backup GPT may be difficult if the OS and ++ * BIOS report different sizes for the same disk. We need to gracefully ++ * recognize this and avoid causing trouble for the OS. */ ++static void ++weird_disk_size_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Chop off 65536 bytes (128 512B sectors) which may happen when the ++ * BIOS thinks you are using a software RAID system that reserves that ++ * area for metadata when in fact you are not and using the bare disk. */ ++ grub_test_assert(data.dev->disk->total_sectors == DISK_SECTORS, ++ "unexpected disk size: 0x%llx", ++ (unsigned long long) data.dev->disk->total_sectors); ++ data.dev->disk->total_sectors -= 128; ++ ++ gpt = read_disk (&data); ++ assert_error_stack_empty (); ++ /* Reading the alternate_lba should have been blocked and reading ++ * the (new) end of disk should have found no useful data. */ ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, ++ "unreported missing backup header"); ++ ++ /* We should be able to reconstruct the backup header and the location ++ * of the backup should remain unchanged, trusting the GPT data over ++ * what the BIOS is telling us. Further changes are left to the OS. 
*/ ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ grub_test_assert (memcmp (&gpt->primary, &example_primary, ++ sizeof (gpt->primary)) == 0, ++ "repair corrupted primary header"); ++ ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ + static void + iterate_partitions_test (void) + { +@@ -774,6 +814,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); + grub_test_register ("gpt_large_partitions_test", large_partitions_test); + grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); ++ grub_test_register ("gpt_weird_disk_size_test", weird_disk_size_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); +@@ -791,6 +832,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_iterate_partitions_test"); + grub_test_unregister ("gpt_large_partitions_test"); + grub_test_unregister ("gpt_invalid_partsize_test"); ++ grub_test_unregister ("gpt_weird_disk_size_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); + grub_test_unregister ("gpt_search_disk_uuid_test"); +From d1a329f0d8b5f272b925dd1e54c7e1e93ec555ca Mon Sep 17 00:00:00 2001 +From: Vito Caputo +Date: Thu, 25 Aug 2016 17:21:18 -0700 +Subject: [PATCH] gpt: add helper for picking a valid header + +Eliminate some repetition in primary vs. backup header acquisition. +--- + grub-core/lib/gpt.c | 32 ++++++++++++++++++++------------ + 1 file changed, 20 insertions(+), 12 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f0c71bde1..2550ed87c 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -108,21 +108,32 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) + return GRUB_ERR_NONE; + } + ++static struct grub_gpt_header * ++grub_gpt_get_header (grub_gpt_t gpt) ++{ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ return &gpt->primary; ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ return &gpt->backup; ++ ++ grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ return NULL; ++} ++ + grub_err_t + grub_gpt_disk_uuid (grub_device_t device, char **uuid) + { ++ struct grub_gpt_header *header; ++ + grub_gpt_t gpt = grub_gpt_read (device->disk); + if (!gpt) + goto done; + +- grub_errno = GRUB_ERR_NONE; ++ header = grub_gpt_get_header (gpt); ++ if (!header) ++ goto done; + +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); +- else +- grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ *uuid = grub_gpt_guid_to_str (&header->guid); + + done: + grub_gpt_free (gpt); +@@ -559,11 +570,8 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) + struct grub_gpt_header *header; + grub_size_t offset; + +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- header = &gpt->primary; +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- header = &gpt->backup; +- else ++ header = grub_gpt_get_header (gpt); ++ if (!header) + return NULL; + + if (n >= grub_le_to_cpu32 (header->maxpart)) +From e2074ff46920d5332cbc3209160b7987da76080b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 13:06:05 -0700 +Subject: 
[PATCH] gptrepair: fix status checking + +None of these status bit checks were correct. Fix and simplify. +--- + grub-core/commands/gptrepair.c | 28 +++++++++++----------------- + 1 file changed, 11 insertions(+), 17 deletions(-) + +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +index 38392fd8f..66ac3f7c7 100644 +--- a/grub-core/commands/gptrepair.c ++++ b/grub-core/commands/gptrepair.c +@@ -46,8 +46,6 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + grub_device_t dev = NULL; + grub_gpt_t gpt = NULL; + char *dev_name; +- grub_uint32_t primary_crc, backup_crc; +- enum grub_gpt_status old_status; + + if (argc != 1 || !grub_strlen(args[0])) + return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); +@@ -67,29 +65,25 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + if (!gpt) + goto done; + +- primary_crc = gpt->primary.crc32; +- backup_crc = gpt->backup.crc32; +- old_status = gpt->status; +- +- if (grub_gpt_repair (dev->disk, gpt)) +- goto done; +- +- if (primary_crc == gpt->primary.crc32 && +- backup_crc == gpt->backup.crc32 && +- old_status && gpt->status) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) + { + grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); + goto done; + } + +- if (grub_gpt_write (dev->disk, gpt)) ++ if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) ++ grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); ++ ++ if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) ++ grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); ++ ++ if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +- if (!(old_status & GRUB_GPT_PRIMARY_VALID)) +- grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; + +- if (!(old_status & GRUB_GPT_BACKUP_VALID)) +- grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); ++ grub_printf_ (N_("Repaired GPT on %s\n"), dev_name); + + done: + if (gpt) +From f51e80579fd2b69d64dd98c0a8be44eb65556363 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 12:43:01 -0700 +Subject: [PATCH] gpt: use inline functions for checking status bits + +This should prevent bugs like 6078f836 and 4268f3da. 
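+
+The difference between the checks is easiest to see in a small standalone
+model; the bit values mirror the status flags in gpt_partition.h, but the
+helper is illustrative rather than the one added below.
+
+    #include <stdio.h>
+
+    #define PRIMARY_HEADER_VALID  0x04u
+    #define PRIMARY_ENTRIES_VALID 0x08u
+    #define BACKUP_HEADER_VALID   0x10u
+    #define BACKUP_ENTRIES_VALID  0x20u
+    #define BOTH_VALID (PRIMARY_HEADER_VALID | PRIMARY_ENTRIES_VALID | \
+                        BACKUP_HEADER_VALID | BACKUP_ENTRIES_VALID)
+
+    /* All four bits must be set; a plain truth test accepts any one.  */
+    static inline int
+    both_valid (unsigned status)
+    {
+      return (status & BOTH_VALID) == BOTH_VALID;
+    }
+
+    int
+    main (void)
+    {
+      /* Primary is fine, backup is broken.  */
+      unsigned status = PRIMARY_HEADER_VALID | PRIMARY_ENTRIES_VALID;
+
+      /* Old check: prints 0, so the broken backup is never repaired.  */
+      printf ("!(status & BOTH_VALID) = %d\n", !(status & BOTH_VALID));
+      /* New check: prints 1, so repair runs.  */
+      printf ("needs repair = %d\n", !both_valid (status));
+      return 0;
+    }
+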
+--- + grub-core/commands/gptprio.c | 2 +- + grub-core/commands/gptrepair.c | 6 +++--- + grub-core/lib/gpt.c | 9 +++++++-- + include/grub/gpt_partition.h | 35 ++++++++++++++++++++++++++++------- + 4 files changed, 39 insertions(+), 13 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 548925a08..25f867a81 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) ++ if (!grub_gpt_both_valid(gpt)) + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +index 66ac3f7c7..c17c7346c 100644 +--- a/grub-core/commands/gptrepair.c ++++ b/grub-core/commands/gptrepair.c +@@ -65,16 +65,16 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + if (!gpt) + goto done; + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) ++ if (grub_gpt_both_valid (gpt)) + { + grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); + goto done; + } + +- if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) ++ if (!grub_gpt_primary_valid (gpt)) + grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); + +- if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) ++ if (!grub_gpt_backup_valid (gpt)) + grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); + + if (grub_gpt_repair (dev->disk, gpt)) +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 2550ed87c..3e077c497 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -638,10 +638,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_check_primary (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); + ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID); ++ + if (grub_gpt_check_backup (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + +- gpt->status |= GRUB_GPT_BOTH_VALID; ++ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); + + return GRUB_ERR_NONE; +@@ -703,7 +708,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { + /* TODO: update/repair protective MBRs too. */ + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) ++ if (!grub_gpt_both_valid (gpt)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index cc3a201a5..39388ce6e 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -161,13 +161,6 @@ typedef enum grub_gpt_status + GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, + } grub_gpt_status_t; + +-#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) +-#define GRUB_GPT_PRIMARY_VALID \ +- (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) +-#define GRUB_GPT_BACKUP_VALID \ +- (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) +-#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) +- + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. 
*/ + #define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 +@@ -197,6 +190,34 @@ struct grub_gpt + }; + typedef struct grub_gpt *grub_gpt_t; + ++/* Helpers for checking the gpt status field. */ ++static inline int ++grub_gpt_mbr_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_PROTECTIVE_MBR) || ++ (gpt->status & GRUB_GPT_HYBRID_MBR)); ++} ++ ++static inline int ++grub_gpt_primary_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID)); ++} ++ ++static inline int ++grub_gpt_backup_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) && ++ (gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)); ++} ++ ++static inline int ++grub_gpt_both_valid (grub_gpt_t gpt) ++{ ++ return grub_gpt_primary_valid (gpt) && grub_gpt_backup_valid (gpt); ++} ++ + /* Translate GPT sectors to GRUB's 512 byte block addresses. */ + static inline grub_disk_addr_t + grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) +From 025c41dafe285a36dae7ff1b4217520d7839bdb4 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 13:40:11 -0700 +Subject: [PATCH] gpt: allow repair function to noop + +Simplifies usage a little. +--- + grub-core/commands/gptprio.c | 5 ++--- + grub-core/lib/gpt.c | 4 ++++ + 2 files changed, 6 insertions(+), 3 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 25f867a81..a439552e1 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,9 +91,8 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if (!grub_gpt_both_valid(gpt)) +- if (grub_gpt_repair (dev->disk, gpt)) +- goto done; ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; + + for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) + { +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3e077c497..9bb19678d 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -586,6 +586,10 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; + ++ /* Skip if there is nothing to do. */ ++ if (grub_gpt_both_valid (gpt)) ++ return GRUB_ERR_NONE; ++ + grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); + + if (disk->log_sector_size != gpt->log_sector_size) +From d52abba6dabec22edfa420eddf60c8f4d41b7f32 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:22:06 -0700 +Subject: [PATCH] gpt: do not use an enum for status bit values + +--- + include/grub/gpt_partition.h | 19 +++++++++---------- + 1 file changed, 9 insertions(+), 10 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 39388ce6e..ee435d73b 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -151,15 +151,14 @@ grub_gpt_partition_map_iterate (grub_disk_t disk, + void *hook_data); + + /* Advanced GPT library. */ +-typedef enum grub_gpt_status +- { +- GRUB_GPT_PROTECTIVE_MBR = 0x01, +- GRUB_GPT_HYBRID_MBR = 0x02, +- GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, +- GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, +- GRUB_GPT_BACKUP_HEADER_VALID = 0x10, +- GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, +- } grub_gpt_status_t; ++ ++/* Status bits for the grub_gpt.status field. 
*/ ++#define GRUB_GPT_PROTECTIVE_MBR 0x01 ++#define GRUB_GPT_HYBRID_MBR 0x02 ++#define GRUB_GPT_PRIMARY_HEADER_VALID 0x04 ++#define GRUB_GPT_PRIMARY_ENTRIES_VALID 0x08 ++#define GRUB_GPT_BACKUP_HEADER_VALID 0x10 ++#define GRUB_GPT_BACKUP_ENTRIES_VALID 0x20 + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +@@ -170,7 +169,7 @@ typedef enum grub_gpt_status + struct grub_gpt + { + /* Bit field indicating which structures on disk are valid. */ +- grub_gpt_status_t status; ++ unsigned status; + + /* Protective or hybrid MBR. */ + struct grub_msdos_partition_mbr mbr; +From 294ebeeff46ab1a778ca070e64cab7eb3e8a6581 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:44:11 -0700 +Subject: [PATCH] gpt: check header and entries status bits together + +Use the new status function which checks *_HEADER_VALID and +*_ENTRIES_VALID bits together. It doesn't make sense for the header and +entries bits to mismatch so don't allow for it. +--- + grub-core/lib/gpt.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 9bb19678d..3c6ff3540 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -596,24 +596,20 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "GPT sector size must match disk sector size"); + +- if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) +- return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); +- +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ if (grub_gpt_primary_valid (gpt)) + { +- grub_dprintf ("gpt", "primary GPT header is valid\n"); ++ grub_dprintf ("gpt", "primary GPT is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ else if (grub_gpt_backup_valid (gpt)) + { +- grub_dprintf ("gpt", "backup GPT header is valid\n"); ++ grub_dprintf ("gpt", "backup GPT is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); + } + else +- return grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ return grub_error (GRUB_ERR_BUG, "No valid GPT"); + + /* Relocate backup to end if disk whenever possible. */ + if (grub_gpt_disk_size_valid(disk)) +From 27fcd95383567f5f1c1b0760b8a1d7e528e2a802 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:52:52 -0700 +Subject: [PATCH] gpt: be more careful about relocating backup header + +The header was being relocated without checking the new location is +actually safe. If the BIOS thinks the disk is smaller than the OS then +repair may relocate the header into allocated space, failing the final +validation check. So only move it if the disk has grown. + +Additionally, if the backup is valid then we can assume its current +location is good enough and leave it as-is. 
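+
+The relocation rule reduces to roughly the following, modelled here with
+plain integers rather than the real disk and GPT structures.
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Only move the backup header outward; if the firmware under-reports
+       the disk size, keep the location the valid primary already records. */
+    static uint64_t
+    pick_backup_lba (uint64_t alternate_lba,
+                     int disk_size_trusted, uint64_t total_sectors)
+    {
+      if (disk_size_trusted && total_sectors - 1 > alternate_lba)
+        return total_sectors - 1;
+      return alternate_lba;
+    }
+
+    int
+    main (void)
+    {
+      /* Disk grew from 8192 to 16384 sectors: relocate to the new end.  */
+      printf ("grown disk:   LBA %llu\n",
+              (unsigned long long) pick_backup_lba (8191, 1, 16384));
+      /* Firmware reports fewer sectors than the GPT describes: leave it.  */
+      printf ("smaller view: LBA %llu\n",
+              (unsigned long long) pick_backup_lba (8191, 1, 8064));
+      return 0;
+    }
+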
+--- + grub-core/lib/gpt.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3c6ff3540..35e65d8d9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -599,7 +599,17 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_primary_valid (gpt)) + { + grub_dprintf ("gpt", "primary GPT is valid\n"); ++ ++ /* Relocate backup to end if disk if the disk has grown. */ + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ if (grub_gpt_disk_size_valid (disk) && ++ disk->total_sectors - 1 > backup_header) ++ { ++ backup_header = disk->total_sectors - 1; ++ grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", ++ (unsigned long long) backup_header); ++ } ++ + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } + else if (grub_gpt_backup_valid (gpt)) +@@ -611,12 +621,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- /* Relocate backup to end if disk whenever possible. */ +- if (grub_gpt_disk_size_valid(disk)) +- backup_header = disk->total_sectors - 1; +- grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", +- (unsigned long long) backup_header); +- + backup_entries = backup_header - + grub_gpt_size_to_sectors (gpt, gpt->entries_size); + grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", +From eac47a495c3994aecbb66d12b90057dc2e51bde5 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 14:33:48 -0700 +Subject: [PATCH] gpt: selectively update fields during repair + +Just a little cleanup/refactor to skip touching data we don't need to. +--- + grub-core/lib/gpt.c | 28 ++++++++++++---------------- + 1 file changed, 12 insertions(+), 16 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 35e65d8d9..03e807b25 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -584,8 +584,6 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) + grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { +- grub_uint64_t backup_header, backup_entries; +- + /* Skip if there is nothing to do. */ + if (grub_gpt_both_valid (gpt)) + return GRUB_ERR_NONE; +@@ -598,6 +596,8 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + + if (grub_gpt_primary_valid (gpt)) + { ++ grub_uint64_t backup_header; ++ + grub_dprintf ("gpt", "primary GPT is valid\n"); + + /* Relocate backup to end if disk if the disk has grown. 
*/ +@@ -608,32 +608,28 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + backup_header = disk->total_sectors - 1; + grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", + (unsigned long long) backup_header); ++ ++ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); + } + + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); ++ gpt->backup.header_lba = gpt->primary.alternate_lba; ++ gpt->backup.alternate_lba = gpt->primary.header_lba; ++ gpt->backup.partitions = grub_cpu_to_le64 (backup_header - ++ grub_gpt_size_to_sectors (gpt, gpt->entries_size)); + } + else if (grub_gpt_backup_valid (gpt)) + { + grub_dprintf ("gpt", "backup GPT is valid\n"); +- backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); ++ gpt->primary.header_lba = gpt->backup.alternate_lba; ++ gpt->primary.alternate_lba = gpt->backup.header_lba; ++ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); + } + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- backup_entries = backup_header - +- grub_gpt_size_to_sectors (gpt, gpt->entries_size); +- grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", +- (unsigned long long) backup_entries); +- +- /* Update/fixup header and partition table locations. */ +- gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); +- gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); +- gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); +- gpt->backup.header_lba = gpt->primary.alternate_lba; +- gpt->backup.alternate_lba = gpt->primary.header_lba; +- gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); +- + /* Recompute checksums. */ + if (grub_gpt_update_checksums (gpt)) + return grub_errno; +From dab5d9e809c0cea5c6b5d0f5ba093068465fe1cb Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 14:55:19 -0700 +Subject: [PATCH] gpt: always revalidate when recomputing checksums + +This ensures all code modifying GPT data include the same sanity check +that repair does. If revalidation fails the status flags are left in the +appropriate state. +--- + grub-core/lib/gpt.c | 32 ++++++++++++++++++-------------- + 1 file changed, 18 insertions(+), 14 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 03e807b25..3ac2987c6 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -630,23 +630,9 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- /* Recompute checksums. */ + if (grub_gpt_update_checksums (gpt)) + return grub_errno; + +- /* Sanity check. */ +- if (grub_gpt_check_primary (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); +- +- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | +- GRUB_GPT_PRIMARY_ENTRIES_VALID); +- +- if (grub_gpt_check_backup (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); +- +- gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | +- GRUB_GPT_BACKUP_ENTRIES_VALID); +- + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); + + return GRUB_ERR_NONE; +@@ -657,6 +643,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + { + grub_uint32_t crc; + ++ /* Clear status bits, require revalidation of everything. 
*/ ++ gpt->status &= ~(GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + /* Writing headers larger than our header structure are unsupported. */ + gpt->primary.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); +@@ -670,6 +662,18 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); + grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + ++ if (grub_gpt_check_primary (gpt)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID); ++ ++ if (grub_gpt_check_backup (gpt)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + return GRUB_ERR_NONE; + } + +From de5adec64e3b72c79b08b398f24152e84ab89b5f Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 15:01:09 -0700 +Subject: [PATCH] gpt: include backup-in-sync check in revalidation + +--- + grub-core/lib/gpt.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3ac2987c6..c27bcc510 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -372,6 +372,11 @@ grub_gpt_check_backup (grub_gpt_t gpt) + if (backup <= end) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + ++ /* If both primary and backup are valid but differ prefer the primary. */ ++ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ !grub_gpt_headers_equal (gpt)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); ++ + return GRUB_ERR_NONE; + } + +@@ -435,11 +440,6 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + +- /* If both primary and backup are valid but differ prefer the primary. */ +- if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && +- !grub_gpt_headers_equal(gpt)) +- return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); +- + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; + return GRUB_ERR_NONE; + } +From 7b0ccb8fcc584be82e3f4778aad8afe512468f3b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 15:29:55 -0700 +Subject: [PATCH] gpt: read entries table at the same time as the header + +I personally think this reads easier. Also has the side effect of +directly comparing the primary and backup tables instead of presuming +they are equal if the crc32 matches. 
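+
+With both entry tables in memory the comparison is a plain memcmp, as in
+this standalone sketch; sizes and contents are made up for illustration.
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    /* Tables are only in sync when they have the same length and match
+       byte for byte, rather than merely carrying matching crc32 values.  */
+    static int
+    entries_in_sync (const void *primary, size_t primary_size,
+                     const void *backup, size_t backup_size)
+    {
+      return primary_size == backup_size
+             && memcmp (primary, backup, primary_size) == 0;
+    }
+
+    int
+    main (void)
+    {
+      static uint8_t primary[16384], backup[16384];
+      backup[128] ^= 0x01;   /* one flipped bit in the second 128-byte entry */
+      printf ("in sync: %d\n",
+              entries_in_sync (primary, sizeof primary, backup, sizeof backup));
+      return 0;
+    }
+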
+--- + grub-core/lib/gpt.c | 69 +++++++++++++++++++++++++++++++---------------------- + 1 file changed, 41 insertions(+), 28 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index c27bcc510..b93cedea1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -32,6 +32,11 @@ GRUB_MOD_LICENSE ("GPLv3+"); + + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + ++static grub_err_t ++grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header, ++ void **ret_entries, ++ grub_size_t *ret_entries_size); + + char * + grub_gpt_guid_to_str (grub_gpt_guid_t *guid) +@@ -400,12 +405,21 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + return grub_errno; + + gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; ++ ++ if (grub_gpt_read_entries (disk, gpt, &gpt->primary, ++ &gpt->entries, &gpt->entries_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ + return GRUB_ERR_NONE; + } + + static grub_err_t + grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + { ++ void *entries = NULL; ++ grub_size_t entries_size; + grub_uint64_t sector; + grub_disk_addr_t addr; + +@@ -441,12 +455,35 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; ++ ++ if (grub_gpt_read_entries (disk, gpt, &gpt->backup, ++ &entries, &entries_size)) ++ return grub_errno; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) ++ { ++ if (entries_size != gpt->entries_size || ++ grub_memcmp (entries, gpt->entries, entries_size) != 0) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); ++ ++ grub_free (entries); ++ } ++ else ++ { ++ gpt->entries = entries; ++ gpt->entries_size = entries_size; ++ } ++ ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ + return GRUB_ERR_NONE; + } + + static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, +- struct grub_gpt_header *header) ++ struct grub_gpt_header *header, ++ void **ret_entries, ++ grub_size_t *ret_entries_size) + { + void *entries = NULL; + grub_uint32_t count, size, crc; +@@ -488,9 +525,8 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + goto fail; + } + +- grub_free (gpt->entries); +- gpt->entries = entries; +- gpt->entries_size = entries_size; ++ *ret_entries = entries; ++ *ret_entries_size = entries_size; + return GRUB_ERR_NONE; + + fail: +@@ -529,30 +565,7 @@ grub_gpt_read (grub_disk_t disk) + grub_gpt_read_backup (disk, gpt); + + /* If either succeeded clear any possible error from the other. */ +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || +- gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- grub_errno = GRUB_ERR_NONE; +- else +- goto fail; +- +- /* Similarly, favor the value or error from the primary table. 
*/ +- if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && +- !grub_gpt_read_entries (disk, gpt, &gpt->backup)) +- { +- grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; +- } +- +- grub_errno = GRUB_ERR_NONE; +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && +- !grub_gpt_read_entries (disk, gpt, &gpt->primary)) +- { +- grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; +- } +- +- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) ++ if (grub_gpt_primary_valid (gpt) || grub_gpt_backup_valid (gpt)) + grub_errno = GRUB_ERR_NONE; + else + goto fail; +From ff51b717a6122c2811fe221dd65a29a03dcef347 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 16:02:53 -0700 +Subject: [PATCH] gpt: report all revalidation errors + +Before returning an error that the primary or backup GPT is invalid push +the existing error onto the stack so the user will be told what is bad. +--- + grub-core/lib/gpt.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index b93cedea1..f6f853309 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -676,13 +676,19 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + + if (grub_gpt_check_primary (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ { ++ grub_error_push (); ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ } + + gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | + GRUB_GPT_PRIMARY_ENTRIES_VALID); + + if (grub_gpt_check_backup (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ { ++ grub_error_push (); ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ } + + gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | + GRUB_GPT_BACKUP_ENTRIES_VALID); +From 4eb61a681cd1f4217dd19903b67c8fb141eafd82 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 22 Sep 2016 10:00:27 -0700 +Subject: [PATCH] gpt: rename and update documentation for grub_gpt_update + +The function now does more than just recompute checksums so give it a +more general name to reflect that. 
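A minimal sketch of the calling convention the rename reflects, mirroring how gptprio.c uses the function after this change; modify_and_flush is an illustrative name, not an existing function:

    static grub_err_t
    modify_and_flush (grub_device_t dev, grub_gpt_t gpt)
    {
      /* ...change the in-memory GPT data here, e.g. a partition's attributes... */

      /* Recompute checksums and revalidate headers and entry tables. */
      if (grub_gpt_update (gpt))
        return grub_errno;

      /* Only a GPT that passed revalidation is written back to disk. */
      if (grub_gpt_write (dev->disk, gpt))
        return grub_errno;

      return GRUB_ERR_NONE;
    }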
+--- + grub-core/commands/gptprio.c | 2 +- + grub-core/lib/gpt.c | 4 ++-- + include/grub/gpt_partition.h | 7 ++++--- + 3 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index a439552e1..4a24fa62d 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -127,7 +127,7 @@ grub_find_next (const char *disk_name, + + grub_gptprio_set_tries_left (part_found, tries_left - 1); + +- if (grub_gpt_update_checksums (gpt)) ++ if (grub_gpt_update (gpt)) + goto done; + + if (grub_gpt_write (dev->disk, gpt)) +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f6f853309..430404848 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -643,7 +643,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- if (grub_gpt_update_checksums (gpt)) ++ if (grub_gpt_update (gpt)) + return grub_errno; + + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); +@@ -652,7 +652,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + } + + grub_err_t +-grub_gpt_update_checksums (grub_gpt_t gpt) ++grub_gpt_update (grub_gpt_t gpt) + { + grub_uint32_t crc; + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index ee435d73b..4730fe362 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -232,11 +232,12 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, + grub_uint32_t n); + +-/* Sync up primary and backup headers, recompute checksums. */ ++/* Sync and update primary and backup headers if either are invalid. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + +-/* Recompute checksums, must be called after modifying GPT data. */ +-grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); ++/* Recompute checksums and revalidate everything, must be called after ++ * modifying any GPT data. */ ++grub_err_t grub_gpt_update (grub_gpt_t gpt); + + /* Write headers and entry tables back to disk. */ + grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); +From 313d88eab9e5c11a168c45978feccdd52b5ec6e5 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 22 Sep 2016 11:18:42 -0700 +Subject: [PATCH] gpt: write backup GPT first, skip if inaccessible. + +Writing the primary GPT before the backup may lead to a confusing +situation: booting a freshly updated system could consistently fail and +next boot will fall back to the old system if writing the primary works +but writing the backup fails. If the backup is written first and fails +the primary is left in the old state so the next boot will re-try and +possibly fail in the exact same way. Making that repeatable should make +it easier for users to identify the error. + +Additionally if the firmware and OS disagree on the disk size, making +the backup inaccessible to GRUB, then just skip writing the backup. +When this happens the automatic call to `coreos-setgoodroot` after boot +will take care of repairing the backup. 
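Condensed, the ordering decision implemented below looks roughly like the following; backup_reachable is an illustrative name, the real code inlines the check:

    /* Sketch: write the backup first so a failed write leaves the primary,
       and with it the previous boot configuration, untouched and the failure
       repeatable.  Skip the backup when the firmware reports a disk too small
       to reach its LBA. */
    static int
    backup_reachable (grub_disk_t disk, grub_gpt_t gpt)
    {
      grub_uint64_t backup_lba = grub_le_to_cpu64 (gpt->backup.header_lba);

      if (!grub_gpt_disk_size_valid (disk))
        return 1;  /* Unknown disk size: attempt the write anyway. */

      return backup_lba < disk->total_sectors;
    }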
+--- + grub-core/lib/gpt.c | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 430404848..c3e3a25f9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -729,19 +729,39 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, + grub_err_t + grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { ++ grub_uint64_t backup_header; ++ + /* TODO: update/repair protective MBRs too. */ + + if (!grub_gpt_both_valid (gpt)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + ++ /* Write the backup GPT first so if writing fails the update is aborted ++ * and the primary is left intact. However if the backup location is ++ * inaccessible we have to just skip and hope for the best, the backup ++ * will need to be repaired in the OS. */ ++ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ if (grub_gpt_disk_size_valid (disk) && ++ backup_header >= disk->total_sectors) ++ { ++ grub_printf ("warning: backup GPT located at 0x%llx, " ++ "beyond last disk sector at 0x%llx\n", ++ (unsigned long long) backup_header, ++ (unsigned long long) disk->total_sectors - 1); ++ grub_printf ("warning: only writing primary GPT, " ++ "the backup GPT must be repaired from the OS\n"); ++ } ++ else ++ { ++ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); ++ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) ++ return grub_errno; ++ } ++ + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->primary)) + return grub_errno; + +- grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); +- if (grub_gpt_write_table (disk, gpt, &gpt->backup)) +- return grub_errno; +- + return GRUB_ERR_NONE; + } + +From 290b82244de9e527ced46a9a04e2f7c6817f51e5 Mon Sep 17 00:00:00 2001 +From: iliana destroyer of worlds +Date: Thu, 28 Mar 2019 16:28:41 -0700 +Subject: [PATCH] Generate new gptprio partition type + +Signed-off-by: iliana destroyer of worlds +--- + include/grub/gpt_partition.h | 4 ++-- + tests/gptprio_test.in | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 4730fe362..438d983a6 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -62,8 +62,8 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); + 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + + #define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ +- GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ +- 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) ++ GRUB_GPT_GUID_INIT (0x6b636168, 0x7420, 0x6568, \ ++ 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x74, 0x21) + + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +index c5cf0f3b7..325627546 100644 +--- a/tests/gptprio_test.in ++++ b/tests/gptprio_test.in +@@ -59,7 +59,7 @@ esac + img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 + trap "rm -f '${img1}'" EXIT + +-prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" ++prio_type="6b636168-7420-6568-2070-6c616e657421" + declare -a prio_uuid + prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" + prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index a580d915..39cf1b0b 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -9,7 +9,9 @@ URL: https://www.gnu.org/software/grub/ Source0: 
https://ftp.gnu.org/gnu/grub/grub-%{version}.tar.xz Source1: core.cfg Patch1: 0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch +Patch2: gpt.patch +BuildRequires: automake BuildRequires: bison BuildRequires: flex BuildRequires: gcc-%{_cross_target} @@ -39,8 +41,6 @@ Summary: Tools for the bootloader with support for Linux and more %global grub_ldflags -static %build -install -T -m0644 %{SOURCE1} core.cfg - export \ CPP="%{_cross_target}-gcc -E" \ TARGET_CC="%{_cross_target}-gcc" \ @@ -50,7 +50,9 @@ export \ TARGET_NM="%{_cross_target}-nm" \ TARGET_OBJCOPY="%{_cross_target}-objcopy" \ TARGET_STRIP="%{_cross_target}-strip" \ + PYTHON="python3" \ +./autogen.sh %cross_configure \ CFLAGS="" \ LDFLAGS="" \ @@ -70,12 +72,12 @@ export \ mkdir -p %{buildroot}%{_cross_grubdir} grub2-mkimage \ - -c core.cfg \ + -c %{SOURCE1} \ -d ./grub-core/ \ -O "%{_cross_grub_tuple}" \ -o "%{buildroot}%{_cross_grubdir}/%{_cross_grub_image}" \ -p "%{_cross_grub_prefix}" \ - biosdisk configfile ext2 linux normal part_gpt search_fs_uuid + biosdisk configfile ext2 gptprio linux normal part_gpt search_fs_uuid install -m 0644 ./grub-core/boot.img \ %{buildroot}%{_cross_grubdir}/boot.img From 943d6fe7ef073c20ca8fef2fe1ce2809ff14930e Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Mon, 1 Apr 2019 23:14:16 +0000 Subject: [PATCH 0024/1356] Build separate ext4 boot/root images Signed-off-by: iliana destroyer of worlds --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b3ca46d0..610f3537 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,9 @@ define build_image @$(DOCKER) load < build/$(OS)-$(1)-builder.tar @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output --privileged \ $(OS)-builder:$(1) \ - --image-name=$(OS)-$(1).img \ + --disk-image-name=$(OS)-$(1).img \ + --boot-image-name=$(OS)-$(1)-boot.ext4 \ + --root-image-name=$(OS)-$(1)-root.ext4 \ --package-dir=/local/rpms \ --output-dir=/local/output endef From b68d6dd5fcd8391fd656f24248320ff2fa2a107a Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 2 Apr 2019 21:20:38 +0000 Subject: [PATCH 0025/1356] rpm2img: Use only unprivileged operations Signed-off-by: iliana destroyer of worlds --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 610f3537..27e09a4b 100644 --- a/Makefile +++ b/Makefile @@ -51,7 +51,7 @@ define build_image --output type=docker,name=$(OS)-builder:$(1),dest=build/$(OS)-$(1)-builder.tar \ $(BUILDCTL_ARGS) @$(DOCKER) load < build/$(OS)-$(1)-builder.tar - @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output --privileged \ + @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output \ $(OS)-builder:$(1) \ --disk-image-name=$(OS)-$(1).img \ --boot-image-name=$(OS)-$(1)-boot.ext4 \ From b9a7195b885a15123259fe2d9c977e8278194c8b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 2 Apr 2019 21:36:50 +0000 Subject: [PATCH 0026/1356] use grub-bios-setup to populate MBR and BBP Signed-off-by: Ben Cressey --- packages/grub/100-grub_setup_root.patch | 118 ++++++++++++++++++++++++ packages/grub/grub.spec | 3 +- 2 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 packages/grub/100-grub_setup_root.patch diff --git a/packages/grub/100-grub_setup_root.patch b/packages/grub/100-grub_setup_root.patch new file mode 100644 index 00000000..f053ba95 --- /dev/null +++ b/packages/grub/100-grub_setup_root.patch @@ -0,0 +1,118 @@ +--- a/util/grub-setup.c ++++ b/util/grub-setup.c +@@ -87,6 +87,8 @@ static 
struct argp_option options[] = { + N_("install even if problems are detected"), 0}, + {"skip-fs-probe",'s',0, 0, + N_("do not probe for filesystems in DEVICE"), 0}, ++ {"root-device", 'r', N_("DEVICE"), 0, ++ N_("use DEVICE as the root device"), 0}, + {"verbose", 'v', 0, 0, N_("print verbose messages."), 0}, + {"allow-floppy", 'a', 0, 0, + /* TRANSLATORS: The potential breakage isn't limited to floppies but it's +@@ -130,6 +132,7 @@ struct arguments + char *core_file; + char *dir; + char *dev_map; ++ char *root_dev; + int force; + int fs_probe; + int allow_floppy; +@@ -178,6 +181,13 @@ argp_parser (int key, char *arg, struct argp_state *state) + arguments->dev_map = xstrdup (arg); + break; + ++ case 'r': ++ if (arguments->root_dev) ++ free (arguments->root_dev); ++ ++ arguments->root_dev = xstrdup (arg); ++ break; ++ + case 'f': + arguments->force = 1; + break; +@@ -313,7 +323,7 @@ main (int argc, char *argv[]) + GRUB_SETUP_FUNC (arguments.dir ? : DEFAULT_DIRECTORY, + arguments.boot_file ? : DEFAULT_BOOT_FILE, + arguments.core_file ? : DEFAULT_CORE_FILE, +- dest_dev, arguments.force, ++ arguments.root_dev, dest_dev, arguments.force, + arguments.fs_probe, arguments.allow_floppy, + arguments.add_rs_codes); + +--- a/util/setup.c ++++ b/util/setup.c +@@ -247,13 +247,12 @@ identify_partmap (grub_disk_t disk __attribute__ ((unused)), + void + SETUP (const char *dir, + const char *boot_file, const char *core_file, +- const char *dest, int force, ++ const char *root, const char *dest, int force, + int fs_probe, int allow_floppy, + int add_rs_codes __attribute__ ((unused))) /* unused on sparc64 */ + { + char *core_path; + char *boot_img, *core_img, *boot_path; +- char *root = 0; + size_t boot_size, core_size; + #ifdef GRUB_SETUP_BIOS + grub_uint16_t core_sectors; +@@ -307,7 +306,10 @@ SETUP (const char *dir, + + core_dev = dest_dev; + +- { ++ if (root) ++ root_dev = grub_device_open(root); ++ ++ if (!root_dev) { + char **root_devices = grub_guess_root_devices (dir); + char **cur; + int found = 0; +@@ -320,6 +322,8 @@ SETUP (const char *dir, + char *drive; + grub_device_t try_dev; + ++ if (root_dev) ++ break; + drive = grub_util_get_grub_dev (*cur); + if (!drive) + continue; +--- a/include/grub/util/install.h ++++ b/include/grub/util/install.h +@@ -184,13 +184,13 @@ grub_install_get_image_target (const char *arg); + void + grub_util_bios_setup (const char *dir, + const char *boot_file, const char *core_file, +- const char *dest, int force, ++ const char *root, const char *dest, int force, + int fs_probe, int allow_floppy, + int add_rs_codes); + void + grub_util_sparc_setup (const char *dir, + const char *boot_file, const char *core_file, +- const char *dest, int force, ++ const char *root, const char *dest, int force, + int fs_probe, int allow_floppy, + int add_rs_codes); + +--- a/util/grub-install.c ++++ b/util/grub-install.c +@@ -1673,7 +1673,7 @@ main (int argc, char *argv[]) + /* Now perform the installation. */ + if (install_bootsector) + grub_util_bios_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, add_rs_codes); + break; + } +@@ -1699,7 +1699,7 @@ main (int argc, char *argv[]) + /* Now perform the installation. 
*/ + if (install_bootsector) + grub_util_sparc_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, + 0 /* unused */ ); + break; diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 39cf1b0b..2cf57798 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -10,6 +10,7 @@ Source0: https://ftp.gnu.org/gnu/grub/grub-%{version}.tar.xz Source1: core.cfg Patch1: 0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch Patch2: gpt.patch +Patch3: 100-grub_setup_root.patch BuildRequires: automake BuildRequires: bison @@ -86,6 +87,7 @@ install -m 0644 ./grub-core/boot.img \ %dir %{_cross_grubdir} %{_cross_grubdir}/boot.img %{_cross_grubdir}/%{_cross_grub_image} +%{_cross_sbindir}/grub-bios-setup %exclude %{_cross_infodir} %exclude %{_cross_localedir} %exclude %{_cross_sysconfdir} @@ -112,7 +114,6 @@ install -m 0644 ./grub-core/boot.img \ %{_cross_bindir}/grub-render-label %{_cross_bindir}/grub-script-check %{_cross_bindir}/grub-syslinux2cfg -%{_cross_sbindir}/grub-bios-setup %{_cross_sbindir}/grub-install %{_cross_sbindir}/grub-macbless %{_cross_sbindir}/grub-mkconfig From 2acf3653141796c2e42a7836eec81d86cf321db2 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Wed, 3 Apr 2019 19:59:51 +0000 Subject: [PATCH 0027/1356] Use ext4 sans journals; lz4 instead of minimize Signed-off-by: iliana destroyer of worlds --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 27e09a4b..e7e4af72 100644 --- a/Makefile +++ b/Makefile @@ -54,8 +54,8 @@ define build_image @$(DOCKER) run -t -v /dev:/dev -v $(OUTPUT):/local/output \ $(OS)-builder:$(1) \ --disk-image-name=$(OS)-$(1).img \ - --boot-image-name=$(OS)-$(1)-boot.ext4 \ - --root-image-name=$(OS)-$(1)-root.ext4 \ + --boot-image-name=$(OS)-$(1)-boot.ext4.lz4 \ + --root-image-name=$(OS)-$(1)-root.ext4.lz4 \ --package-dir=/local/rpms \ --output-dir=/local/output endef From 015bc1c9ea86834c3046cb3b99200f99a63109aa Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Wed, 3 Apr 2019 20:36:06 +0000 Subject: [PATCH 0028/1356] kernel: Apply "dm: add support to directly boot to a mapped device" Signed-off-by: iliana destroyer of worlds --- packages/kernel/config-aarch64 | 1 + packages/kernel/config-x86_64 | 1 + ...-to-directly-boot-to-a-mapped-device.patch | 665 ++++++++++++++++++ packages/kernel/kernel.spec | 3 +- 4 files changed, 669 insertions(+), 1 deletion(-) create mode 100644 packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 index 0d465603..1705456b 100644 --- a/packages/kernel/config-aarch64 +++ b/packages/kernel/config-aarch64 @@ -1915,6 +1915,7 @@ CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_DELAY=m +CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index 508e9e44..dcc94d1d 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -1982,6 +1982,7 @@ CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m CONFIG_DM_DELAY=m +CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m # CONFIG_DM_VERITY is not set diff --git a/packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch b/packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch new file mode 100644 index 00000000..3d55c82d --- 
/dev/null +++ b/packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch @@ -0,0 +1,665 @@ +From 4e6a7ccc14989c2c859b302c070363ddf691dff9 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 21 Feb 2019 17:33:34 -0300 +Subject: [PATCH] dm: add support to directly boot to a mapped device + +Add a "create" module parameter, which allows device-mapper targets to +be configured at boot time. This enables early use of DM targets in the +boot process (as the root device or otherwise) without the need of an +initramfs. + +The syntax used in the boot param is based on the concise format from +the dmsetup tool to follow the rule of least surprise: + + dmsetup table --concise /dev/mapper/lroot + +Which is: + dm-mod.create=,,,,[,
+][;,,,,
[,
+]+] + +Where, + ::= The device name. + ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "" + ::= The device minor number | "" + ::= "ro" | "rw" +
::= + ::= "verity" | "linear" | ... + +For example, the following could be added in the boot parameters: +dm-mod.create="lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" root=/dev/dm-0 + +Only the targets that were tested are allowed and the ones that don't +change any block device when the device is create as read-only. For +example, mirror and cache targets are not allowed. The rationale behind +this is that if the user makes a mistake, choosing the wrong device to +be the mirror or the cache can corrupt data. + +The only targets initially allowed are: +* crypt +* delay +* linear +* snapshot-origin +* striped +* verity + +Co-developed-by: Will Drewry +Co-developed-by: Kees Cook +Co-developed-by: Enric Balletbo i Serra +Signed-off-by: Helen Koike +Reviewed-by: Kees Cook +Signed-off-by: Mike Snitzer +--- + Documentation/device-mapper/dm-init.txt | 114 ++++++++++++ + drivers/md/Kconfig | 12 ++ + drivers/md/Makefile | 4 + + drivers/md/dm-init.c | 303 ++++++++++++++++++++++++++++++++ + drivers/md/dm-ioctl.c | 103 +++++++++++ + include/linux/device-mapper.h | 9 + + 6 files changed, 545 insertions(+) + create mode 100644 Documentation/device-mapper/dm-init.txt + create mode 100644 drivers/md/dm-init.c + +diff --git a/Documentation/device-mapper/dm-init.txt b/Documentation/device-mapper/dm-init.txt +new file mode 100644 +index 000000000000..8464ee7c01b8 +--- /dev/null ++++ b/Documentation/device-mapper/dm-init.txt +@@ -0,0 +1,114 @@ ++Early creation of mapped devices ++==================================== ++ ++It is possible to configure a device-mapper device to act as the root device for ++your system in two ways. ++ ++The first is to build an initial ramdisk which boots to a minimal userspace ++which configures the device, then pivot_root(8) in to it. ++ ++The second is to create one or more device-mappers using the module parameter ++"dm-mod.create=" through the kernel boot command line argument. ++ ++The format is specified as a string of data separated by commas and optionally ++semi-colons, where: ++ - a comma is used to separate fields like name, uuid, flags and table ++ (specifies one device) ++ - a semi-colon is used to separate devices. ++ ++So the format will look like this: ++ ++ dm-mod.create=,,,,
[,
+][;,,,,
[,
+]+] ++ ++Where, ++ ::= The device name. ++ ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "" ++ ::= The device minor number | "" ++ ::= "ro" | "rw" ++
::= ++ ::= "verity" | "linear" | ... (see list below) ++ ++The dm line should be equivalent to the one used by the dmsetup tool with the ++--concise argument. ++ ++Target types ++============ ++ ++Not all target types are available as there are serious risks in allowing ++activation of certain DM targets without first using userspace tools to check ++the validity of associated metadata. ++ ++ "cache": constrained, userspace should verify cache device ++ "crypt": allowed ++ "delay": allowed ++ "era": constrained, userspace should verify metadata device ++ "flakey": constrained, meant for test ++ "linear": allowed ++ "log-writes": constrained, userspace should verify metadata device ++ "mirror": constrained, userspace should verify main/mirror device ++ "raid": constrained, userspace should verify metadata device ++ "snapshot": constrained, userspace should verify src/dst device ++ "snapshot-origin": allowed ++ "snapshot-merge": constrained, userspace should verify src/dst device ++ "striped": allowed ++ "switch": constrained, userspace should verify dev path ++ "thin": constrained, requires dm target message from userspace ++ "thin-pool": constrained, requires dm target message from userspace ++ "verity": allowed ++ "writecache": constrained, userspace should verify cache device ++ "zero": constrained, not meant for rootfs ++ ++If the target is not listed above, it is constrained by default (not tested). ++ ++Examples ++======== ++An example of booting to a linear array made up of user-mode linux block ++devices: ++ ++ dm-mod.create="lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" root=/dev/dm-0 ++ ++This will boot to a rw dm-linear target of 8192 sectors split across two block ++devices identified by their major:minor numbers. After boot, udev will rename ++this target to /dev/mapper/lroot (depending on the rules). No uuid was assigned. ++ ++An example of multiple device-mappers, with the dm-mod.create="..." contents is shown here ++split on multiple lines for readability: ++ ++ vroot,,,ro, ++ 0 1740800 verity 254:0 254:0 1740800 sha1 ++ 76e9be054b15884a9fa85973e9cb274c93afadb6 ++ 5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe; ++ vram,,,rw, ++ 0 32768 linear 1:0 0, ++ 32768 32768 linear 1:1 0 ++ ++Other examples (per target): ++ ++"crypt": ++ dm-crypt,,8,ro, ++ 0 1048576 crypt aes-xts-plain64 ++ babebabebabebabebabebabebabebabebabebabebabebabebabebabebabebabe 0 ++ /dev/sda 0 1 allow_discards ++ ++"delay": ++ dm-delay,,4,ro,0 409600 delay /dev/sda1 0 500 ++ ++"linear": ++ dm-linear,,,rw, ++ 0 32768 linear /dev/sda1 0, ++ 32768 1024000 linear /dev/sda2 0, ++ 1056768 204800 linear /dev/sda3 0, ++ 1261568 512000 linear /dev/sda4 0 ++ ++"snapshot-origin": ++ dm-snap-orig,,4,ro,0 409600 snapshot-origin 8:2 ++ ++"striped": ++ dm-striped,,4,ro,0 1638400 striped 4 4096 ++ /dev/sda1 0 /dev/sda2 0 /dev/sda3 0 /dev/sda4 0 ++ ++"verity": ++ dm-verity,,4,ro, ++ 0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256 ++ fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd ++ 51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584 +diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig +index 4a249ee86364..4ea706f7790b 100644 +--- a/drivers/md/Kconfig ++++ b/drivers/md/Kconfig +@@ -428,6 +428,18 @@ config DM_DELAY + + If unsure, say N. + ++config DM_INIT ++ bool "DM \"dm-mod.create=\" parameter support" ++ depends on BLK_DEV_DM=y ++ ---help--- ++ Enable "dm-mod.create=" parameter to create mapped devices at init time. 
++ This option is useful to allow mounting rootfs without requiring an ++ initramfs. ++ See Documentation/device-mapper/dm-init.txt for dm-mod.create="..." ++ format. ++ ++ If unsure, say N. ++ + config DM_UEVENT + bool "DM uevents" + depends on BLK_DEV_DM +diff --git a/drivers/md/Makefile b/drivers/md/Makefile +index e94b6f9be941..d56331fbd895 100644 +--- a/drivers/md/Makefile ++++ b/drivers/md/Makefile +@@ -64,6 +64,10 @@ obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o + obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o + obj-$(CONFIG_DM_ZONED) += dm-zoned.o + ++ifeq ($(CONFIG_DM_INIT),y) ++dm-mod-objs += dm-init.o ++endif ++ + ifeq ($(CONFIG_DM_UEVENT),y) + dm-mod-objs += dm-uevent.o + endif +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +new file mode 100644 +index 000000000000..b53f30f16b4d +--- /dev/null ++++ b/drivers/md/dm-init.c +@@ -0,0 +1,303 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++/* ++ * dm-init.c ++ * Copyright (C) 2017 The Chromium OS Authors ++ * ++ * This file is released under the GPLv2. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DM_MSG_PREFIX "init" ++#define DM_MAX_DEVICES 256 ++#define DM_MAX_TARGETS 256 ++#define DM_MAX_STR_SIZE 4096 ++ ++static char *create; ++ ++/* ++ * Format: dm-mod.create=,,,,
[,
+][;,,,,
[,
+]+] ++ * Table format: ++ * ++ * See Documentation/device-mapper/dm-init.txt for dm-mod.create="..." format ++ * details. ++ */ ++ ++struct dm_device { ++ struct dm_ioctl dmi; ++ struct dm_target_spec *table[DM_MAX_TARGETS]; ++ char *target_args_array[DM_MAX_TARGETS]; ++ struct list_head list; ++}; ++ ++const char *dm_allowed_targets[] __initconst = { ++ "crypt", ++ "delay", ++ "linear", ++ "snapshot-origin", ++ "striped", ++ "verity", ++}; ++ ++static int __init dm_verify_target_type(const char *target) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dm_allowed_targets); i++) { ++ if (!strcmp(dm_allowed_targets[i], target)) ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++static void __init dm_setup_cleanup(struct list_head *devices) ++{ ++ struct dm_device *dev, *tmp; ++ unsigned int i; ++ ++ list_for_each_entry_safe(dev, tmp, devices, list) { ++ list_del(&dev->list); ++ for (i = 0; i < dev->dmi.target_count; i++) { ++ kfree(dev->table[i]); ++ kfree(dev->target_args_array[i]); ++ } ++ kfree(dev); ++ } ++} ++ ++/** ++ * str_field_delimit - delimit a string based on a separator char. ++ * @str: the pointer to the string to delimit. ++ * @separator: char that delimits the field ++ * ++ * Find a @separator and replace it by '\0'. ++ * Remove leading and trailing spaces. ++ * Return the remainder string after the @separator. ++ */ ++static char __init *str_field_delimit(char **str, char separator) ++{ ++ char *s; ++ ++ /* TODO: add support for escaped characters */ ++ *str = skip_spaces(*str); ++ s = strchr(*str, separator); ++ /* Delimit the field and remove trailing spaces */ ++ if (s) ++ *s = '\0'; ++ *str = strim(*str); ++ return s ? ++s : NULL; ++} ++ ++/** ++ * dm_parse_table_entry - parse a table entry ++ * @dev: device to store the parsed information. ++ * @str: the pointer to a string with the format: ++ * [, ...] ++ * ++ * Return the remainder string after the table entry, i.e, after the comma which ++ * delimits the entry or NULL if reached the end of the string. ++ */ ++static char __init *dm_parse_table_entry(struct dm_device *dev, char *str) ++{ ++ const unsigned int n = dev->dmi.target_count - 1; ++ struct dm_target_spec *sp; ++ unsigned int i; ++ /* fields: */ ++ char *field[4]; ++ char *next; ++ ++ field[0] = str; ++ /* Delimit first 3 fields that are separated by space */ ++ for (i = 0; i < ARRAY_SIZE(field) - 1; i++) { ++ field[i + 1] = str_field_delimit(&field[i], ' '); ++ if (!field[i + 1]) ++ return ERR_PTR(-EINVAL); ++ } ++ /* Delimit last field that can be terminated by comma */ ++ next = str_field_delimit(&field[i], ','); ++ ++ sp = kzalloc(sizeof(*sp), GFP_KERNEL); ++ if (!sp) ++ return ERR_PTR(-ENOMEM); ++ dev->table[n] = sp; ++ ++ /* start_sector */ ++ if (kstrtoull(field[0], 0, &sp->sector_start)) ++ return ERR_PTR(-EINVAL); ++ /* num_sector */ ++ if (kstrtoull(field[1], 0, &sp->length)) ++ return ERR_PTR(-EINVAL); ++ /* target_type */ ++ strscpy(sp->target_type, field[2], sizeof(sp->target_type)); ++ if (dm_verify_target_type(sp->target_type)) { ++ DMERR("invalid type \"%s\"", sp->target_type); ++ return ERR_PTR(-EINVAL); ++ } ++ /* target_args */ ++ dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL, ++ DM_MAX_STR_SIZE); ++ if (!dev->target_args_array[n]) ++ return ERR_PTR(-ENOMEM); ++ ++ return next; ++} ++ ++/** ++ * dm_parse_table - parse "dm-mod.create=" table field ++ * @dev: device to store the parsed information. ++ * @str: the pointer to a string with the format: ++ *
[,
+] ++ */ ++static int __init dm_parse_table(struct dm_device *dev, char *str) ++{ ++ char *table_entry = str; ++ ++ while (table_entry) { ++ DMDEBUG("parsing table \"%s\"", str); ++ if (++dev->dmi.target_count >= DM_MAX_TARGETS) { ++ DMERR("too many targets %u > %d", ++ dev->dmi.target_count, DM_MAX_TARGETS); ++ return -EINVAL; ++ } ++ table_entry = dm_parse_table_entry(dev, table_entry); ++ if (IS_ERR(table_entry)) { ++ DMERR("couldn't parse table"); ++ return PTR_ERR(table_entry); ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * dm_parse_device_entry - parse a device entry ++ * @dev: device to store the parsed information. ++ * @str: the pointer to a string with the format: ++ * name,uuid,minor,flags,table[; ...] ++ * ++ * Return the remainder string after the table entry, i.e, after the semi-colon ++ * which delimits the entry or NULL if reached the end of the string. ++ */ ++static char __init *dm_parse_device_entry(struct dm_device *dev, char *str) ++{ ++ /* There are 5 fields: name,uuid,minor,flags,table; */ ++ char *field[5]; ++ unsigned int i; ++ char *next; ++ ++ field[0] = str; ++ /* Delimit first 4 fields that are separated by comma */ ++ for (i = 0; i < ARRAY_SIZE(field) - 1; i++) { ++ field[i+1] = str_field_delimit(&field[i], ','); ++ if (!field[i+1]) ++ return ERR_PTR(-EINVAL); ++ } ++ /* Delimit last field that can be delimited by semi-colon */ ++ next = str_field_delimit(&field[i], ';'); ++ ++ /* name */ ++ strscpy(dev->dmi.name, field[0], sizeof(dev->dmi.name)); ++ /* uuid */ ++ strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid)); ++ /* minor */ ++ if (strlen(field[2])) { ++ if (kstrtoull(field[2], 0, &dev->dmi.dev)) ++ return ERR_PTR(-EINVAL); ++ dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG; ++ } ++ /* flags */ ++ if (!strcmp(field[3], "ro")) ++ dev->dmi.flags |= DM_READONLY_FLAG; ++ else if (strcmp(field[3], "rw")) ++ return ERR_PTR(-EINVAL); ++ /* table */ ++ if (dm_parse_table(dev, field[4])) ++ return ERR_PTR(-EINVAL); ++ ++ return next; ++} ++ ++/** ++ * dm_parse_devices - parse "dm-mod.create=" argument ++ * @devices: list of struct dm_device to store the parsed information. ++ * @str: the pointer to a string with the format: ++ * [;+] ++ */ ++static int __init dm_parse_devices(struct list_head *devices, char *str) ++{ ++ unsigned long ndev = 0; ++ struct dm_device *dev; ++ char *device = str; ++ ++ DMDEBUG("parsing \"%s\"", str); ++ while (device) { ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ list_add_tail(&dev->list, devices); ++ ++ if (++ndev >= DM_MAX_DEVICES) { ++ DMERR("too many targets %u > %d", ++ dev->dmi.target_count, DM_MAX_TARGETS); ++ return -EINVAL; ++ } ++ ++ device = dm_parse_device_entry(dev, device); ++ if (IS_ERR(device)) { ++ DMERR("couldn't parse device"); ++ return PTR_ERR(device); ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * dm_init_init - parse "dm-mod.create=" argument and configure drivers ++ */ ++static int __init dm_init_init(void) ++{ ++ struct dm_device *dev; ++ LIST_HEAD(devices); ++ char *str; ++ int r; ++ ++ if (!create) ++ return 0; ++ ++ if (strlen(create) >= DM_MAX_STR_SIZE) { ++ DMERR("Argument is too big. 
Limit is %d\n", DM_MAX_STR_SIZE); ++ return -EINVAL; ++ } ++ str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE); ++ if (!str) ++ return -ENOMEM; ++ ++ r = dm_parse_devices(&devices, str); ++ if (r) ++ goto out; ++ ++ DMINFO("waiting for all devices to be available before creating mapped devices\n"); ++ wait_for_device_probe(); ++ ++ list_for_each_entry(dev, &devices, list) { ++ if (dm_early_create(&dev->dmi, dev->table, ++ dev->target_args_array)) ++ break; ++ } ++out: ++ kfree(str); ++ dm_setup_cleanup(&devices); ++ return r; ++} ++ ++late_initcall(dm_init_init); ++ ++module_param(create, charp, 0); ++MODULE_PARM_DESC(create, "Create a mapped device in early boot"); +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index ca948155191a..b7e6c7311a93 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -2017,3 +2017,106 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) + + return r; + } ++ ++ ++/** ++ * dm_early_create - create a mapped device in early boot. ++ * ++ * @dmi: Contains main information of the device mapping to be created. ++ * @spec_array: array of pointers to struct dm_target_spec. Describes the ++ * mapping table of the device. ++ * @target_params_array: array of strings with the parameters to a specific ++ * target. ++ * ++ * Instead of having the struct dm_target_spec and the parameters for every ++ * target embedded at the end of struct dm_ioctl (as performed in a normal ++ * ioctl), pass them as arguments, so the caller doesn't need to serialize them. ++ * The size of the spec_array and target_params_array is given by ++ * @dmi->target_count. ++ * This function is supposed to be called in early boot, so locking mechanisms ++ * to protect against concurrent loads are not required. ++ */ ++int __init dm_early_create(struct dm_ioctl *dmi, ++ struct dm_target_spec **spec_array, ++ char **target_params_array) ++{ ++ int r, m = DM_ANY_MINOR; ++ struct dm_table *t, *old_map; ++ struct mapped_device *md; ++ unsigned int i; ++ ++ if (!dmi->target_count) ++ return -EINVAL; ++ ++ r = check_name(dmi->name); ++ if (r) ++ return r; ++ ++ if (dmi->flags & DM_PERSISTENT_DEV_FLAG) ++ m = MINOR(huge_decode_dev(dmi->dev)); ++ ++ /* alloc dm device */ ++ r = dm_create(m, &md); ++ if (r) ++ return r; ++ ++ /* hash insert */ ++ r = dm_hash_insert(dmi->name, *dmi->uuid ? 
dmi->uuid : NULL, md); ++ if (r) ++ goto err_destroy_dm; ++ ++ /* alloc table */ ++ r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); ++ if (r) ++ goto err_destroy_dm; ++ ++ /* add targets */ ++ for (i = 0; i < dmi->target_count; i++) { ++ r = dm_table_add_target(t, spec_array[i]->target_type, ++ (sector_t) spec_array[i]->sector_start, ++ (sector_t) spec_array[i]->length, ++ target_params_array[i]); ++ if (r) { ++ DMWARN("error adding target to table"); ++ goto err_destroy_table; ++ } ++ } ++ ++ /* finish table */ ++ r = dm_table_complete(t); ++ if (r) ++ goto err_destroy_table; ++ ++ md->type = dm_table_get_type(t); ++ /* setup md->queue to reflect md's type (may block) */ ++ r = dm_setup_md_queue(md, t); ++ if (r) { ++ DMWARN("unable to set up device queue for new table."); ++ goto err_destroy_table; ++ } ++ ++ /* Set new map */ ++ dm_suspend(md, 0); ++ old_map = dm_swap_table(md, t); ++ if (IS_ERR(old_map)) { ++ r = PTR_ERR(old_map); ++ goto err_destroy_table; ++ } ++ set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG)); ++ ++ /* resume device */ ++ r = dm_resume(md); ++ if (r) ++ goto err_destroy_table; ++ ++ DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name); ++ dm_put(md); ++ return 0; ++ ++err_destroy_table: ++ dm_table_destroy(t); ++err_destroy_dm: ++ dm_put(md); ++ dm_destroy(md); ++ return r; ++} +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h +index a5538433c927..990e7c2f84b1 100644 +--- a/include/linux/device-mapper.h ++++ b/include/linux/device-mapper.h +@@ -10,6 +10,7 @@ + + #include + #include ++#include + #include + #include + +@@ -457,6 +458,14 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, + sector_t start); + union map_info *dm_get_rq_mapinfo(struct request *rq); + ++/* ++ * Device mapper functions to parse and create devices specified by the ++ * parameter "dm-mod.create=" ++ */ ++int __init dm_early_create(struct dm_ioctl *dmi, ++ struct dm_target_spec **spec_array, ++ char **target_params_array); ++ + struct queue_limits *dm_get_queue_limits(struct mapped_device *md); + + /* diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index d11c12c0..5c3ffeb2 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -8,6 +8,7 @@ License: GPLv2 and Redistributable, no modification permitted URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz Source100: config-%{_cross_arch} +Patch1000: dm-add-support-to-directly-boot-to-a-mapped-device.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} @@ -30,7 +31,7 @@ Summary: Header files for the Linux kernel for use by glibc %{summary}. 
%prep -%setup -q -n linux-%{version} +%autosetup -n linux-%{version} -p1 cp %{SOURCE100} "arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" %global kmake \ From ee72a4a3d518b17acee815a94c2a074070994809 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 4 Apr 2019 00:35:13 +0000 Subject: [PATCH 0029/1356] Enable DM_VERITY, DM_INIT, and BLK_DEV_DM Signed-off-by: iliana destroyer of worlds --- packages/kernel/config-aarch64 | 73 +++++++++++++++---- packages/kernel/config-x86_64 | 128 +++++++++++++++++++++++++-------- 2 files changed, 156 insertions(+), 45 deletions(-) diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 index 1705456b..6483fb4f 100644 --- a/packages/kernel/config-aarch64 +++ b/packages/kernel/config-aarch64 @@ -1,3 +1,7 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.14.102 Kernel Configuration +# CONFIG_ARM64=y CONFIG_64BIT=y CONFIG_ARCH_PHYS_ADDR_T_64BIT=y @@ -31,7 +35,7 @@ CONFIG_FIX_EARLYCON_MEM=y CONFIG_PGTABLE_LEVELS=4 CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_DEFCONFIG_LIST="/lib/modules//.config" +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y @@ -1564,7 +1568,7 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 # CONFIG_CDROM_PKTCDVD_WCACHE is not set CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_BLK=y # CONFIG_VIRTIO_BLK_SCSI is not set CONFIG_BLK_DEV_RBD=m # CONFIG_BLK_DEV_RSXX is not set @@ -1894,10 +1898,10 @@ CONFIG_MD_RAID456=m CONFIG_MD_FAULTY=m # CONFIG_BCACHE is not set CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_DM=y # CONFIG_DM_MQ_DEFAULT is not set CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m +CONFIG_DM_BUFIO=y # CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set CONFIG_DM_BIO_PRISON=m CONFIG_DM_PERSISTENT_DATA=m @@ -1918,7 +1922,7 @@ CONFIG_DM_DELAY=m CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m +CONFIG_DM_VERITY=y # CONFIG_DM_VERITY_FEC is not set CONFIG_DM_SWITCH=m # CONFIG_DM_LOG_WRITES is not set @@ -1967,7 +1971,7 @@ CONFIG_TUN=m CONFIG_TAP=m # CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=m -CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_NET=y CONFIG_NLMON=m # CONFIG_VSOCKMON is not set # CONFIG_ARCNET is not set @@ -2798,6 +2802,7 @@ CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m @@ -2887,6 +2892,8 @@ CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m # CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_SMM665 is not set CONFIG_SENSORS_ADC128D818=m @@ -2952,7 +2959,45 @@ CONFIG_HISI_THERMAL=m # # Qualcomm thermal drivers # -# CONFIG_WATCHDOG is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +# CONFIG_GPIO_WATCHDOG is not set +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_ARM_SP805_WATCHDOG is not set +# CONFIG_ARM_SBSA_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# 
CONFIG_ALIM7101_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set CONFIG_SSB_POSSIBLE=y # @@ -3962,12 +4007,12 @@ CONFIG_VFIO_AMBA=m # CONFIG_VFIO_MDEV is not set CONFIG_IRQ_BYPASS_MANAGER=m # CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO=m +CONFIG_VIRTIO=y # # Virtio drivers # -CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_PCI_LEGACY=y CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=m @@ -4217,8 +4262,6 @@ CONFIG_NVMEM=y # # CONFIG_FSI is not set # CONFIG_TEE is not set -CONFIG_AMAZON_DRIVER_UPDATES=y -CONFIG_AMAZON_ENA_ETHERNET=m # # Firmware Drivers @@ -4289,11 +4332,11 @@ CONFIG_ACPI_APEI_PCIEAER=y CONFIG_ACPI_APEI_SEA=y CONFIG_ACPI_APEI_EINJ=m # CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_ACPI_WATCHDOG=y # CONFIG_PMIC_OPREGION is not set # CONFIG_ACPI_CONFIGFS is not set CONFIG_ACPI_IORT=y CONFIG_ACPI_GTDT=y -CONFIG_ACPI_PPTT=y # # File systems @@ -4302,15 +4345,15 @@ CONFIG_DCACHE_WORD_ACCESS=y CONFIG_FS_IOMAP=y # CONFIG_EXT2_FS is not set # CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=m +CONFIG_EXT4_FS=y CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y # CONFIG_EXT4_ENCRYPTION is not set # CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=m +CONFIG_JBD2=y # CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=m +CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_XFS_FS=m diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index dcc94d1d..5ec4217e 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -1,3 +1,7 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 4.14.102 Kernel Configuration +# CONFIG_64BIT=y CONFIG_X86_64=y CONFIG_X86=y @@ -38,7 +42,7 @@ CONFIG_X86_64_SMP=y CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_FIX_EARLYCON_MEM=y CONFIG_PGTABLE_LEVELS=4 -CONFIG_DEFCONFIG_LIST="/lib/modules//.config" +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y @@ -655,8 +659,11 @@ CONFIG_CRASH_DUMP=y # CONFIG_KEXEC_JUMP is not set CONFIG_PHYSICAL_START=0x1000000 CONFIG_RELOCATABLE=y -# CONFIG_RANDOMIZE_BASE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa CONFIG_HOTPLUG_CPU=y # CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set # CONFIG_DEBUG_HOTPLUG_CPU0 is not set @@ -694,7 +701,6 @@ CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y # CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_SLEEP=y CONFIG_ACPI_PROCFS_POWER=y # CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set @@ -732,6 +738,7 @@ CONFIG_HAVE_ACPI_APEI=y CONFIG_HAVE_ACPI_APEI_NMI=y # CONFIG_ACPI_APEI is not set # CONFIG_DPTF_POWER is not set +CONFIG_ACPI_WATCHDOG=y CONFIG_ACPI_EXTLOG=m # CONFIG_PMIC_OPREGION is not set # CONFIG_ACPI_CONFIGFS is not set @@ -1845,7 +1852,7 @@ CONFIG_SCSI_DEBUG=m # CONFIG_SCSI_PMCRAID is not set # CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_CHELSIO_FCOE=m # CONFIG_SCSI_DH is not set CONFIG_SCSI_OSD_INITIATOR=m @@ -1960,10 +1967,10 @@ CONFIG_BCACHE=m # CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_CLOSURES_DEBUG is not set CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_DM=y # CONFIG_DM_MQ_DEFAULT is not set CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m +CONFIG_DM_BUFIO=y CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y # CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set CONFIG_DM_BIO_PRISON=m @@ -1985,7 +1992,8 @@ CONFIG_DM_DELAY=m CONFIG_DM_INIT=y CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m -# CONFIG_DM_VERITY is not set +CONFIG_DM_VERITY=y +# CONFIG_DM_VERITY_FEC is not set # CONFIG_DM_SWITCH is not set # CONFIG_DM_LOG_WRITES is not set CONFIG_DM_INTEGRITY=m @@ -2390,14 +2398,14 @@ CONFIG_HVC_DRIVER=y CONFIG_HVC_IRQ=y CONFIG_HVC_XEN=y CONFIG_HVC_XEN_FRONTEND=y -CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=m # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=m # CONFIG_HW_RANDOM_TIMERIOMEM is not set CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HW_RANDOM_VIRTIO=m CONFIG_HW_RANDOM_TPM=m CONFIG_NVRAM=m # CONFIG_R3964 is not set @@ -2578,6 +2586,7 @@ CONFIG_SENSORS_DELL_SMM=m # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set # CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_FTSTEUTATES is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_G760A is not set @@ -2644,6 +2653,8 @@ CONFIG_SENSORS_DELL_SMM=m # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_SMM665 is not set # CONFIG_SENSORS_ADC128D818 is not set @@ -2702,7 +2713,71 @@ CONFIG_X86_PKG_TEMP_THERMAL=m # # CONFIG_INT340X_THERMAL is not set # CONFIG_INTEL_PCH_THERMAL is not set -# CONFIG_WATCHDOG is not set +CONFIG_WATCHDOG=y 
+CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ALIM1535_WDT is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_F71808E_WDT is not set +# CONFIG_SP5100_TCO is not set +# CONFIG_SBC_FITPC2_WATCHDOG is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_IBMASR is not set +# CONFIG_WAFER_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_IE6XX_WDT is not set +# CONFIG_ITCO_WDT is not set +# CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set +# CONFIG_HP_WATCHDOG is not set +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +# CONFIG_NV_TCO is not set +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_VIA_WDT is not set +# CONFIG_W83627HF_WDT is not set +# CONFIG_W83877F_WDT is not set +# CONFIG_W83977F_WDT is not set +# CONFIG_MACHZ_WDT is not set +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_XEN_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set CONFIG_SSB_POSSIBLE=y # @@ -3433,12 +3508,7 @@ CONFIG_STAGING=y # Android # # CONFIG_LTE_GDM724X is not set -CONFIG_LNET=m -CONFIG_LNET_MAX_PAYLOAD=1048576 -# CONFIG_LNET_SELFTEST is not set -# CONFIG_LNET_XPRT_IB is not set -CONFIG_LUSTRE_FS=m -# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set +# CONFIG_LNET is not set # CONFIG_DGNC is not set # CONFIG_GS_FPGABOOT is not set # CONFIG_CRYPTO_SKEIN is not set @@ -3599,7 +3669,7 @@ CONFIG_RAS=y # # CONFIG_ANDROID is not set # CONFIG_LIBNVDIMM is not set -CONFIG_DAX=m +CONFIG_DAX=y # CONFIG_DEV_DAX is not set # CONFIG_NVMEM is not set # CONFIG_STM is not set @@ -3612,8 +3682,6 @@ CONFIG_DAX=m CONFIG_FSI=m CONFIG_FSI_MASTER_HUB=m CONFIG_FSI_SCOM=m -CONFIG_AMAZON_DRIVER_UPDATES=y -CONFIG_AMAZON_ENA_ETHERNET=m # # Firmware Drivers @@ -3654,9 +3722,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_DEBUG=y -CONFIG_JBD2=m +CONFIG_JBD2=y CONFIG_JBD2_DEBUG=y -CONFIG_FS_MBCACHE=m +CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -3685,7 +3753,7 @@ CONFIG_EXPORTFS=y # CONFIG_EXPORTFS_BLOCK_OPS is not set CONFIG_FILE_LOCKING=y CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=m +CONFIG_FS_ENCRYPTION=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY_USER=y @@ -4307,13 +4375,13 @@ CONFIG_CRYPTO_ECHAINIV=m # # Block modes # -CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_XTS=y CONFIG_CRYPTO_KEYWRAP=m # @@ -4344,9 +4412,9 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_SSSE3=m -CONFIG_CRYPTO_SHA256_SSSE3=m -CONFIG_CRYPTO_SHA512_SSSE3=m 
+CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SHA1_MB=m CONFIG_CRYPTO_SHA256_MB=m CONFIG_CRYPTO_SHA512_MB=m @@ -4482,7 +4550,7 @@ CONFIG_GENERIC_IO=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y CONFIG_ARCH_HAS_FAST_MULTIPLIER=y CONFIG_CRC_CCITT=m -CONFIG_CRC16=m +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=m CONFIG_CRC32=y From d04064de7e58616145ee013a5960a0c73c5c704f Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Fri, 5 Apr 2019 17:13:25 +0000 Subject: [PATCH 0030/1356] Set up dm-verity for the root partition (!!!) Signed-off-by: iliana destroyer of worlds --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index e7e4af72..07f36526 100644 --- a/Makefile +++ b/Makefile @@ -55,6 +55,7 @@ define build_image $(OS)-builder:$(1) \ --disk-image-name=$(OS)-$(1).img \ --boot-image-name=$(OS)-$(1)-boot.ext4.lz4 \ + --verity-image-name=$(OS)-$(1)-root.verity.lz4 \ --root-image-name=$(OS)-$(1)-root.ext4.lz4 \ --package-dir=/local/rpms \ --output-dir=/local/output From 2e37c5f78cd2fc4d4e0fada561a9291006522929 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 5 May 2019 03:04:02 +0000 Subject: [PATCH 0031/1356] generate dependencies for first-party Rust code Signed-off-by: Ben Cressey --- .gitignore | 1 + Makefile | 24 ++++++++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 05fa7fca..50ec4306 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /build *.makevar *.makepkg +*.makedep diff --git a/Makefile b/Makefile index 07f36526..5a8dad9f 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,14 @@ OS := thar TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) +DEP4SPEC ?= $(TOPDIR)/bin/dep4spec SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg SPECS = $(wildcard packages/*/*.spec) +TOMLS = $(wildcard workspaces/*/Cargo.toml) + +DEPS = $(shell echo $(TOMLS)|awk -F '/' '{print "packages/"$$2"/"$$2".makedep"}') VARS = $(SPECS:.spec=.makevar) PKGS = $(SPECS:.spec=.makepkg) @@ -66,12 +70,28 @@ space := $(empty) $(empty) comma := , list = $(subst $(space),$(comma),$(1)) +# `makedep` files are a hook to provide additional dependencies when +# building `makevar` and `makepkg` files. The intended use case is +# to generate source files that must be in place before parsing the +# spec file. +%.makedep : %.spec $(DEP4SPEC) + @$(DEP4SPEC) --spec=$< > $@ + +# `makevar` files generate variables that the `makepkg` files for +# other packages can refer to. All `makevar` files must be evaluated +# before any `makepkg` files, or else empty values could be added to +# the dependency list. %.makevar : %.spec $(SPEC2VAR) - @set -e; $(SPEC2VAR) --spec=$< --arches=$(call list,$(ARCHES)) > $@ + @$(SPEC2VAR) --spec=$< --arches=$(call list,$(ARCHES)) > $@ +# `makepkg` files define the package outputs obtained by building +# the spec file, as well as the dependencies needed to build that +# package. %.makepkg : %.spec $(SPEC2PKG) - @set -e; $(SPEC2PKG) --spec=$< --arches=$(call list,$(ARCHES)) > $@ + @$(SPEC2PKG) --spec=$< --arches=$(call list,$(ARCHES)) > $@ +# Order is important here. 
+-include $(DEPS) -include $(VARS) -include $(PKGS) From 2ef0f009aee112c7d93d35d2cab37309878065f1 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 5 May 2019 19:14:03 +0000 Subject: [PATCH 0032/1356] simplify architecture handling and dependency generators Functional aarch64 builds are pending fixes for the image build, so the current multi-architecture builds are not adding any value. In any case, this was largely a convenience for initial development, allowing us to easily exercise package builds for two architectures. For production builds, we can simply invoke `make` as many times as needed to build for all architectures. Take advantage of the required changes to the dependency generators to clean up and streamline the code. Signed-off-by: Ben Cressey --- Makefile | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 5a8dad9f..fefb709d 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ OUTPUT ?= $(TOPDIR)/build OUTVAR := $(shell mkdir -p $(OUTPUT)) DATE := $(shell date --rfc-3339=date) -ARCHES := x86_64 aarch64 +ARCH := x86_64 DOCKER ?= docker @@ -65,11 +65,6 @@ define build_image --output-dir=/local/output endef -empty := -space := $(empty) $(empty) -comma := , -list = $(subst $(space),$(comma),$(1)) - # `makedep` files are a hook to provide additional dependencies when # building `makevar` and `makepkg` files. The intended use case is # to generate source files that must be in place before parsing the @@ -82,27 +77,27 @@ list = $(subst $(space),$(comma),$(1)) # before any `makepkg` files, or else empty values could be added to # the dependency list. %.makevar : %.spec $(SPEC2VAR) - @$(SPEC2VAR) --spec=$< --arches=$(call list,$(ARCHES)) > $@ + @$(SPEC2VAR) --spec=$< --arch=$(ARCH) > $@ # `makepkg` files define the package outputs obtained by building # the spec file, as well as the dependencies needed to build that # package. %.makepkg : %.spec $(SPEC2PKG) - @$(SPEC2PKG) --spec=$< --arches=$(call list,$(ARCHES)) > $@ + @$(SPEC2PKG) --spec=$< --arch=$(ARCH) > $@ # Order is important here. -include $(DEPS) -include $(VARS) -include $(PKGS) -.PHONY: all $(ARCHES) +.PHONY: all $(ARCH) .SECONDEXPANSION: -$(ARCHES): $$($(OS)-$$(@)-release) - $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(@)-*.rpm)) +$(ARCH): $$($(OS)-$(ARCH)-release) + $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(ARCH)-*.rpm)) $(call build_image,$@,$(PKGS)) -all: $(ARCHES) +all: $(ARCH) .PHONY: clean clean: From 1c264ed46f2c1b4fdcb3fadab7b892cdaa1ce753 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 6 May 2019 22:04:42 +0000 Subject: [PATCH 0033/1356] always build makedep files and enforce proper ordering Although the included files are rebuilt if they do not exist, they are not built in the listed order. Furthermore, the rules to add dependencies to an existing target do not take effect until the next time the Makefile is loaded. The heavy-handed approach here always generates a makedep file, even if it is empty, and does a one-off build of the crate to ensure that it exists before the other scripts run. Errors when building these files now also cause `make` to fail, and avoid creating the output file. This will help avoid "successful" runs that are doomed or broken in subtle ways. 
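The rules below switch to writing `$@.tmp` and then `mv`-ing it into place, the usual write-to-temp-then-rename idiom. A standalone C sketch of the same idea, purely for illustration; write_atomically is hypothetical and not part of the build tooling:

    #include <stdio.h>

    /* Write contents to tmp_path, then rename it over path only on success,
       so a failed generator never leaves a partial output file behind for a
       later make run to trust. */
    static int
    write_atomically (const char *path, const char *tmp_path, const char *contents)
    {
      FILE *out = fopen (tmp_path, "w");
      int ok;

      if (!out)
        return -1;

      ok = (fputs (contents, out) != EOF);
      ok = (fclose (out) == 0) && ok;

      if (!ok)
        {
          remove (tmp_path);
          return -1;
        }

      return rename (tmp_path, path);
    }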
Signed-off-by: Ben Cressey --- Makefile | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index fefb709d..3443e9c0 100644 --- a/Makefile +++ b/Makefile @@ -7,9 +7,7 @@ SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg SPECS = $(wildcard packages/*/*.spec) -TOMLS = $(wildcard workspaces/*/Cargo.toml) - -DEPS = $(shell echo $(TOMLS)|awk -F '/' '{print "packages/"$$2"/"$$2".makedep"}') +DEPS = $(SPECS:.spec=.makedep) VARS = $(SPECS:.spec=.makevar) PKGS = $(SPECS:.spec=.makepkg) @@ -70,25 +68,27 @@ endef # to generate source files that must be in place before parsing the # spec file. %.makedep : %.spec $(DEP4SPEC) - @$(DEP4SPEC) --spec=$< > $@ + @$(DEP4SPEC) --spec=$< > $@.tmp + @mv $@.tmp $@ # `makevar` files generate variables that the `makepkg` files for # other packages can refer to. All `makevar` files must be evaluated # before any `makepkg` files, or else empty values could be added to # the dependency list. -%.makevar : %.spec $(SPEC2VAR) - @$(SPEC2VAR) --spec=$< --arch=$(ARCH) > $@ +%.makevar : %.spec %.makedep $(SPEC2VAR) + @$(SPEC2VAR) --spec=$< --arch=$(ARCH) > $@.tmp + @mv $@.tmp $@ # `makepkg` files define the package outputs obtained by building # the spec file, as well as the dependencies needed to build that # package. -%.makepkg : %.spec $(SPEC2PKG) - @$(SPEC2PKG) --spec=$< --arch=$(ARCH) > $@ +%.makepkg : %.spec %.makedep %.makevar $(SPEC2PKG) + @$(SPEC2PKG) --spec=$< --arch=$(ARCH) > $@.tmp + @mv $@.tmp $@ -# Order is important here. --include $(DEPS) --include $(VARS) --include $(PKGS) +include $(DEPS) +include $(VARS) +include $(PKGS) .PHONY: all $(ARCH) From 90a4c65b5ba5f063279b9a0044b9adb9f954628b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 7 May 2019 19:06:31 +0000 Subject: [PATCH 0034/1356] detect architecture and allow it to be overridden Signed-off-by: Ben Cressey --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3443e9c0..ef71dec7 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ OUTPUT ?= $(TOPDIR)/build OUTVAR := $(shell mkdir -p $(OUTPUT)) DATE := $(shell date --rfc-3339=date) -ARCH := x86_64 +ARCH ?= $(shell uname -m) DOCKER ?= docker From 8c652996d2bdb84706f02423269deba37f0ddaae Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 7 May 2019 19:07:06 +0000 Subject: [PATCH 0035/1356] update clean rule to remove image artifacts Signed-off-by: Ben Cressey --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ef71dec7..fe682138 100644 --- a/Makefile +++ b/Makefile @@ -101,6 +101,7 @@ all: $(ARCH) .PHONY: clean clean: - @rm -f $(OUTPUT)/*.rpm + @rm -f $(OUTPUT)/*.rpm $(OUTPUT)/*.tar $(OUTPUT)/*.lz4 $(OUTPUT)/*.img + @find $(TOPDIR) -name '*.make*' -delete include $(TOPDIR)/hack/rules.mk From d296c2c1c7b3e1ca19cad004d3635a5ae4b7c5ee Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 7 May 2019 22:23:16 +0000 Subject: [PATCH 0036/1356] Fetch upstream sources from lookaside cache or URL Attempt to fetch sources from an S3 bucket first, and if that fails attempt to fetch them from the Source URL in the RPM spec. Disallow fetching the RPM spec's URL if ALLOW_ARBITRARY_SOURCE_URL is not "true", for use in production builds. 
Signed-off-by: iliana destroyer of worlds --- Makefile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Makefile b/Makefile index fe682138..f6ea51c7 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) DEP4SPEC ?= $(TOPDIR)/bin/dep4spec SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg +ALLOW_ARBITRARY_SOURCE_URL ?= true SPECS = $(wildcard packages/*/*.spec) DEPS = $(SPECS:.spec=.makedep) @@ -63,6 +64,14 @@ define build_image --output-dir=/local/output endef +define fetch_upstream + curl -fsSL "https://thar-upstream-lookaside-cache.s3.us-west-2.amazonaws.com/$(3)/$(4)/$(3)" -o "packages/$(1)/$(3)" \ + || { [[ "z$(ALLOW_ARBITRARY_SOURCE_URL)" = "ztrue" ]] && curl -fsSL "$(2)" -o "packages/$(1)/$(3)"; } + if ! echo "SHA512 (packages/$(1)/$(3)) = $(4)" | sha512sum -c; then \ + rm -f "packages/$(1)/$(3)"; false; \ + fi +endef + # `makedep` files are a hook to provide additional dependencies when # building `makevar` and `makepkg` files. The intended use case is # to generate source files that must be in place before parsing the From 12569afef43ceaee2865d13ad06f5885b4837595 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Mon, 13 May 2019 18:47:09 +0000 Subject: [PATCH 0037/1356] Fetch dependencies for Rust packages Signed-off-by: iliana destroyer of worlds --- Makefile | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/Makefile b/Makefile index f6ea51c7..8d705daf 100644 --- a/Makefile +++ b/Makefile @@ -64,20 +64,12 @@ define build_image --output-dir=/local/output endef -define fetch_upstream - curl -fsSL "https://thar-upstream-lookaside-cache.s3.us-west-2.amazonaws.com/$(3)/$(4)/$(3)" -o "packages/$(1)/$(3)" \ - || { [[ "z$(ALLOW_ARBITRARY_SOURCE_URL)" = "ztrue" ]] && curl -fsSL "$(2)" -o "packages/$(1)/$(3)"; } - if ! echo "SHA512 (packages/$(1)/$(3)) = $(4)" | sha512sum -c; then \ - rm -f "packages/$(1)/$(3)"; false; \ - fi -endef - # `makedep` files are a hook to provide additional dependencies when # building `makevar` and `makepkg` files. The intended use case is # to generate source files that must be in place before parsing the # spec file. 
%.makedep : %.spec $(DEP4SPEC) - @$(DEP4SPEC) --spec=$< > $@.tmp + @$(DEP4SPEC) --spec=$< --arch=$(ARCH) > $@.tmp @mv $@.tmp $@ # `makevar` files generate variables that the `makepkg` files for From af05f47e049aca3366c8f31cd1dff525be47180a Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Wed, 15 May 2019 17:08:20 +0000 Subject: [PATCH 0038/1356] Further build script improvements from review Signed-off-by: iliana destroyer of worlds --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8d705daf..1aae1fef 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,8 @@ TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) DEP4SPEC ?= $(TOPDIR)/bin/dep4spec SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg -ALLOW_ARBITRARY_SOURCE_URL ?= true +FETCH_UPSTREAM ?= $(TOPDIR)/bin/fetch-upstream +export ALLOW_ARBITRARY_SOURCE_URL ?= true SPECS = $(wildcard packages/*/*.spec) DEPS = $(SPECS:.spec=.makedep) From 20735a9f3c9b55e0706731f4ad89ca2ac45ccbe8 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Wed, 15 May 2019 17:46:31 +0000 Subject: [PATCH 0039/1356] Add upload-sources target This requires credentials to the cache bucket if any files need to be uploaded; I expect that this will be part of our CI system so that sources automatically get mirrored when pull requests get merged. Signed-off-by: iliana destroyer of worlds --- Makefile | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Makefile b/Makefile index 1aae1fef..11036434 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ DEP4SPEC ?= $(TOPDIR)/bin/dep4spec SPEC2VAR ?= $(TOPDIR)/bin/spec2var SPEC2PKG ?= $(TOPDIR)/bin/spec2pkg FETCH_UPSTREAM ?= $(TOPDIR)/bin/fetch-upstream +UPLOAD_SOURCES ?= $(TOPDIR)/bin/upload-sources export ALLOW_ARBITRARY_SOURCE_URL ?= true SPECS = $(wildcard packages/*/*.spec) @@ -106,4 +107,11 @@ clean: @rm -f $(OUTPUT)/*.rpm $(OUTPUT)/*.tar $(OUTPUT)/*.lz4 $(OUTPUT)/*.img @find $(TOPDIR) -name '*.make*' -delete +.PHONY: sources +sources: $(SOURCES) + +.PHONY: upload-sources +upload-sources: $(SOURCES) + @$(UPLOAD_SOURCES) $^ + include $(TOPDIR)/hack/rules.mk From 2d2a42016f73bc0b54221a69fd8ab7609a0c7969 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 22 May 2019 15:58:36 +0000 Subject: [PATCH 0040/1356] exclude unnecessary artifacts from build context Signed-off-by: Ben Cressey --- .dockerignore | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..21142e5a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +/build/*.img +/build/*.lz4 +/build/*.tar +/build/*-debuginfo-*.rpm +/build/*-debugsource-*.rpm From a7be5d678ee103a467b1c86643ec589acef42928 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 9 Jun 2019 18:19:43 +0000 Subject: [PATCH 0041/1356] update build container to fedora 30 Signed-off-by: Ben Cressey --- packages/kernel/kernel.spec | 1 + ...el-linux-socket.h-for-genheaders-and.patch | 70 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 5c3ffeb2..240c1b0e 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -9,6 +9,7 @@ URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz Source100: config-%{_cross_arch} Patch1000: 
dm-add-support-to-directly-boot-to-a-mapped-device.patch +Patch1001: selinux-use-kernel-linux-socket.h-for-genheaders-and.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} diff --git a/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch b/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch new file mode 100644 index 00000000..725c82e0 --- /dev/null +++ b/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch @@ -0,0 +1,70 @@ +From dfbd199a7cfe3e3cd8531e1353cdbd7175bfbc5e Mon Sep 17 00:00:00 2001 +From: Paulo Alcantara +Date: Sun, 24 Feb 2019 21:55:28 -0300 +Subject: [PATCH] selinux: use kernel linux/socket.h for genheaders and mdp + +When compiling genheaders and mdp from a newer host kernel, the +following error happens: + + In file included from scripts/selinux/genheaders/genheaders.c:18: + ./security/selinux/include/classmap.h:238:2: error: #error New + address family defined, please update secclass_map. #error New + address family defined, please update secclass_map. ^~~~~ + make[3]: *** [scripts/Makefile.host:107: + scripts/selinux/genheaders/genheaders] Error 1 make[2]: *** + [scripts/Makefile.build:599: scripts/selinux/genheaders] Error 2 + make[1]: *** [scripts/Makefile.build:599: scripts/selinux] Error 2 + make[1]: *** Waiting for unfinished jobs.... + +Instead of relying on the host definition, include linux/socket.h in +classmap.h to have PF_MAX. + +Cc: stable@vger.kernel.org +Signed-off-by: Paulo Alcantara +Acked-by: Stephen Smalley +[PM: manually merge in mdp.c, subject line tweaks] +Signed-off-by: Paul Moore +--- + scripts/selinux/genheaders/genheaders.c | 1 - + scripts/selinux/mdp/mdp.c | 1 - + security/selinux/include/classmap.h | 1 + + 3 files changed, 1 insertion(+), 2 deletions(-) + +diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c +index 1ceedea847dd..544ca126a8a8 100644 +--- a/scripts/selinux/genheaders/genheaders.c ++++ b/scripts/selinux/genheaders/genheaders.c +@@ -9,7 +9,6 @@ + #include + #include + #include +-#include + + struct security_class_mapping { + const char *name; +diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c +index 073fe7537f6c..6d51b74bc679 100644 +--- a/scripts/selinux/mdp/mdp.c ++++ b/scripts/selinux/mdp/mdp.c +@@ -32,7 +32,6 @@ + #include + #include + #include +-#include + + static void usage(char *name) + { +diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h +index bd5fe0d3204a..201f7e588a29 100644 +--- a/security/selinux/include/classmap.h ++++ b/security/selinux/include/classmap.h +@@ -1,5 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + #include ++#include + + #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ + "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" +-- +2.21.0 + From bf8d1f26e247a572551455f37aa27d66c27590ea Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 16 Jul 2019 10:35:24 -0700 Subject: [PATCH 0042/1356] Initial update of CONTRIBUTING.md from template This makes our CONTRIBUTING document slightly more specific to us. 
* Fix the branch name we use * Add links to open/closed PR lists * General wording improvements * Put sentences on their own lines so they're easier to diff in the future * Don't arbitrarily break lines Signed-off-by: Tom Kirchner --- CONTRIBUTING.md | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 78300391..e19d5b7d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,18 +1,18 @@ # Contributing Guidelines -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. +Thank you for your interest in contributing to our project. +Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. -When filing an issue, please check [existing open](https://github.com/amazonlinux/PRIVATE-thar/issues), or [recently closed](https://github.com/amazonlinux/PRIVATE-thar/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: +When filing an issue, please check [existing open](https://github.com/amazonlinux/PRIVATE-thar/issues) and [closed](https://github.com/amazonlinux/PRIVATE-thar/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. +Please try to include as much information as you can. +Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used @@ -21,41 +21,43 @@ reported the issue. Please try to include as much information as you can. Detail ## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: +Contributions via pull requests are much appreciated. +Before starting a pull request, please ensure that: -1. You are working against the latest source on the *master* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. +1. You open an issue first to discuss any significant work - we would hate for your time to be wasted. +2. You are working against the latest source on the *develop* branch. +3. You check existing [open](https://github.com/amazonlinux/PRIVATE-thar/pulls) and [merged](https://github.com/amazonlinux/PRIVATE-thar/pulls?q=is%3Apr+is%3Aclosed) pull requests to make sure someone else hasn't addressed the problem already. To send us a pull request, please: 1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +2. 
Modify the source; please focus on the specific change you are contributing. If you also reformat the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). +GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/amazonlinux/PRIVATE-thar/labels/help%20wanted) issues is a great place to start. +Looking at the existing issues is a great way to find something to contribute on. +As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/amazonlinux/PRIVATE-thar/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). +Please do **not** create a public GitHub issue. ## Licensing -See the [LICENSE](https://github.com/amazonlinux/PRIVATE-thar/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. +See the [LICENSE](https://github.com/amazonlinux/PRIVATE-thar/blob/master/LICENSE) file for our project's licensing. +We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
From f90a0be14e4a1eaebd5be2e258b78ceabe0c05e1 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 16 Jul 2019 21:55:51 +0000 Subject: [PATCH 0043/1356] Enable BLK_DEV_NVME Signed-off-by: iliana destroyer of worlds --- packages/kernel/config-aarch64 | 4 ++-- packages/kernel/config-x86_64 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 index 6483fb4f..87c876ca 100644 --- a/packages/kernel/config-aarch64 +++ b/packages/kernel/config-aarch64 @@ -1572,8 +1572,8 @@ CONFIG_VIRTIO_BLK=y # CONFIG_VIRTIO_BLK_SCSI is not set CONFIG_BLK_DEV_RBD=m # CONFIG_BLK_DEV_RSXX is not set -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m # CONFIG_NVME_FC is not set diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index 5ec4217e..8847f23a 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -1656,8 +1656,8 @@ CONFIG_VIRTIO_BLK=y # CONFIG_VIRTIO_BLK_SCSI is not set CONFIG_BLK_DEV_RBD=m # CONFIG_BLK_DEV_RSXX is not set -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y # CONFIG_NVME_RDMA is not set # CONFIG_NVME_FC is not set # CONFIG_NVME_TARGET is not set From 6349825e5898a5248ad3105946470dce4168608b Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 16 Jul 2019 22:59:56 +0000 Subject: [PATCH 0044/1356] Enable XEN_BLKDEV_FRONTEND Signed-off-by: iliana destroyer of worlds --- packages/kernel/config-x86_64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index 8847f23a..af20e359 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -1650,7 +1650,7 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 # CONFIG_CDROM_PKTCDVD_WCACHE is not set CONFIG_ATA_OVER_ETH=m -CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_BLKDEV_FRONTEND=y CONFIG_XEN_BLKDEV_BACKEND=m CONFIG_VIRTIO_BLK=y # CONFIG_VIRTIO_BLK_SCSI is not set From 6cc342e44be6cf0cd88adc9198bf0b93a34ca382 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 13 May 2019 17:36:01 +0000 Subject: [PATCH 0045/1356] build containerd Signed-off-by: Ben Cressey --- packages/kernel/config-aarch64 | 2 +- packages/kernel/config-x86_64 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 index 87c876ca..8f6f302b 100644 --- a/packages/kernel/config-aarch64 +++ b/packages/kernel/config-aarch64 @@ -4390,7 +4390,7 @@ CONFIG_QUOTACTL=y CONFIG_AUTOFS4_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m -CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS=y # CONFIG_OVERLAY_FS_REDIRECT_DIR is not set # CONFIG_OVERLAY_FS_INDEX is not set diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index af20e359..daa179b5 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -3771,7 +3771,7 @@ CONFIG_QUOTACTL_COMPAT=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_CUSE=m -CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS=y # CONFIG_OVERLAY_FS_REDIRECT_DIR is not set # CONFIG_OVERLAY_FS_INDEX is not set From b92c3e9ccd0372bac8cbafc9b943833beaa747f2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 16 May 2019 17:03:06 +0000 Subject: [PATCH 0046/1356] kernel: require kmod for module info Signed-off-by: Ben Cressey --- packages/kernel/kernel.spec | 1 + 1 file changed, 1 insertion(+) diff --git 
a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 240c1b0e..6eadce84 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -14,6 +14,7 @@ BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} BuildRequires: hostname +BuildRequires: kmod BuildRequires: openssl-devel %description From 8ad291f035f8e370f460038b18bffac2f9cceef5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 16 May 2019 17:59:04 +0000 Subject: [PATCH 0047/1356] kernel: enable ena driver Signed-off-by: Ben Cressey --- packages/kernel/config-x86_64 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 index daa179b5..61a5edea 100644 --- a/packages/kernel/config-x86_64 +++ b/packages/kernel/config-x86_64 @@ -4628,3 +4628,5 @@ CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y CONFIG_SBITMAP=y # CONFIG_STRING_SELFTEST is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=y From f2738b0ff05e834809642f2037171c29bb58fb18 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 18 Jun 2019 22:56:57 +0000 Subject: [PATCH 0048/1356] add AWS EKS AMI recipe Signed-off-by: Ben Cressey --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 11036434..4ed4309f 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ .DEFAULT_GOAL := all OS := thar +RECIPE ?= aws-eks-ami TOPDIR := $(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))) DEP4SPEC ?= $(TOPDIR)/bin/dep4spec SPEC2VAR ?= $(TOPDIR)/bin/spec2var @@ -49,7 +50,7 @@ define build_image $(eval HASH:= $(shell sha1sum $(2) /dev/null | sha1sum - | awk '{print $$1}')) @$(BUILDCTL) build \ --opt target=builder \ - --opt build-arg:PACKAGE=$(OS)-$(1)-release \ + --opt build-arg:PACKAGE=$(OS)-$(1)-$(RECIPE) \ --opt build-arg:ARCH=$(1) \ --opt build-arg:HASH=$(HASH) \ --opt build-arg:DATE=$(DATE) \ @@ -96,7 +97,7 @@ include $(PKGS) .PHONY: all $(ARCH) .SECONDEXPANSION: -$(ARCH): $$($(OS)-$(ARCH)-release) +$(ARCH): $$($(OS)-$(ARCH)-$(RECIPE)) $(eval PKGS:= $(wildcard $(OUTPUT)/$(OS)-$(ARCH)-*.rpm)) $(call build_image,$@,$(PKGS)) From ba52da95f74ad2d35f57f73ead34f45cf650db22 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 25 Jul 2019 03:17:31 +0000 Subject: [PATCH 0049/1356] kernel: add patches to trust CPU hwrng Signed-off-by: Ben Cressey --- packages/kernel/kernel.spec | 2 + ...nfig-option-to-trust-the-CPU-s-hwrng.patch | 78 ++++++++++++++++++ ...ndom-make-CPU-trust-a-boot-parameter.patch | 82 +++++++++++++++++++ 3 files changed, 162 insertions(+) create mode 100644 packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch create mode 100644 packages/kernel/random-make-CPU-trust-a-boot-parameter.patch diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 6eadce84..c067676b 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -10,6 +10,8 @@ Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz Source100: config-%{_cross_arch} Patch1000: dm-add-support-to-directly-boot-to-a-mapped-device.patch Patch1001: selinux-use-kernel-linux-socket.h-for-genheaders-and.patch +Patch1002: random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch +Patch1003: random-make-CPU-trust-a-boot-parameter.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} diff --git a/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch 
b/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch new file mode 100644 index 00000000..0cfa7508 --- /dev/null +++ b/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch @@ -0,0 +1,78 @@ +From 538b177b9031dcc46bc14ae358525892c35e2581 Mon Sep 17 00:00:00 2001 +From: Theodore Ts'o +Date: Tue, 17 Jul 2018 18:24:27 -0400 +Subject: [PATCH 1/2] random: add a config option to trust the CPU's hwrng + +This gives the user building their own kernel (or a Linux +distribution) the option of deciding whether or not to trust the CPU's +hardware random number generator (e.g., RDRAND for x86 CPU's) as being +correctly implemented and not having a back door introduced (perhaps +courtesy of a Nation State's law enforcement or intelligence +agencies). + +This will prevent getrandom(2) from blocking, if there is a +willingness to trust the CPU manufacturer. + +Signed-off-by: Theodore Ts'o +--- + drivers/char/Kconfig | 14 ++++++++++++++ + drivers/char/random.c | 11 ++++++++++- + 2 files changed, 24 insertions(+), 1 deletion(-) + +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index c28dca0c613d..b732016921a2 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -590,3 +590,17 @@ source "drivers/char/xillybus/Kconfig" + + endmenu + ++config RANDOM_TRUST_CPU ++ bool "Trust the CPU manufacturer to initialize Linux's CRNG" ++ depends on X86 || S390 || PPC ++ default n ++ help ++ Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or ++ RDRAND, IBM for the S390 and Power PC architectures) is trustworthy ++ for the purposes of initializing Linux's CRNG. Since this is not ++ something that can be independently audited, this amounts to trusting ++ that CPU manufacturer (perhaps with the insistence or mandate ++ of a Nation State's intelligence or law enforcement agencies) ++ has not installed a hidden back door to compromise the CPU's ++ random number generation facilities. 
++ +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 8ad92707e45f..efdb37fa18ed 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -770,6 +770,7 @@ static void invalidate_batched_entropy(void); + static void crng_initialize(struct crng_state *crng) + { + int i; ++ int arch_init = 1; + unsigned long rv; + + memcpy(&crng->state[0], "expand 32-byte k", 16); +@@ -780,10 +781,18 @@ static void crng_initialize(struct crng_state *crng) + _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); + for (i = 4; i < 16; i++) { + if (!arch_get_random_seed_long(&rv) && +- !arch_get_random_long(&rv)) ++ !arch_get_random_long(&rv)) { + rv = random_get_entropy(); ++ arch_init = 0; ++ } + crng->state[i] ^= rv; + } ++#ifdef CONFIG_RANDOM_TRUST_CPU ++ if (arch_init) { ++ crng_init = 2; ++ pr_notice("random: crng done (trusting CPU's manufacturer)\n"); ++ } ++#endif + crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; + } + +-- +2.21.0 + diff --git a/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch b/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch new file mode 100644 index 00000000..7a90b5bd --- /dev/null +++ b/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch @@ -0,0 +1,82 @@ +From 8438d10a15a63e3ebe7f73b134423c988712eec8 Mon Sep 17 00:00:00 2001 +From: Kees Cook +Date: Mon, 27 Aug 2018 14:51:54 -0700 +Subject: [PATCH 2/2] random: make CPU trust a boot parameter + +Instead of forcing a distro or other system builder to choose +at build time whether the CPU is trusted for CRNG seeding via +CONFIG_RANDOM_TRUST_CPU, provide a boot-time parameter for end users to +control the choice. The CONFIG will set the default state instead. + +Signed-off-by: Kees Cook +Signed-off-by: Theodore Ts'o +--- + Documentation/admin-guide/kernel-parameters.txt | 6 ++++++ + drivers/char/Kconfig | 4 ++-- + drivers/char/random.c | 11 ++++++++--- + 3 files changed, 16 insertions(+), 5 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 05496622b4ef..915013dab0bf 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3267,6 +3267,12 @@ + ramdisk_size= [RAM] Sizes of RAM disks in kilobytes + See Documentation/blockdev/ramdisk.txt. + ++ random.trust_cpu={on,off} ++ [KNL] Enable or disable trusting the use of the ++ CPU's random number generator (if available) to ++ fully seed the kernel's CRNG. Default is controlled ++ by CONFIG_RANDOM_TRUST_CPU. ++ + ras=option[,option,...] [KNL] RAS-specific options + + cec_disable [X86] +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index b732016921a2..6328b571b4b8 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -602,5 +602,5 @@ config RANDOM_TRUST_CPU + that CPU manufacturer (perhaps with the insistence or mandate + of a Nation State's intelligence or law enforcement agencies) + has not installed a hidden back door to compromise the CPU's +- random number generation facilities. +- ++ random number generation facilities. This can also be configured ++ at boot with "random.trust_cpu=on/off". 
+diff --git a/drivers/char/random.c b/drivers/char/random.c +index efdb37fa18ed..5f3955220487 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -767,6 +767,13 @@ static struct crng_state **crng_node_pool __read_mostly; + + static void invalidate_batched_entropy(void); + ++static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); ++static int __init parse_trust_cpu(char *arg) ++{ ++ return kstrtobool(arg, &trust_cpu); ++} ++early_param("random.trust_cpu", parse_trust_cpu); ++ + static void crng_initialize(struct crng_state *crng) + { + int i; +@@ -787,12 +794,10 @@ static void crng_initialize(struct crng_state *crng) + } + crng->state[i] ^= rv; + } +-#ifdef CONFIG_RANDOM_TRUST_CPU +- if (arch_init) { ++ if (trust_cpu && arch_init) { + crng_init = 2; + pr_notice("random: crng done (trusting CPU's manufacturer)\n"); + } +-#endif + crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; + } + +-- +2.21.0 + From 9059e7c1a792e6da8fba419713619ceb36d3dc5c Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 30 Jul 2019 20:52:15 +0000 Subject: [PATCH 0050/1356] kernel: Rebase to 4.19.58-21.57.amzn2 Signed-off-by: iliana destroyer of worlds --- packages/kernel/.gitignore | 2 +- ...to-directly-boot-to-a-mapped-device.patch} | 30 +- ...t-confusion-for-dm_allowed_targets-a.patch | 30 + ...-init-fix-max-devices-targets-checks.patch | 47 + ...hang-in-early-create-error-condition.patch | 48 + ...-init-fix-incorrect-uses-of-kstrndup.patch | 41 + ...railing-newline-from-calls-to-DMERR-.patch | 41 + ...fsx-Disable-Werror-stringop-overflow.patch | 20 + packages/kernel/config-aarch64 | 5170 ----------------- packages/kernel/config-thar | 23 + packages/kernel/config-x86_64 | 4632 --------------- packages/kernel/kernel.spec | 32 +- packages/kernel/latest-srpm-url.sh | 2 + ...nfig-option-to-trust-the-CPU-s-hwrng.patch | 78 - ...ndom-make-CPU-trust-a-boot-parameter.patch | 82 - ...el-linux-socket.h-for-genheaders-and.patch | 70 - packages/kernel/sources | 2 +- 17 files changed, 293 insertions(+), 10057 deletions(-) rename packages/kernel/{dm-add-support-to-directly-boot-to-a-mapped-device.patch => 0001-dm-add-support-to-directly-boot-to-a-mapped-device.patch} (96%) create mode 100644 packages/kernel/0002-dm-init-fix-const-confusion-for-dm_allowed_targets-a.patch create mode 100644 packages/kernel/0003-dm-init-fix-max-devices-targets-checks.patch create mode 100644 packages/kernel/0004-dm-ioctl-fix-hang-in-early-create-error-condition.patch create mode 100644 packages/kernel/0005-dm-init-fix-incorrect-uses-of-kstrndup.patch create mode 100644 packages/kernel/0006-dm-init-remove-trailing-newline-from-calls-to-DMERR-.patch create mode 100644 packages/kernel/0007-lustrefsx-Disable-Werror-stringop-overflow.patch delete mode 100644 packages/kernel/config-aarch64 create mode 100644 packages/kernel/config-thar delete mode 100644 packages/kernel/config-x86_64 create mode 100755 packages/kernel/latest-srpm-url.sh delete mode 100644 packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch delete mode 100644 packages/kernel/random-make-CPU-trust-a-boot-parameter.patch delete mode 100644 packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch diff --git a/packages/kernel/.gitignore b/packages/kernel/.gitignore index 91a1b59d..a791d78a 100644 --- a/packages/kernel/.gitignore +++ b/packages/kernel/.gitignore @@ -1 +1 @@ -linux-4.14.102.tar.xz +kernel-4.19.58-21.57.amzn2.src.rpm diff --git 
a/packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch b/packages/kernel/0001-dm-add-support-to-directly-boot-to-a-mapped-device.patch similarity index 96% rename from packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch rename to packages/kernel/0001-dm-add-support-to-directly-boot-to-a-mapped-device.patch index 3d55c82d..07262cb9 100644 --- a/packages/kernel/dm-add-support-to-directly-boot-to-a-mapped-device.patch +++ b/packages/kernel/0001-dm-add-support-to-directly-boot-to-a-mapped-device.patch @@ -1,8 +1,10 @@ -From 4e6a7ccc14989c2c859b302c070363ddf691dff9 Mon Sep 17 00:00:00 2001 +From ee8d4f136ea11e6b0a1488eccc154f30387ef231 Mon Sep 17 00:00:00 2001 From: Helen Koike Date: Thu, 21 Feb 2019 17:33:34 -0300 Subject: [PATCH] dm: add support to directly boot to a mapped device +commit 6bbc923dfcf57d6b97388819a7393835664c7a8e upstream. + Add a "create" module parameter, which allows device-mapper targets to be configured at boot time. This enables early use of DM targets in the boot process (as the root device or otherwise) without the need of an @@ -48,11 +50,11 @@ Signed-off-by: Helen Koike Reviewed-by: Kees Cook Signed-off-by: Mike Snitzer --- - Documentation/device-mapper/dm-init.txt | 114 ++++++++++++ - drivers/md/Kconfig | 12 ++ + Documentation/device-mapper/dm-init.txt | 114 +++++++++ + drivers/md/Kconfig | 12 + drivers/md/Makefile | 4 + - drivers/md/dm-init.c | 303 ++++++++++++++++++++++++++++++++ - drivers/md/dm-ioctl.c | 103 +++++++++++ + drivers/md/dm-init.c | 303 ++++++++++++++++++++++++ + drivers/md/dm-ioctl.c | 103 ++++++++ include/linux/device-mapper.h | 9 + 6 files changed, 545 insertions(+) create mode 100644 Documentation/device-mapper/dm-init.txt @@ -179,10 +181,10 @@ index 000000000000..8464ee7c01b8 + fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd + 51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584 diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig -index 4a249ee86364..4ea706f7790b 100644 +index 8b8c123cae66..9949fd9d8a5e 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig -@@ -428,6 +428,18 @@ config DM_DELAY +@@ -447,6 +447,18 @@ config DM_DELAY If unsure, say N. 
@@ -202,12 +204,12 @@ index 4a249ee86364..4ea706f7790b 100644 bool "DM uevents" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile -index e94b6f9be941..d56331fbd895 100644 +index 822f4e8753bc..a52b703e588e 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile -@@ -64,6 +64,10 @@ obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o - obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o +@@ -69,6 +69,10 @@ obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o obj-$(CONFIG_DM_ZONED) += dm-zoned.o + obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o +ifeq ($(CONFIG_DM_INIT),y) +dm-mod-objs += dm-init.o @@ -526,10 +528,10 @@ index 000000000000..b53f30f16b4d +module_param(create, charp, 0); +MODULE_PARM_DESC(create, "Create a mapped device in early boot"); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c -index ca948155191a..b7e6c7311a93 100644 +index f666778ad237..c740153b4e52 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c -@@ -2017,3 +2017,106 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) +@@ -2018,3 +2018,106 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) return r; } @@ -637,7 +639,7 @@ index ca948155191a..b7e6c7311a93 100644 + return r; +} diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h -index a5538433c927..990e7c2f84b1 100644 +index bef2e36c01b4..6f20e62cfd92 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -10,6 +10,7 @@ @@ -648,7 +650,7 @@ index a5538433c927..990e7c2f84b1 100644 #include #include -@@ -457,6 +458,14 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, +@@ -424,6 +425,14 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start); union map_info *dm_get_rq_mapinfo(struct request *rq); diff --git a/packages/kernel/0002-dm-init-fix-const-confusion-for-dm_allowed_targets-a.patch b/packages/kernel/0002-dm-init-fix-const-confusion-for-dm_allowed_targets-a.patch new file mode 100644 index 00000000..4c6c4692 --- /dev/null +++ b/packages/kernel/0002-dm-init-fix-const-confusion-for-dm_allowed_targets-a.patch @@ -0,0 +1,30 @@ +From cacedd0b9539f80107be153f6c76bfc910c40c2e Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Thu, 21 Mar 2019 15:00:09 -0700 +Subject: [PATCH] dm init: fix const confusion for dm_allowed_targets array + +commit 93fc91675a6c84d6ab355188aea398bda2cc51f8 upstream. + +A non const pointer to const cannot be marked initconst. +Mark the array actually const. 
+ +Fixes: 6bbc923dfcf5 dm: add support to directly boot to a mapped device +Signed-off-by: Andi Kleen +Signed-off-by: Mike Snitzer +--- + drivers/md/dm-init.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +index b53f30f16b4d..4b76f84424c3 100644 +--- a/drivers/md/dm-init.c ++++ b/drivers/md/dm-init.c +@@ -36,7 +36,7 @@ struct dm_device { + struct list_head list; + }; + +-const char *dm_allowed_targets[] __initconst = { ++const char * const dm_allowed_targets[] __initconst = { + "crypt", + "delay", + "linear", diff --git a/packages/kernel/0003-dm-init-fix-max-devices-targets-checks.patch b/packages/kernel/0003-dm-init-fix-max-devices-targets-checks.patch new file mode 100644 index 00000000..9d694169 --- /dev/null +++ b/packages/kernel/0003-dm-init-fix-max-devices-targets-checks.patch @@ -0,0 +1,47 @@ +From 9e2f243adcc763d54b1403e6c7c69680d358b3c5 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 26 Apr 2019 17:09:55 -0300 +Subject: [PATCH] dm init: fix max devices/targets checks + +commit 8e890c1ab1b1e0f765cd8da82c4dee011698a5e8 upstream. + +dm-init should allow up to DM_MAX_{DEVICES,TARGETS} for devices/targets, +and not DM_MAX_{DEVICES,TARGETS} - 1. + +Fix the checks and also fix the error message when the number of devices +is surpassed. + +Fixes: 6bbc923dfcf57d ("dm: add support to directly boot to a mapped device") +Cc: stable@vger.kernel.org +Signed-off-by: Helen Koike +Signed-off-by: Mike Snitzer +--- + drivers/md/dm-init.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +index 4b76f84424c3..352e803f566e 100644 +--- a/drivers/md/dm-init.c ++++ b/drivers/md/dm-init.c +@@ -160,7 +160,7 @@ static int __init dm_parse_table(struct dm_device *dev, char *str) + + while (table_entry) { + DMDEBUG("parsing table \"%s\"", str); +- if (++dev->dmi.target_count >= DM_MAX_TARGETS) { ++ if (++dev->dmi.target_count > DM_MAX_TARGETS) { + DMERR("too many targets %u > %d", + dev->dmi.target_count, DM_MAX_TARGETS); + return -EINVAL; +@@ -242,9 +242,9 @@ static int __init dm_parse_devices(struct list_head *devices, char *str) + return -ENOMEM; + list_add_tail(&dev->list, devices); + +- if (++ndev >= DM_MAX_DEVICES) { +- DMERR("too many targets %u > %d", +- dev->dmi.target_count, DM_MAX_TARGETS); ++ if (++ndev > DM_MAX_DEVICES) { ++ DMERR("too many devices %lu > %d", ++ ndev, DM_MAX_DEVICES); + return -EINVAL; + } + diff --git a/packages/kernel/0004-dm-ioctl-fix-hang-in-early-create-error-condition.patch b/packages/kernel/0004-dm-ioctl-fix-hang-in-early-create-error-condition.patch new file mode 100644 index 00000000..a3fa52fa --- /dev/null +++ b/packages/kernel/0004-dm-ioctl-fix-hang-in-early-create-error-condition.patch @@ -0,0 +1,48 @@ +From 269b0730e509ee13b67971257078c2b4a06b0668 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Wed, 15 May 2019 13:50:54 -0300 +Subject: [PATCH] dm ioctl: fix hang in early create error condition + +commit 0f41fcf78849c902ddca564f99a8e23ccfc80333 upstream. + +The dm_early_create() function (which deals with "dm-mod.create=" kernel +command line option) calls dm_hash_insert() who gets an extra reference +to the md object. + +In case of failure, this reference wasn't being released, causing +dm_destroy() to hang, thus hanging the whole boot process. + +Fix this by calling __hash_remove() in the error path. 
+ +Fixes: 6bbc923dfcf57d ("dm: add support to directly boot to a mapped device") +Cc: stable@vger.kernel.org +Signed-off-by: Helen Koike +Signed-off-by: Mike Snitzer +--- + drivers/md/dm-ioctl.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index c740153b4e52..1e03bc89e20f 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -2069,7 +2069,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, + /* alloc table */ + r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); + if (r) +- goto err_destroy_dm; ++ goto err_hash_remove; + + /* add targets */ + for (i = 0; i < dmi->target_count; i++) { +@@ -2116,6 +2116,10 @@ int __init dm_early_create(struct dm_ioctl *dmi, + + err_destroy_table: + dm_table_destroy(t); ++err_hash_remove: ++ (void) __hash_remove(__get_name_cell(dmi->name)); ++ /* release reference from __get_name_cell */ ++ dm_put(md); + err_destroy_dm: + dm_put(md); + dm_destroy(md); diff --git a/packages/kernel/0005-dm-init-fix-incorrect-uses-of-kstrndup.patch b/packages/kernel/0005-dm-init-fix-incorrect-uses-of-kstrndup.patch new file mode 100644 index 00000000..ab342cb7 --- /dev/null +++ b/packages/kernel/0005-dm-init-fix-incorrect-uses-of-kstrndup.patch @@ -0,0 +1,41 @@ +From 34ccdc7a31cc7edc98e93a141b0bcb86ce28154e Mon Sep 17 00:00:00 2001 +From: Gen Zhang +Date: Wed, 29 May 2019 09:33:20 +0800 +Subject: [PATCH] dm init: fix incorrect uses of kstrndup() + +commit dec7e6494e1aea6bf676223da3429cd17ce0af79 upstream. + +Fix 2 kstrndup() calls with incorrect argument order. + +Fixes: 6bbc923dfcf5 ("dm: add support to directly boot to a mapped device") +Cc: stable@vger.kernel.org # v5.1 +Signed-off-by: Gen Zhang +Signed-off-by: Mike Snitzer +--- + drivers/md/dm-init.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +index 352e803f566e..64611633e77c 100644 +--- a/drivers/md/dm-init.c ++++ b/drivers/md/dm-init.c +@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str) + return ERR_PTR(-EINVAL); + } + /* target_args */ +- dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL, +- DM_MAX_STR_SIZE); ++ dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE, ++ GFP_KERNEL); + if (!dev->target_args_array[n]) + return ERR_PTR(-ENOMEM); + +@@ -275,7 +275,7 @@ static int __init dm_init_init(void) + DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE); + return -EINVAL; + } +- str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE); ++ str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL); + if (!str) + return -ENOMEM; + diff --git a/packages/kernel/0006-dm-init-remove-trailing-newline-from-calls-to-DMERR-.patch b/packages/kernel/0006-dm-init-remove-trailing-newline-from-calls-to-DMERR-.patch new file mode 100644 index 00000000..ee8cc668 --- /dev/null +++ b/packages/kernel/0006-dm-init-remove-trailing-newline-from-calls-to-DMERR-.patch @@ -0,0 +1,41 @@ +From f19469dbd4fe5c90d6de943e488553b6f53d20f5 Mon Sep 17 00:00:00 2001 +From: Stephen Boyd +Date: Tue, 4 Jun 2019 18:27:29 -0700 +Subject: [PATCH] dm init: remove trailing newline from calls to DMERR() and + DMINFO() + +commit 10c9c8e7c09b4d32b31df1bd14673bd6dbfc50be upstream. + +These printing macros already add a trailing newline, so having another +one here just makes for blank lines when these prints are enabled. +Remove these needless newlines. 
+ +Fixes: 6bbc923dfcf5 ("dm: add support to directly boot to a mapped device") +Signed-off-by: Stephen Boyd +Signed-off-by: Mike Snitzer +--- + drivers/md/dm-init.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c +index 64611633e77c..728733a514c7 100644 +--- a/drivers/md/dm-init.c ++++ b/drivers/md/dm-init.c +@@ -272,7 +272,7 @@ static int __init dm_init_init(void) + return 0; + + if (strlen(create) >= DM_MAX_STR_SIZE) { +- DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE); ++ DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE); + return -EINVAL; + } + str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL); +@@ -283,7 +283,7 @@ static int __init dm_init_init(void) + if (r) + goto out; + +- DMINFO("waiting for all devices to be available before creating mapped devices\n"); ++ DMINFO("waiting for all devices to be available before creating mapped devices"); + wait_for_device_probe(); + + list_for_each_entry(dev, &devices, list) { diff --git a/packages/kernel/0007-lustrefsx-Disable-Werror-stringop-overflow.patch b/packages/kernel/0007-lustrefsx-Disable-Werror-stringop-overflow.patch new file mode 100644 index 00000000..9030e96c --- /dev/null +++ b/packages/kernel/0007-lustrefsx-Disable-Werror-stringop-overflow.patch @@ -0,0 +1,20 @@ +From b85e7195a25319afb421a6a3ee2065fc8d225a8b Mon Sep 17 00:00:00 2001 +From: iliana destroyer of worlds +Date: Tue, 30 Jul 2019 12:59:09 -0700 +Subject: [PATCH] lustrefsx: Disable -Werror=stringop-overflow= + +Signed-off-by: iliana destroyer of worlds +--- + drivers/staging/lustrefsx/Makefile.rules | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/staging/lustrefsx/Makefile.rules b/drivers/staging/lustrefsx/Makefile.rules +index a0d56e80f2ce..62390580a3b4 100644 +--- a/drivers/staging/lustrefsx/Makefile.rules ++++ b/drivers/staging/lustrefsx/Makefile.rules +@@ -3,4 +3,4 @@ ccflags-y += -include $(srctree)/drivers/staging/lustrefsx/config.h + ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/libcfs/include + ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/lnet/include + ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/lustre/include +-ccflags-y += -Wno-format-truncation -Werror ++ccflags-y += -Wno-format-truncation -Werror -Wno-error=stringop-overflow= diff --git a/packages/kernel/config-aarch64 b/packages/kernel/config-aarch64 deleted file mode 100644 index 8f6f302b..00000000 --- a/packages/kernel/config-aarch64 +++ /dev/null @@ -1,5170 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.14.102 Kernel Configuration -# -CONFIG_ARM64=y -CONFIG_64BIT=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_MMU=y -CONFIG_ARM64_PAGE_SHIFT=12 -CONFIG_ARM64_CONT_SHIFT=4 -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_BITS_MAX=33 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y -CONFIG_HAVE_GENERIC_GUP=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_SMP=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_FHANDLE=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_GENERIC_IRQ_CHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_HANDLE_DOMAIN_IRQ=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -# CONFIG_TICK_CPU_ACCOUNTING is not set -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -# CONFIG_TASKS_RCU is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_CONTEXT_TRACKING=y -# CONFIG_CONTEXT_TRACKING_FORCE is not set -# CONFIG_BUILD_BIN2C is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_BLK_CGROUP=y 
-# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -# CONFIG_CGROUP_RDMA is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -# CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_BPF=y -# CONFIG_EXPERT is not set -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_POSIX_TIMERS=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -# CONFIG_BPF_SYSCALL is not set -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_USERFAULTFD=y -CONFIG_PCI_QUIRKS=y -CONFIG_MEMBARRIER=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_MEMCG_SYSFS_ON is not set -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -CONFIG_SLAB_MERGE_DEFAULT=y -# CONFIG_SLAB_FREELIST_RANDOM is not set -# CONFIG_SLAB_FREELIST_HARDENED is not set -CONFIG_SLUB_CPU_PARTIAL=y -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_UPROBES=y -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_GCC_PLUGINS=y -# CONFIG_GCC_PLUGINS is not set -CONFIG_HAVE_CC_STACKPROTECTOR=y -CONFIG_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR_NONE is not set -# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_THIN_ARCHIVES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -# CONFIG_HAVE_ARCH_HASH is not set -# CONFIG_ISA_BUS_API is not set -CONFIG_CLONE_BACKWARDS=y -# CONFIG_CPU_NO_EFFICIENT_FFS is not set -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set -# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -# CONFIG_REFCOUNT_FULL is not set - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -CONFIG_MODULE_SIG_SHA1=y -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha1" -# CONFIG_MODULE_COMPRESS is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_CMDLINE_PARSER is not set -# CONFIG_BLK_WBT is not set -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -# CONFIG_DEFAULT_CFQ is not set -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="deadline" -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -# CONFIG_IOSCHED_BFQ is not set -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Platform selection -# -# CONFIG_ARCH_ACTIONS is not set -# CONFIG_ARCH_SUNXI is not set -CONFIG_ARCH_ALPINE=y -# CONFIG_ARCH_BCM2835 is not set -# CONFIG_ARCH_BCM_IPROC is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_BRCMSTB 
is not set -# CONFIG_ARCH_EXYNOS is not set -# CONFIG_ARCH_LAYERSCAPE is not set -# CONFIG_ARCH_LG1K is not set -CONFIG_ARCH_HISI=y -# CONFIG_ARCH_MEDIATEK is not set -# CONFIG_ARCH_MESON is not set -# CONFIG_ARCH_MVEBU is not set -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_REALTEK is not set -# CONFIG_ARCH_ROCKCHIP is not set -CONFIG_ARCH_SEATTLE=y -# CONFIG_ARCH_RENESAS is not set -# CONFIG_ARCH_STRATIX10 is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -# CONFIG_ARCH_UNIPHIER is not set -CONFIG_ARCH_VEXPRESS=y -# CONFIG_ARCH_VULCAN is not set -CONFIG_ARCH_XGENE=y -# CONFIG_ARCH_ZX is not set -# CONFIG_ARCH_ZYNQMP is not set - -# -# Bus support -# -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIE_ECRC=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -# CONFIG_PCIE_DPC is not set -# CONFIG_PCIE_PTM is not set -CONFIG_PCI_BUS_ADDR_T_64BIT=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=y -CONFIG_PCI_ATS=y -CONFIG_PCI_ECAM=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# DesignWare PCI Core Support -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -# CONFIG_PCIE_DW_PLAT is not set -CONFIG_PCI_HISI=y -# CONFIG_PCIE_QCOM is not set -# CONFIG_PCIE_KIRIN is not set - -# -# PCI host controller drivers -# -CONFIG_PCI_HOST_COMMON=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCI_XGENE=y -CONFIG_PCI_XGENE_MSI=y -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_834220=y -CONFIG_ARM64_ERRATUM_843419=y -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23144=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_CAVIUM_ERRATUM_30115=y -CONFIG_QCOM_FALKOR_ERRATUM_1003=y -CONFIG_QCOM_FALKOR_ERRATUM_1009=y -CONFIG_QCOM_QDF2400_ERRATUM_0065=y -CONFIG_QCOM_FALKOR_ERRATUM_E1041=y -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_16K_PAGES is not set -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_VA_BITS_39 is not set -CONFIG_ARM64_VA_BITS_48=y -CONFIG_ARM64_VA_BITS=48 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_SCHED_MC=y -CONFIG_SCHED_SMT=y -CONFIG_NR_CPUS=4096 -CONFIG_HOTPLUG_CPU=y -CONFIG_NUMA=y -CONFIG_NODES_SHIFT=2 -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_HOLES_IN_ZONE=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_HZ_100=y -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=100 -CONFIG_SCHED_HRTICK=y 
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_SYS_SUPPORTS_HUGETLBFS=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_NO_BOOTMEM=y -CONFIG_MEMORY_ISOLATION=y -# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_MEMORY_FAILURE is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -# CONFIG_ARCH_WANTS_THP_SWAP is not set -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -CONFIG_CMA_AREAS=7 -CONFIG_ZSWAP=y -CONFIG_ZPOOL=y -CONFIG_ZBUD=y -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=y -# CONFIG_PGTABLE_MAPPING is not set -CONFIG_ZSMALLOC_STAT=y -CONFIG_GENERIC_EARLY_IOREMAP=y -CONFIG_IDLE_PAGE_TRACKING=y -# CONFIG_PERCPU_STATS is not set -CONFIG_SECCOMP=y -CONFIG_PARAVIRT=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y -# CONFIG_XEN is not set -CONFIG_FORCE_MAX_ZONEORDER=11 -CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_ARM64_SSBD=y -# CONFIG_ARM64_SW_TTBR0_PAN is not set - -# -# ARMv8.1 architectural features -# -CONFIG_ARM64_HW_AFDBM=y -CONFIG_ARM64_PAN=y -CONFIG_ARM64_LSE_ATOMICS=y -CONFIG_ARM64_VHE=y - -# -# ARMv8.2 architectural features -# -CONFIG_ARM64_UAO=y -# CONFIG_ARM64_PMEM is not set -CONFIG_ARM64_MODULE_CMODEL_LARGE=y -# CONFIG_RANDOMIZE_BASE is not set - -# -# Boot options -# -CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y -CONFIG_CMDLINE="console=ttyAMA0" -# CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_DMI=y - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# CONFIG_COMPAT is not set - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_OPP=y -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_CPU_PM=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -CONFIG_DT_IDLE_STATES=y - -# -# ARM CPU Idle Drivers -# -CONFIG_ARM_CPUIDLE=y -# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set - -# -# CPU 
Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set -# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set -CONFIG_ACPI_CPPC_CPUFREQ=y -# CONFIG_QORIQ_CPUFREQ is not set -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -# CONFIG_TLS is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -# CONFIG_SMC is not set -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_INET_ESP_OFFLOAD is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -# CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -# CONFIG_TCP_CONG_CDG is not set -# CONFIG_TCP_CONG_BBR is not set -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -# CONFIG_INET6_ESP_OFFLOAD is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -# CONFIG_IPV6_FOU is not set -# CONFIG_IPV6_FOU_TUNNEL is not set -CONFIG_IPV6_MULTIPLE_TABLES=y -# CONFIG_IPV6_SUBTREES is not set -CONFIG_IPV6_MROUTE=y 
-CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_COMMON=m -# CONFIG_NF_LOG_NETDEV is not set -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=m -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m -CONFIG_NFT_EXTHDR=m -CONFIG_NFT_META=m -# CONFIG_NFT_RT is not set -# CONFIG_NFT_NUMGEN is not set -CONFIG_NFT_CT=m -# CONFIG_NFT_SET_RBTREE is not set -# CONFIG_NFT_SET_HASH is not set -# CONFIG_NFT_SET_BITMAP is not set -CONFIG_NFT_COUNTER=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -# CONFIG_NFT_OBJREF is not set -CONFIG_NFT_QUEUE=m -# CONFIG_NFT_QUOTA is not set -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NETFILTER_XTABLES=m - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m 
-CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -# CONFIG_IP_SET_HASH_IPMAC is not set -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TABLES_IPV4=m -CONFIG_NFT_CHAIN_ROUTE_IPV4=m -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -# CONFIG_NFT_FIB_IPV4 is not set -CONFIG_NF_TABLES_ARP=m -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_IPV4=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_NF_NAT_MASQUERADE_IPV4=m -CONFIG_NFT_MASQ_IPV4=m -CONFIG_NFT_REDIR_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m 
-CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TABLES_IPV6=m -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_NFT_MASQ_IPV6=m -CONFIG_NFT_REDIR_IPV6=m -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -# CONFIG_NFT_FIB_IPV6 is not set -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_NF_NAT_IPV6=m -CONFIG_NF_NAT_MASQUERADE_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -# CONFIG_IP6_NF_TARGET_NPT is not set -CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NFT_BRIDGE_META=m -CONFIG_NFT_BRIDGE_REJECT=m -CONFIG_NF_LOG_BRIDGE=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -CONFIG_NET_SCTPPROBE=m -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -# CONFIG_ATM_MPOA is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -# CONFIG_DECNET is not set -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m 
-CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_IPSET=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -# CONFIG_NET_ACT_SAMPLE is not set -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_NET_ACT_VLAN=m -# CONFIG_NET_ACT_BPF is not set -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=m -# CONFIG_MPLS_ROUTING is not set -# CONFIG_NET_NSH is not set -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -# CONFIG_NET_L3_MASTER_DEV is not set -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_JIT=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -# CONFIG_NET_TCPPROBE is not set -CONFIG_NET_DROP_MONITOR=y -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -# CONFIG_STREAM_PARSER is not set -CONFIG_FIB_RULES=y -# CONFIG_WIRELESS is not set -# CONFIG_WIMAX is not set -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=m -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -# CONFIG_PSAMPLE is not set -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_NET_DEVLINK=m -CONFIG_MAY_USE_DEVLINK=m -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y - -# -# Generic Driver Options -# -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# 
CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=y -CONFIG_REGMAP_SPI=y -CONFIG_REGMAP_MMIO=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -CONFIG_DMA_CMA=y - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=64 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -CONFIG_GENERIC_ARCH_TOPOLOGY=y - -# -# Bus devices -# -CONFIG_ARM_CCI=y -CONFIG_ARM_CCI_PMU=y -CONFIG_ARM_CCI400_COMMON=y -CONFIG_ARM_CCI400_PMU=y -CONFIG_ARM_CCI5xx_PMU=y -CONFIG_ARM_CCN=y -# CONFIG_BRCMSTB_GISB_ARB is not set -CONFIG_QCOM_EBI2=y -# CONFIG_SIMPLE_PM_BUS is not set -CONFIG_VEXPRESS_CONFIG=y -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -# CONFIG_MTD_AFS_PARTS is not set -CONFIG_MTD_OF_PARTS=m -# CONFIG_MTD_AR7_PARTS is not set - -# -# Partition parsers -# - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=m -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_GEN_PROBE=m -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_CFI_I4 is not set -# CONFIG_MTD_CFI_I8 is not set -CONFIG_MTD_CFI_INTELEXT=m -CONFIG_MTD_CFI_AMDSTD=m -CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=m -# CONFIG_MTD_PHYSMAP_COMPAT is not set -CONFIG_MTD_PHYSMAP_OF=m -# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set -# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# CONFIG_MTD_NAND is not set -# CONFIG_MTD_ONENAND is not set - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_DYNAMIC=y 
-CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y -CONFIG_OF_IRQ=y -CONFIG_OF_NET=y -CONFIG_OF_MDIO=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -CONFIG_OF_NUMA=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -# CONFIG_ZRAM_WRITEBACK is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_BLK_DEV_RAM_DAX=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=y -# CONFIG_VIRTIO_BLK_SCSI is not set -CONFIG_BLK_DEV_RBD=m -# CONFIG_BLK_DEV_RSXX is not set -CONFIG_NVME_CORE=y -CONFIG_BLK_DEV_NVME=y -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -# CONFIG_NVME_FC is not set -CONFIG_NVME_TARGET=m -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -# CONFIG_NVME_TARGET_FC is not set - -# -# Misc devices -# -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -CONFIG_TIFM_CORE=m -# CONFIG_TIFM_7XX1 is not set -# CONFIG_ICS932S401 is not set -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_TI_DAC7512 is not set -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -CONFIG_VEXPRESS_SYSCFG=y -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module -# -# CONFIG_ALTERA_STAPL is not set - -# -# Intel MIC Bus Driver -# - -# -# SCIF Bus Driver -# - -# -# VOP Bus Driver -# - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# - -# -# VOP Driver -# -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_CXL_BASE is not set -# CONFIG_CXL_AFU_DRIVER_OPS is not set -# CONFIG_CXL_LIB is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_MQ_DEFAULT is not set -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m 
-CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -CONFIG_SCSI_CXGB4_ISCSI=m -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_SCSI_BNX2X_FCOE is not set -CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -CONFIG_SCSI_HISI_SAS=m -# CONFIG_SCSI_HISI_SAS_PCI is not set -CONFIG_SCSI_MVSAS=m -# CONFIG_SCSI_MVSAS_DEBUG is not set -CONFIG_SCSI_MVSAS_TASKLET=y -CONFIG_SCSI_MVUMI=m -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS is not set -CONFIG_SCSI_SMARTPQI=m -CONFIG_SCSI_UFSHCD=m -CONFIG_SCSI_UFSHCD_PCI=m -# CONFIG_SCSI_UFS_DWC_TC_PCI is not set -# CONFIG_SCSI_UFSHCD_PLATFORM is not set -CONFIG_SCSI_HPTIOP=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FUTURE_DOMAIN is not set -# CONFIG_SCSI_IPS is not set -CONFIG_SCSI_INITIO=m -# CONFIG_SCSI_INIA100 is not set -CONFIG_SCSI_STEX=m -# CONFIG_SCSI_SYM53C8XX_2 is not set -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -# CONFIG_SCSI_QLOGIC_1280 is not set -CONFIG_SCSI_QLA_FC=m -# CONFIG_TCM_QLA2XXX is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -CONFIG_SCSI_PMCRAID=m -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_HAVE_PATA_PLATFORM=y -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=m -# CONFIG_AHCI_CEVA is not set -CONFIG_AHCI_XGENE=m -# CONFIG_AHCI_QORIQ is not set -CONFIG_SATA_AHCI_SEATTLE=m -# CONFIG_SATA_INIC162X is not set -CONFIG_SATA_ACARD_AHCI=m -CONFIG_SATA_SIL24=m -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m -CONFIG_SATA_SX4=m -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -# CONFIG_SATA_DWC is not set -CONFIG_SATA_MV=m -CONFIG_SATA_NV=m -CONFIG_SATA_PROMISE=m -CONFIG_SATA_SIL=m -# CONFIG_SATA_SIS is not set -CONFIG_SATA_SVW=m -CONFIG_SATA_ULI=m -CONFIG_SATA_VIA=m -CONFIG_SATA_VITESSE=m - -# -# PATA SFF controllers with BMDMA -# -# 
CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_MQ_DEFAULT is not set -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=y -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_ERA=m -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -CONFIG_DM_INIT=y -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=y -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -# CONFIG_DM_LOG_WRITES is not set -# CONFIG_DM_INTEGRITY is not set -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -CONFIG_TCM_FC=m -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_GTP is not set -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=y -CONFIG_NLMON=m -# CONFIG_VSOCKMON is not set -# CONFIG_ARCNET is not set -# CONFIG_ATM_DRIVERS is not set - -# -# CAIF transport 
drivers -# - -# -# Distributed Switch Architecture drivers -# -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -CONFIG_NET_VENDOR_ALACRITECH=y -# CONFIG_SLICOSS is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set -# CONFIG_AMD_XGBE_HAVE_ECC is not set -CONFIG_NET_XGENE=m -CONFIG_NET_XGENE_V2=m -CONFIG_NET_VENDOR_AQUANTIA=y -CONFIG_NET_VENDOR_ARC=y -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -# CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_CADENCE=y -# CONFIG_MACB is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -# CONFIG_CNIC is not set -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -# CONFIG_BNXT_DCB is not set -# CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_CAVIUM=y -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_RGX=m -CONFIG_LIQUIDIO=m -# CONFIG_LIQUIDIO_VF is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -# CONFIG_NET_VENDOR_CISCO is not set -CONFIG_DNET=m -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_EXAR is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -CONFIG_HNS_MDIO=m -CONFIG_HNS=m -CONFIG_HNS_DSAF=m -CONFIG_HNS_ENET=m -# CONFIG_HNS3 is not set -# CONFIG_NET_VENDOR_HP is not set -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m -# CONFIG_IXGB is not set -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_I40EVF=m -CONFIG_FM10K=m -# CONFIG_NET_VENDOR_I825XX is not set -# CONFIG_JME is not set -CONFIG_NET_VENDOR_MARVELL=y -CONFIG_MVMDIO=m -CONFIG_SKGE=m -# CONFIG_SKGE_DEBUG is not set -CONFIG_SKGE_GENESIS=y -CONFIG_SKY2=m -# CONFIG_SKY2_DEBUG is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -# CONFIG_MLX5_CORE is not set -# CONFIG_MLXSW_CORE is not set -# CONFIG_MLXFW is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_NET_VENDOR_NETRONOME=y -# CONFIG_NFP is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -CONFIG_NET_PACKET_ENGINE=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -CONFIG_NET_VENDOR_QUALCOMM=y -# CONFIG_QCA7000_SPI is not set -CONFIG_QCOM_EMAC=m -# CONFIG_RMNET is not set -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# 
CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not set -CONFIG_NET_VENDOR_SMSC=y -CONFIG_SMC91X=m -CONFIG_EPIC100=m -CONFIG_SMSC911X=m -# CONFIG_SMSC911X_ARCH_HOOKS is not set -CONFIG_SMSC9420=m -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_VENDOR_SYNOPSYS=y -# CONFIG_DWC_XLGMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_MDIO_BCM_UNIMAC=m -CONFIG_MDIO_BITBANG=m -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set -CONFIG_MDIO_CAVIUM=m -CONFIG_MDIO_GPIO=m -# CONFIG_MDIO_HISI_FEMAC is not set -CONFIG_MDIO_OCTEON=m -CONFIG_MDIO_THUNDER=m -CONFIG_MDIO_XGENE=m -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -# CONFIG_LED_TRIGGER_PHY is not set - -# -# MII PHY device drivers -# -CONFIG_AMD_PHY=m -CONFIG_AQUANTIA_PHY=m -CONFIG_AT803X_PHY=m -# CONFIG_BCM7XXX_PHY is not set -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BROADCOM_PHY=m -CONFIG_CICADA_PHY=m -# CONFIG_CORTINA_PHY is not set -CONFIG_DAVICOM_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -CONFIG_FIXED_PHY=y -CONFIG_ICPLUS_PHY=m -# CONFIG_INTEL_XWAY_PHY is not set -CONFIG_LSI_ET1011C_PHY=m -CONFIG_LXT_PHY=m -CONFIG_MARVELL_PHY=m -# CONFIG_MARVELL_10G_PHY is not set -CONFIG_MICREL_PHY=m -CONFIG_MICROCHIP_PHY=m -# CONFIG_MICROSEMI_PHY is not set -CONFIG_NATIONAL_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -# CONFIG_ROCKCHIP_PHY is not set -CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_VITESSE_PHY=m -# CONFIG_XILINX_GMII2RGMII is not set -# CONFIG_MICREL_KS8995MA is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9700=m -# CONFIG_USB_NET_SR9800 is not set -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m 
-CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -# CONFIG_WLAN is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m - -# -# X.25/LAPB support is disabled -# -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -# CONFIG_DSCC4 is not set -CONFIG_DLCI=m -CONFIG_DLCI_MAX=8 -# CONFIG_VMXNET3 is not set -# CONFIG_FUJITSU_ES is not set -# CONFIG_ISDN is not set -# CONFIG_NVM is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -CONFIG_INPUT_MOUSE=y -# CONFIG_MOUSE_PS2 is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -# CONFIG_MOUSE_ELAN_I2C is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set -CONFIG_RMI4_CORE=m -# CONFIG_RMI4_I2C is not set -# CONFIG_RMI4_SPI is not set -# CONFIG_RMI4_SMB is not set -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -# CONFIG_RMI4_F34 is not set -# CONFIG_RMI4_F55 is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_SERPORT=y -CONFIG_SERIO_AMBAKMI=y -# CONFIG_SERIO_PCIPS2 is not set -# CONFIG_SERIO_LIBPS2 is not set -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_ROCKETPORT is not set -CONFIG_CYCLADES=m -# CONFIG_CYZ_INTR is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_SYNCLINKMP=m -CONFIG_SYNCLINK_GT=m -# CONFIG_NOZOMI is not set -# CONFIG_ISI is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -# CONFIG_TRACE_SINK is not set -# CONFIG_DEVMEM is not set - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_ASPEED_VUART is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y 
-CONFIG_SERIAL_8250_FSL=y -CONFIG_SERIAL_8250_DW=y -CONFIG_SERIAL_8250_RT288X=y -# CONFIG_SERIAL_8250_MOXA is not set -CONFIG_SERIAL_OF_PLATFORM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_MSM is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_IFX6X60 is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# CONFIG_SERIAL_DEV_BUS is not set -CONFIG_HVC_DRIVER=y -# CONFIG_HVC_DCC is not set -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -# CONFIG_IPMI_PANIC_EVENT is not set -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_HW_RANDOM_TPM=m -CONFIG_HW_RANDOM_HISI=y -# CONFIG_HW_RANDOM_MSM is not set -CONFIG_HW_RANDOM_XGENE=m -CONFIG_HW_RANDOM_CAVIUM=m -# CONFIG_R3964 is not set -# CONFIG_APPLICOM is not set - -# -# PCMCIA character devices -# -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=8192 -# CONFIG_HPET is not set -CONFIG_TCG_TPM=m -CONFIG_TCG_TIS_CORE=m -CONFIG_TCG_TIS=m -# CONFIG_TCG_TIS_SPI is not set -# CONFIG_TCG_TIS_I2C_ATMEL is not set -# CONFIG_TCG_TIS_I2C_INFINEON is not set -# CONFIG_TCG_TIS_I2C_NUVOTON is not set -CONFIG_TCG_ATMEL=m -# CONFIG_TCG_INFINEON is not set -# CONFIG_TCG_CRB is not set -# CONFIG_TCG_VTPM_PROXY is not set -# CONFIG_TCG_TIS_ST33ZP24_I2C is not set -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -# CONFIG_DEVPORT is not set -# CONFIG_XILLYBUS is not set - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_ARB_GPIO_CHALLENGE=m -CONFIG_I2C_MUX_GPIO=m -# CONFIG_I2C_MUX_GPMUX is not set -# CONFIG_I2C_MUX_LTC4306 is not set -CONFIG_I2C_MUX_PCA9541=m -CONFIG_I2C_MUX_PCA954x=m -CONFIG_I2C_MUX_PINCTRL=m -# CONFIG_I2C_MUX_REG is not set -# CONFIG_I2C_DEMUX_PINCTRL is not set -# CONFIG_I2C_MUX_MLXCPLD is not set -# CONFIG_I2C_HELPER_AUTO is not set -CONFIG_I2C_SMBUS=m - -# -# I2C Algorithms -# -CONFIG_I2C_ALGOBIT=y -CONFIG_I2C_ALGOPCF=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -CONFIG_I2C_NFORCE2=m -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO 
is not set -CONFIG_I2C_DESIGNWARE_CORE=y -CONFIG_I2C_DESIGNWARE_PLATFORM=y -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -CONFIG_I2C_GPIO=m -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_PXA_PCI is not set -CONFIG_I2C_QUP=m -# CONFIG_I2C_RK3X is not set -CONFIG_I2C_SIMTEC=m -CONFIG_I2C_VERSATILE=m -CONFIG_I2C_THUNDERX=m -# CONFIG_I2C_XILINX is not set -CONFIG_I2C_XLP9XX=m - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -CONFIG_I2C_PARPORT_LIGHT=m -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_XGENE_SLIMPRO=m -CONFIG_I2C_STUB=m -CONFIG_I2C_SLAVE=y -CONFIG_I2C_SLAVE_EEPROM=m -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -CONFIG_SPI_CADENCE=m -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_OC_TINY is not set -CONFIG_SPI_PL022=m -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_PXA2XX_PCI is not set -# CONFIG_SPI_ROCKCHIP is not set -CONFIG_SPI_QUP=y -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_THUNDERX is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -CONFIG_SPI_XLP=m -# CONFIG_SPI_ZYNQMP_GQSPI is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=m -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=m -CONFIG_DP83640_PHY=m -CONFIG_PINCTRL=y - -# -# Pin controllers -# -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SX150X is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ4019 is not set -# CONFIG_PINCTRL_IPQ8064 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MDM9615 is not set -# CONFIG_PINCTRL_MSM8X74 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -CONFIG_PINCTRL_QDF2XXX=y -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -CONFIG_GPIOLIB=y -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=m - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -CONFIG_GPIO_AMDPT=m -CONFIG_GPIO_DWAPB=m -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -CONFIG_GPIO_GENERIC_PLATFORM=m -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_MOCKUP is not set -CONFIG_GPIO_PL061=y -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_THUNDERX is not set -CONFIG_GPIO_XGENE=y -CONFIG_GPIO_XGENE_SB=m 
-# CONFIG_GPIO_XILINX is not set -CONFIG_GPIO_XLP=m - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_SX150X is not set -# CONFIG_GPIO_TPIC2810 is not set - -# -# MFD GPIO expanders -# - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set - -# -# USB GPIO expanders -# -# CONFIG_W1 is not set -# CONFIG_POWER_AVS is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMSTB is not set -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -CONFIG_POWER_RESET_HISI=y -# CONFIG_POWER_RESET_MSM is not set -# CONFIG_POWER_RESET_LTC2952 is not set -CONFIG_POWER_RESET_RESTART=y -CONFIG_POWER_RESET_VEXPRESS=y -# CONFIG_POWER_RESET_XGENE is not set -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_LTC3651 is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ25890 is not set -CONFIG_CHARGER_SMB347=m -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_CHARGER_RT9455 is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_AD7314=m -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7X10=m -CONFIG_SENSORS_ADT7310=m -CONFIG_SENSORS_ADT7410=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -CONFIG_SENSORS_ASC7621=m -CONFIG_SENSORS_ARM_SCPI=m -# CONFIG_SENSORS_ASPEED is not set -CONFIG_SENSORS_ATXP1=m -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -# CONFIG_SENSORS_I5K_AMB is not set -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -# CONFIG_SENSORS_FTSTEUTATES is not set -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -CONFIG_SENSORS_G760A=m -CONFIG_SENSORS_G762=m -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -CONFIG_SENSORS_IT87=m -# CONFIG_SENSORS_JC42 is not set -CONFIG_SENSORS_POWR1220=m -CONFIG_SENSORS_LINEAGE=m -CONFIG_SENSORS_LTC2945=m -# CONFIG_SENSORS_LTC2990 is not set -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -CONFIG_SENSORS_LTC4222=m -CONFIG_SENSORS_LTC4245=m -CONFIG_SENSORS_LTC4260=m -CONFIG_SENSORS_LTC4261=m -CONFIG_SENSORS_MAX1111=m 
-CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX197=m -# CONFIG_SENSORS_MAX31722 is not set -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6642=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_MAX6697=m -CONFIG_SENSORS_MAX31790=m -CONFIG_SENSORS_MCP3021=m -# CONFIG_SENSORS_TC654 is not set -CONFIG_SENSORS_ADCXX=m -CONFIG_SENSORS_LM63=m -CONFIG_SENSORS_LM70=m -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LM95234=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_PC87360=m -CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_NTC_THERMISTOR=m -CONFIG_SENSORS_NCT6683=m -CONFIG_SENSORS_NCT6775=m -CONFIG_SENSORS_NCT7802=m -CONFIG_SENSORS_NCT7904=m -CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -CONFIG_SENSORS_ADM1275=m -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_IR35221 is not set -CONFIG_SENSORS_LM25066=m -CONFIG_SENSORS_LTC2978=m -CONFIG_SENSORS_LTC3815=m -CONFIG_SENSORS_MAX16064=m -CONFIG_SENSORS_MAX20751=m -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -CONFIG_SENSORS_TPS40422=m -# CONFIG_SENSORS_TPS53679 is not set -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -CONFIG_SENSORS_ZL6100=m -CONFIG_SENSORS_PWM_FAN=m -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -# CONFIG_SENSORS_SHT3x is not set -CONFIG_SENSORS_SHTC1=m -CONFIG_SENSORS_SIS5595=m -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SCH5627 is not set -# CONFIG_SENSORS_SCH5636 is not set -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_SMM665 is not set -CONFIG_SENSORS_ADC128D818=m -CONFIG_SENSORS_ADS1015=m -CONFIG_SENSORS_ADS7828=m -CONFIG_SENSORS_ADS7871=m -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_INA209=m -CONFIG_SENSORS_INA2XX=m -# CONFIG_SENSORS_INA3221 is not set -CONFIG_SENSORS_TC74=m -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -CONFIG_SENSORS_TMP103=m -# CONFIG_SENSORS_TMP108 is not set -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -CONFIG_SENSORS_VEXPRESS=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_XGENE=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=y -CONFIG_THERMAL=y -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -CONFIG_CPU_THERMAL=y -# CONFIG_CLOCK_THERMAL is not set -# CONFIG_DEVFREQ_THERMAL is not set -# CONFIG_THERMAL_EMULATION is not set -CONFIG_HISI_THERMAL=m -# 
CONFIG_QORIQ_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# - -# -# Qualcomm thermal drivers -# -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_SYSFS=y - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -# CONFIG_GPIO_WATCHDOG is not set -CONFIG_WDAT_WDT=m -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_ARM_SP805_WATCHDOG is not set -# CONFIG_ARM_SBSA_WATCHDOG is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_QCOM_WDT is not set -# CONFIG_ALIM7101_WDT is not set -# CONFIG_I6300ESB_WDT is not set -# CONFIG_MEN_A21_WDT is not set - -# -# PCI-based Watchdog Cards -# -# CONFIG_PCIPCWATCHDOG is not set -# CONFIG_WDTPCI is not set - -# -# USB-based Watchdog Cards -# -# CONFIG_USBPCWATCHDOG is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=m -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CROS_EC is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_QCOM_RPM is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RTSX_USB is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# 
CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TPS68470 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_VEXPRESS_SYSREG is not set -# CONFIG_REGULATOR is not set -CONFIG_RC_CORE=y -CONFIG_RC_MAP=y -CONFIG_RC_DECODERS=y -# CONFIG_LIRC is not set -CONFIG_IR_NEC_DECODER=y -CONFIG_IR_RC5_DECODER=y -CONFIG_IR_RC6_DECODER=y -CONFIG_IR_JVC_DECODER=y -CONFIG_IR_SONY_DECODER=y -CONFIG_IR_SANYO_DECODER=y -CONFIG_IR_SHARP_DECODER=y -CONFIG_IR_MCE_KBD_DECODER=y -CONFIG_IR_XMP_DECODER=y -# CONFIG_RC_DEVICES is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_DRM=m -# CONFIG_DRM_DP_AUX_CHARDEV is not set -# CONFIG_DRM_DEBUG_MM_SELFTEST is not set -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_KMS_FB_HELPER=y -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_TTM=m -CONFIG_DRM_VM=y - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -# CONFIG_DRM_I2C_SIL164 is not set -CONFIG_DRM_I2C_NXP_TDA998X=m -# CONFIG_DRM_HDLCD is not set -# CONFIG_DRM_MALI_DISPLAY is not set -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_USERPTR=y -CONFIG_DRM_AMDGPU=m -# CONFIG_DRM_AMDGPU_SI is not set -CONFIG_DRM_AMDGPU_CIK=y -CONFIG_DRM_AMDGPU_USERPTR=y -# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_AMD_ACP is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_VGEM is not set -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m -CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_RCAR_DW_HDMI is not set -CONFIG_DRM_QXL=m -CONFIG_DRM_BOCHS=m -CONFIG_DRM_VIRTIO_GPU=m -# CONFIG_DRM_MSM is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_DUMB_VGA_DAC is not set -# CONFIG_DRM_LVDS_ENCODER is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set 
-# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# CONFIG_DRM_ARCPGU is not set -CONFIG_DRM_HISI_HIBMC=m -# CONFIG_DRM_HISI_KIRIN is not set -# CONFIG_DRM_MXSFB is not set -# CONFIG_DRM_TINYDRM is not set -# CONFIG_DRM_PL111 is not set -# CONFIG_DRM_LEGACY is not set -# CONFIG_DRM_LIB_RANDOM is not set - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -CONFIG_FB_ARMCLCD=y -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_FB_AUO_K190X is not set -CONFIG_FB_SIMPLE=y -CONFIG_FB_SSD1307=m -# CONFIG_FB_SM712 is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set -CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_S6E63M0 is not set -# CONFIG_LCD_LD9040 is not set -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_BACKLIGHT_PWM=m -# CONFIG_BACKLIGHT_PM8941_WLED is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set -CONFIG_BACKLIGHT_LP855X=m -CONFIG_BACKLIGHT_GPIO=m -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# CONFIG_VGASTATE is not set -CONFIG_VIDEOMODE_HELPERS=y -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is 
not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=m -# CONFIG_SOUND_OSS_CORE is not set -# CONFIG_SND is not set - -# -# HID support -# -CONFIG_HID=y -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=m -# CONFIG_HID_ACCUTOUCH is not set -CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set -CONFIG_HID_APPLE=m -CONFIG_HID_APPLEIR=m -# CONFIG_HID_ASUS is not set -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CP2112 is not set -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -CONFIG_HID_EZKEY=m -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=y -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MAYFLASH is not set -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NTI is not set -CONFIG_HID_NTRIG=y -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF is not set -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PICOLCD_CIR=y -CONFIG_HID_PLANTRONICS=m -CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m -CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=m -# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set -# CONFIG_HID_ALPS is not set - -# -# USB HID support -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# I2C HID support -# -CONFIG_I2C_HID=m -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set -CONFIG_USB_MON=y -CONFIG_USB_WUSB=m -CONFIG_USB_WUSB_CBAF=m -# CONFIG_USB_WUSB_CBAF_DEBUG is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_XHCI_PLATFORM=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y 
-CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_MSM is not set -CONFIG_USB_EHCI_HCD_PLATFORM=m -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_U132_HCD is not set -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_WHCI_HCD is not set -CONFIG_USB_HWA_HCD=m -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_SIMPLE=m -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set -# CONFIG_USB_SERIAL_F8153X is not set -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7840=m -# CONFIG_USB_SERIAL_MXUPORT is not set -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -# CONFIG_USB_SERIAL_UPD78F0730 is not set -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -# CONFIG_USB_RIO500 is not set -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m 
-# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -CONFIG_USB_IDMOUSE=m -CONFIG_USB_FTDI_ELAN=m -CONFIG_USB_APPLEDISPLAY=m -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y -CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -CONFIG_USB_CHAOSKEY=m -CONFIG_USB_ATM=m -# CONFIG_USB_SPEEDTOUCH is not set -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_USB_PHY is not set -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_MSM_OTG is not set -# CONFIG_USB_QCOM_8X16_PHY is not set -# CONFIG_USB_ULPI is not set -# CONFIG_USB_GADGET is not set - -# -# USB Power Delivery and Type-C drivers -# -# CONFIG_TYPEC_UCSI is not set -CONFIG_USB_LED_TRIG=y -CONFIG_USB_ULPI_BUS=m -CONFIG_UWB=m -CONFIG_UWB_HWA=m -CONFIG_UWB_WHCI=m -CONFIG_UWB_I1480U=m -CONFIG_MMC=m -CONFIG_PWRSEQ_EMMC=m -CONFIG_PWRSEQ_SIMPLE=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_ARMMMCI=m -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_SDHCI_MSM is not set -CONFIG_MMC_TIFM_SD=m -CONFIG_MMC_SPI=m -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -# CONFIG_MMC_CAVIUM_THUNDERX is not set -# CONFIG_MMC_DW is not set -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set -CONFIG_MMC_TOSHIBA_PCI=m -CONFIG_MMC_MTK=m -# CONFIG_MMC_SDHCI_XENON is not set -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_CLASS_FLASH=m -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_AAT1290 is not set -# CONFIG_LEDS_AS3645A is not set -# CONFIG_LEDS_BCM6328 is not set -# CONFIG_LEDS_BCM6358 is not set -CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -# CONFIG_LEDS_LP3952 is not set -CONFIG_LEDS_LP55XX_COMMON=m -CONFIG_LEDS_LP5521=m -CONFIG_LEDS_LP5523=m -CONFIG_LEDS_LP5562=m -# CONFIG_LEDS_LP8501 is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_LT3593=m -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_KTD2692 is not set -# CONFIG_LEDS_IS31FL319X is not set -# CONFIG_LEDS_IS31FL32XX is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -# CONFIG_LEDS_SYSCON 
is not set -# CONFIG_LEDS_USER is not set - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -CONFIG_LEDS_TRIGGER_DISK=y -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -CONFIG_LEDS_TRIGGER_GPIO=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_QIB is not set -CONFIG_INFINIBAND_CXGB4=m -CONFIG_INFINIBAND_I40IW=m -CONFIG_MLX4_INFINIBAND=m -# CONFIG_INFINIBAND_NES is not set -# CONFIG_INFINIBAND_OCRDMA is not set -# CONFIG_INFINIBAND_HNS is not set -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -# CONFIG_INFINIBAND_BNXT_RE is not set -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -# CONFIG_EDAC_GHES is not set -CONFIG_EDAC_THUNDERX=m -CONFIG_EDAC_XGENE=m -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_ABB5ZES3=m -CONFIG_RTC_DRV_ABX80X=m -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_HWMON is not set -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1374_WDT=y -CONFIG_RTC_DRV_DS1672=m -# CONFIG_RTC_DRV_HYM8563 is not set -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -CONFIG_RTC_DRV_PCF85063=m -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8010=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV8803 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_DRV_M41T93=m -CONFIG_RTC_DRV_M41T94=m -# CONFIG_RTC_DRV_DS1302 is not set -CONFIG_RTC_DRV_DS1305=m -CONFIG_RTC_DRV_DS1343=m -CONFIG_RTC_DRV_DS1347=m -CONFIG_RTC_DRV_DS1390=m -# CONFIG_RTC_DRV_MAX6916 is not set -CONFIG_RTC_DRV_R9701=m -CONFIG_RTC_DRV_RX4581=m -# CONFIG_RTC_DRV_RX6110 is not set -CONFIG_RTC_DRV_RS5C348=m -CONFIG_RTC_DRV_MAX6902=m -CONFIG_RTC_DRV_PCF2123=m -CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -CONFIG_RTC_DRV_PCF2127=m -CONFIG_RTC_DRV_RV3029C2=m -# CONFIG_RTC_DRV_RV3029_HWMON is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m 
-CONFIG_RTC_DRV_DS1685_FAMILY=m -CONFIG_RTC_DRV_DS1685=y -# CONFIG_RTC_DRV_DS1689 is not set -# CONFIG_RTC_DRV_DS17285 is not set -# CONFIG_RTC_DRV_DS17485 is not set -# CONFIG_RTC_DRV_DS17885 is not set -# CONFIG_RTC_DS1685_PROC_REGS is not set -CONFIG_RTC_DS1685_SYSFS_REGS=y -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_EFI=y -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_BQ4802=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_V3020=m -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_PL030 is not set -CONFIG_RTC_DRV_PL031=y -# CONFIG_RTC_DRV_FTRTC010 is not set -# CONFIG_RTC_DRV_SNVS is not set -# CONFIG_RTC_DRV_XGENE is not set -# CONFIG_RTC_DRV_R7301 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_BCM_SBA_RAID is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_K3_DMA is not set -# CONFIG_MV_XOR_V2 is not set -# CONFIG_PL330_DMA is not set -# CONFIG_XGENE_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_ZYNQMP_DMA is not set -# CONFIG_QCOM_BAM_DMA is not set -CONFIG_QCOM_HIDMA_MGMT=m -CONFIG_QCOM_HIDMA=m -CONFIG_DW_DMAC_CORE=m -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=m - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -# CONFIG_DMATEST is not set - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -CONFIG_AUXDISPLAY=y -# CONFIG_HD44780 is not set -# CONFIG_IMG_ASCII_LCD is not set -# CONFIG_HT16K33 is not set -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -# CONFIG_UIO_DMEM_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_VIRQFD=m -CONFIG_VFIO=m -CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_PCI=m -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PLATFORM=m -CONFIG_VFIO_AMBA=m -# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set -# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set -# CONFIG_VFIO_MDEV is not set -CONFIG_IRQ_BYPASS_MANAGER=m -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO=y - -# -# Virtio drivers -# -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_HYPERV_TSCPAGE is not set -# CONFIG_STAGING is not set -# CONFIG_GOLDFISH is not set -CONFIG_CHROME_PLATFORMS=y -# CONFIG_CROS_KBD_LED_BACKLIGHT is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Common Clock Framework -# -CONFIG_COMMON_CLK_VERSATILE=y -CONFIG_CLK_SP810=y -CONFIG_CLK_VEXPRESS_OSC=y -# CONFIG_CLK_HSDK is not set -CONFIG_COMMON_CLK_SCPI=m -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_CLK_QORIQ is not set -CONFIG_COMMON_CLK_XGENE=y -# CONFIG_COMMON_CLK_NXP is not set -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_PXA is not set -# CONFIG_COMMON_CLK_PIC32 is 
not set -# CONFIG_COMMON_CLK_VC5 is not set -CONFIG_COMMON_CLK_HI3516CV300=y -CONFIG_COMMON_CLK_HI3519=y -CONFIG_COMMON_CLK_HI3660=y -CONFIG_COMMON_CLK_HI3798CV200=y -# CONFIG_COMMON_CLK_HI6220 is not set -CONFIG_RESET_HISI=y -# CONFIG_COMMON_CLK_QCOM is not set -# CONFIG_HWSPINLOCK is not set - -# -# Clock Source drivers -# -CONFIG_TIMER_OF=y -CONFIG_TIMER_ACPI=y -CONFIG_TIMER_PROBE=y -CONFIG_CLKSRC_MMIO=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y -CONFIG_FSL_ERRATUM_A008585=y -CONFIG_HISILICON_ERRATUM_161010101=y -CONFIG_ARM64_ERRATUM_858921=y -CONFIG_ARM_TIMER_SP804=y -# CONFIG_ATMEL_PIT is not set -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -CONFIG_MAILBOX=y -CONFIG_ARM_MHU=m -# CONFIG_PLATFORM_MHU is not set -# CONFIG_PL320_MBOX is not set -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -# CONFIG_HI6220_MBOX is not set -# CONFIG_MAILBOX_TEST is not set -# CONFIG_QCOM_APCS_IPC is not set -CONFIG_XGENE_SLIMPRO_MBOX=m -# CONFIG_BCM_FLEXRM_MBOX is not set -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set -CONFIG_IOMMU_IOVA=y -CONFIG_OF_IOMMU=y -CONFIG_IOMMU_DMA=y -CONFIG_ARM_SMMU=y -CONFIG_ARM_SMMU_V3=y -# CONFIG_QCOM_IOMMU is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# - -# -# Broadcom SoC drivers -# -# CONFIG_SOC_BRCMSTB is not set - -# -# i.MX SoC drivers -# - -# -# Qualcomm SoC drivers -# -# CONFIG_QCOM_GSBI is not set -# CONFIG_SUNXI_SRAM is not set -# CONFIG_SOC_TI is not set -CONFIG_PM_DEVFREQ=y - -# -# DEVFREQ Governors -# -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m -# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set -# CONFIG_DEVFREQ_GOV_POWERSAVE is not set -# CONFIG_DEVFREQ_GOV_USERSPACE is not set -# CONFIG_DEVFREQ_GOV_PASSIVE is not set - -# -# DEVFREQ Drivers -# -# CONFIG_PM_DEVFREQ_EVENT is not set -CONFIG_EXTCON=m - -# -# Extcon Device Drivers -# -CONFIG_EXTCON_GPIO=m -# CONFIG_EXTCON_MAX3355 is not set -# CONFIG_EXTCON_QCOM_SPMI_MISC is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_HIBVT is not set -# CONFIG_PWM_PCA9685 is not set -CONFIG_IRQCHIP=y -CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_MAX_NR=1 -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y -CONFIG_ALPINE_MSI=y -CONFIG_HISILICON_IRQ_MBIGEN=y -CONFIG_PARTITION_PERCPU=y -# CONFIG_QCOM_IRQ_COMBINER is not set -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_ATH79 is not set -# CONFIG_RESET_BERLIN is not set -# CONFIG_RESET_IMX7 is not set -# CONFIG_RESET_LANTIQ is not set -# CONFIG_RESET_LPC18XX is not set -# CONFIG_RESET_MESON is not set -# CONFIG_RESET_PISTACHIO is not set -# CONFIG_RESET_SOCFPGA is not set -# CONFIG_RESET_STM32 is not set -# CONFIG_RESET_SUNXI is not set -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_ZYNQ is not set -CONFIG_COMMON_RESET_HI3660=y -CONFIG_COMMON_RESET_HI6220=m -# CONFIG_RESET_TEGRA_BPMP is not set 
-CONFIG_FMC=m -CONFIG_FMC_FAKEDEV=m -CONFIG_FMC_TRIVIAL=m -CONFIG_FMC_WRITE_EEPROM=m -CONFIG_FMC_CHARDEV=m - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -CONFIG_PHY_XGENE=y -# CONFIG_BCM_KONA_USB2_PHY is not set -CONFIG_PHY_HI6220_USB=m -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_UFS is not set -# CONFIG_PHY_QCOM_USB_HS is not set -# CONFIG_PHY_QCOM_USB_HSIC is not set -# CONFIG_PHY_TUSB1210 is not set -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -CONFIG_ARM_PMU=y -CONFIG_ARM_PMU_ACPI=y -# CONFIG_QCOM_L2_PMU is not set -# CONFIG_QCOM_L3_PMU is not set -CONFIG_XGENE_PMU=y -CONFIG_RAS=y - -# -# Android -# -# CONFIG_ANDROID is not set -CONFIG_LIBNVDIMM=m -CONFIG_BLK_DEV_PMEM=m -CONFIG_ND_BLK=m -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=m -CONFIG_BTT=y -CONFIG_DAX=y -# CONFIG_DEV_DAX is not set -CONFIG_NVMEM=y -# CONFIG_QCOM_QFPROM is not set -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set -# CONFIG_FPGA is not set - -# -# FSI support -# -# CONFIG_FSI is not set -# CONFIG_TEE is not set - -# -# Firmware Drivers -# -CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set -CONFIG_ARM_SCPI_PROTOCOL=m -CONFIG_ARM_SCPI_POWER_DOMAIN=m -# CONFIG_FIRMWARE_MEMMAP is not set -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_FW_CFG_SYSFS is not set -CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=y -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_ARMSTUB=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_UEFI_CPER=y -# CONFIG_EFI_DEV_PATH_PARSER is not set -# CONFIG_MESON_SM is not set - -# -# Tegra firmware driver -# -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_MCFG=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HED=y -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_BGRT is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -CONFIG_HAVE_ACPI_APEI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_SEA=y -CONFIG_ACPI_APEI_EINJ=m -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -CONFIG_ACPI_WATCHDOG=y -# CONFIG_PMIC_OPREGION is not set -# CONFIG_ACPI_CONFIGFS is not set -CONFIG_ACPI_IORT=y -CONFIG_ACPI_GTDT=y - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_FS_IOMAP=y -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_ENCRYPTION is not set -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS 
is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -CONFIG_FS_DAX=y -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -CONFIG_MANDATORY_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_PRINT_QUOTA_WARNING=y -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS4_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_OVERLAY_FS=y -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -# CONFIG_OVERLAY_FS_INDEX is not set - -# -# Caches -# -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_UBIFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_ZLIB_COMPRESS=y -# CONFIG_PSTORE_LZO_COMPRESS is not set -# CONFIG_PSTORE_LZ4_COMPRESS is not set -# CONFIG_PSTORE_CONSOLE is not set -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=m -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EXOFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# 
CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -CONFIG_NFSD_SCSILAYOUT=y -# CONFIG_NFSD_FLEXFILELAYOUT is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -# CONFIG_CEPH_FSCACHE is not set -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_ACL=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SMB311 is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -# CONFIG_DLM is not set -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_KVM_ARM_HOST=y -CONFIG_KVM_ARM_PMU=y -CONFIG_VHOST_NET=m -# CONFIG_VHOST_SCSI is not set -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y - -# -# Compile-time 
checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set -# CONFIG_ENABLE_WARN_DEPRECATED is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -# CONFIG_KCOV is not set -CONFIG_DEBUG_SHIRQ=y - -# -# Debug Lockups and Hangs -# -# CONFIG_SOFTLOCKUP_DETECTOR is not set -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -# CONFIG_WQ_WATCHDOG is not set -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -# CONFIG_SCHED_STACK_END_CHECK is not set -# CONFIG_DEBUG_TIMEKEEPING is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) 
-# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_HAVE_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_PROVE_RCU is not set -# CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_PERF_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -CONFIG_SCHED_TRACER=y -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENTS=y -CONFIG_UPROBE_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -# CONFIG_FUNCTION_PROFILER is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_TRACING_EVENTS_GPIO is not set -# CONFIG_DMA_API_DEBUG is not set - -# -# Runtime Testing -# -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_MEMTEST is not set -# 
CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_SERIAL_CONSOLE=y -CONFIG_KGDB_TESTS=y -# CONFIG_KGDB_TESTS_ON_BOOT is not set -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x0 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set -# CONFIG_UBSAN is not set -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -# CONFIG_ARM64_PTDUMP_CORE is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set -# CONFIG_PID_IN_CONTEXTIDR is not set -# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_WX is not set -# CONFIG_DEBUG_ALIGN_RODATA is not set -# CONFIG_DEBUG_EFI is not set -# CONFIG_ARM64_RELOC_TEST is not set -# CONFIG_CORESIGHT is not set - -# -# Security options -# -CONFIG_KEYS=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_BIG_KEYS=y -CONFIG_TRUSTED_KEYS=m -CONFIG_ENCRYPTED_KEYS=m -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITY_WRITABLE_HOOKS=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -# CONFIG_SECURITY_INFINIBAND is not set -CONFIG_SECURITY_NETWORK_XFRM=y -# CONFIG_SECURITY_PATH is not set -CONFIG_LSM_MMAP_MIN_ADDR=65535 -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -# CONFIG_HARDENED_USERCOPY is not set -# CONFIG_FORTIFY_SOURCE is not set -# CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -# CONFIG_SECURITY_YAMA is not set -# CONFIG_INTEGRITY is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -# CONFIG_CRYPTO_DH is not set -# CONFIG_CRYPTO_ECDH is not set -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -# CONFIG_CRYPTO_MCRYPTD is not set -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_SIMD=y -CONFIG_CRYPTO_ENGINE=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=m - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m -# CONFIG_CRYPTO_KEYWRAP is not set - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m 
-CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=m -# CONFIG_CRYPTO_SHA3 is not set -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -# CONFIG_CRYPTO_LZ4 is not set -# CONFIG_CRYPTO_LZ4HC is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -CONFIG_CRYPTO_DEV_CPT=m -CONFIG_CAVIUM_CPT=m -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set -# CONFIG_CRYPTO_DEV_QCE is not set -CONFIG_CRYPTO_DEV_CHELSIO=m -CONFIG_CRYPTO_DEV_VIRTIO=m -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -CONFIG_SIGNED_PE_FILE_VERIFICATION=y - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m -# CONFIG_CRYPTO_SHA512_ARM64 is not set -CONFIG_CRYPTO_SHA1_ARM64_CE=m -CONFIG_CRYPTO_SHA2_ARM64_CE=m -CONFIG_CRYPTO_GHASH_ARM64_CE=m -# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set -CONFIG_CRYPTO_CRC32_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -# CONFIG_CRYPTO_CHACHA20_NEON is not set -# CONFIG_CRYPTO_AES_ARM64_BS is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_RATIONAL=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IO=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not 
set -# CONFIG_CRC32_BIT is not set -# CONFIG_CRC4 is not set -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=m -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_RADIX_TREE_MULTIORDER=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -# CONFIG_DMA_NOOP_OPS is not set -CONFIG_DMA_VIRT_OPS=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -CONFIG_CORDIC=m -# CONFIG_DDR is not set -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -# CONFIG_SG_SPLIT is not set -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_SBITMAP=y -# CONFIG_STRING_SELFTEST is not set diff --git a/packages/kernel/config-thar b/packages/kernel/config-thar new file mode 100644 index 00000000..b944c172 --- /dev/null +++ b/packages/kernel/config-thar @@ -0,0 +1,23 @@ +# Because Thar does not have an initramfs, modules required to mount the root +# filesystem must be set to y. + +# The root filesystem is ext4 +CONFIG_EXT4_FS=y + +# NVMe for EC2 Nitro platforms (C5, M5, and later) +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_CORE=y + +# Xen blkfront for Xen-based EC2 platforms +CONFIG_XEN_BLKDEV_FRONTEND=y + +# virtio for local testing with QEMU +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_PCI=y + +# dm-verity and enabling it on the kernel command line +CONFIG_BLK_DEV_DM=y +CONFIG_DAX=y +CONFIG_DM_INIT=y +CONFIG_DM_VERITY=y diff --git a/packages/kernel/config-x86_64 b/packages/kernel/config-x86_64 deleted file mode 100644 index 61a5edea..00000000 --- a/packages/kernel/config-x86_64 +++ /dev/null @@ -1,4632 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86 4.14.102 Kernel Configuration -# -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_FHANDLE=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# 
CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -# CONFIG_TASKS_RCU is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_BUILD_BIN2C=y -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=21 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -# CONFIG_CGROUP_RDMA is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -# CONFIG_SYSFS_SYSCALL is not set -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_POSIX_TIMERS=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_USERFAULTFD=y -CONFIG_PCI_QUIRKS=y -CONFIG_MEMBARRIER=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -CONFIG_SLUB_MEMCG_SYSFS_ON=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -CONFIG_SLAB_MERGE_DEFAULT=y -# CONFIG_SLAB_FREELIST_RANDOM is not set -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_SLUB_CPU_PARTIAL=y -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HOTPLUG_SMT=y -CONFIG_OPROFILE=m -# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_UPROBES=y -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y 
-CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_HAVE_RCU_TABLE_INVALIDATE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_GCC_PLUGINS=y -# CONFIG_GCC_PLUGINS is not set -CONFIG_HAVE_CC_STACKPROTECTOR=y -CONFIG_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR_NONE is not set -# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_THIN_ARCHIVES=y -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=28 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_HAVE_STACK_VALIDATION=y -# CONFIG_HAVE_ARCH_HASH is not set -# CONFIG_ISA_BUS_API is not set -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -# CONFIG_CPU_NO_EFFICIENT_FFS is not set -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set -# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -# CONFIG_REFCOUNT_FULL is not set - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -CONFIG_MODULE_SIG_SHA512=y -CONFIG_MODULE_SIG_HASH="sha512" -# CONFIG_MODULE_COMPRESS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -CONFIG_BLK_CMDLINE_PARSER=y -CONFIG_BLK_WBT=y -# CONFIG_BLK_WBT_SQ is not set -CONFIG_BLK_WBT_MQ=y 
-CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -CONFIG_SOLARIS_X86_PARTITION=y -# CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -CONFIG_SUN_PARTITION=y -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=m -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_CFQ is not set -CONFIG_DEFAULT_NOOP=y -CONFIG_DEFAULT_IOSCHED="noop" -CONFIG_MQ_IOSCHED_DEADLINE=m -CONFIG_MQ_IOSCHED_KYBER=m -CONFIG_IOSCHED_BFQ=m -CONFIG_BFQ_GROUP_IOSCHED=y -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_ZONE_DMA=y -CONFIG_SMP=y -CONFIG_X86_FEATURE_NAMES=y -CONFIG_X86_FAST_FEATURE_TESTS=y -CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y -# CONFIG_GOLDFISH is not set -CONFIG_RETPOLINE=y -# CONFIG_INTEL_RDT is not set -# CONFIG_X86_EXTENDED_PLATFORM is not set -# CONFIG_X86_INTEL_LPSS is not set -# CONFIG_X86_AMD_PLATFORM_DEVICE is not set -CONFIG_IOSF_MBI=m -# CONFIG_IOSF_MBI_DEBUG is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_PARAVIRT_SPINLOCKS=y -# CONFIG_QUEUED_LOCK_STAT is not set -CONFIG_XEN=y -CONFIG_XEN_PV=y -CONFIG_XEN_PV_SMP=y -CONFIG_XEN_DOM0=y -CONFIG_XEN_PVHVM=y -CONFIG_XEN_PVHVM_SMP=y -CONFIG_XEN_512GB=y -CONFIG_XEN_SAVE_RESTORE=y -# CONFIG_XEN_DEBUG_FS is not set -CONFIG_XEN_PVH=y -CONFIG_KVM_GUEST=y -CONFIG_KVM_DEBUG_FS=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_PARAVIRT_CLOCK=y -CONFIG_NO_BOOTMEM=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -# CONFIG_PROCESSOR_SELECT is not set -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -# CONFIG_CALGARY_IOMMU is not set -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -CONFIG_MAXSMP=y -CONFIG_NR_CPUS=8192 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_MC_PRIO=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y 
-CONFIG_X86_MCELOG_LEGACY=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y - -# -# Performance monitoring -# -CONFIG_PERF_EVENTS_INTEL_UNCORE=y -# CONFIG_PERF_EVENTS_INTEL_RAPL is not set -# CONFIG_PERF_EVENTS_INTEL_CSTATE is not set -# CONFIG_PERF_EVENTS_AMD_POWER is not set -# CONFIG_VM86 is not set -CONFIG_X86_VSYSCALL_EMULATION=y -CONFIG_I8K=m -CONFIG_MICROCODE=y -CONFIG_MICROCODE_INTEL=y -CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_X86_5LEVEL is not set -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_X86_DIRECT_GBPAGES=y -CONFIG_ARCH_HAS_MEM_ENCRYPT=y -CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_ARCH_USE_MEMREMAP_PROT=y -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NODES_SPAN_OTHER_NODES=y -# CONFIG_NUMA_EMU is not set -CONFIG_NODES_SHIFT=10 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_HAVE_GENERIC_GUP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_MEMORY_FAILURE is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -# CONFIG_CMA is not set -CONFIG_MEM_SOFT_DIRTY=y -CONFIG_ZSWAP=y -CONFIG_ZPOOL=y -CONFIG_ZBUD=y -CONFIG_Z3FOLD=m -CONFIG_ZSMALLOC=m -# CONFIG_PGTABLE_MAPPING is not set -CONFIG_ZSMALLOC_STAT=y -CONFIG_GENERIC_EARLY_IOREMAP=y -CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_ZONE_DEVICE=y -# CONFIG_ZONE_DEVICE is not set -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -CONFIG_PERCPU_STATS=y -# CONFIG_X86_PMEM_LEGACY is not set -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_ARCH_RANDOM=y -CONFIG_X86_SMAP=y -# CONFIG_X86_INTEL_MPX is not set -CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -# CONFIG_EFI is not set -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 
-CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_VERIFY_SIG=y -CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y -CONFIG_CRASH_DUMP=y -# CONFIG_KEXEC_JUMP is not set -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_RANDOMIZE_MEMORY=y -CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa -CONFIG_HOTPLUG_CPU=y -# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set -# CONFIG_DEBUG_HOTPLUG_CPU0 is not set -# CONFIG_COMPAT_VDSO is not set -# CONFIG_LEGACY_VSYSCALL_NATIVE is not set -CONFIG_LEGACY_VSYSCALL_EMULATE=y -# CONFIG_LEGACY_VSYSCALL_NONE is not set -# CONFIG_CMDLINE_BOOL is not set -# CONFIG_MODIFY_LDT_SYSCALL is not set -CONFIG_HAVE_LIVEPATCH=y -CONFIG_LIVEPATCH=y -CONFIG_ARCH_HAS_ADD_PAGES=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y - -# -# Power management and ACPI options -# -CONFIG_ARCH_HIBERNATION_HEADER=y -# CONFIG_SUSPEND is not set -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS_POWER=y -# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=m -CONFIG_ACPI_BATTERY=m -CONFIG_ACPI_BUTTON=m -# CONFIG_ACPI_VIDEO is not set -# CONFIG_ACPI_FAN is not set -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_CSTATE=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=m -CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=m -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set -# CONFIG_ACPI_NFIT is not set -CONFIG_HAVE_ACPI_APEI=y -CONFIG_HAVE_ACPI_APEI_NMI=y -# CONFIG_ACPI_APEI is not set -# CONFIG_DPTF_POWER is not set -CONFIG_ACPI_WATCHDOG=y -CONFIG_ACPI_EXTLOG=m -# CONFIG_PMIC_OPREGION is not set -# CONFIG_ACPI_CONFIGFS is not set -# CONFIG_SFI is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_USERSPACE=m -CONFIG_CPU_FREQ_GOV_ONDEMAND=m -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -CONFIG_X86_INTEL_PSTATE=y -CONFIG_X86_PCC_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ=m -# CONFIG_X86_ACPI_CPUFREQ_CPB is not set -# CONFIG_X86_POWERNOW_K8 is 
not set -# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -# CONFIG_X86_P4_CLOCKMOD is not set - -# -# shared options -# -# CONFIG_X86_SPEEDSTEP_LIB is not set - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set -CONFIG_INTEL_IDLE=y - -# -# Bus options (PCI etc.) -# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_XEN=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -# CONFIG_PCIEAER is not set -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -# CONFIG_PCIE_DPC is not set -# CONFIG_PCIE_PTM is not set -CONFIG_PCI_BUS_ADDR_T_64BIT=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=m -CONFIG_XEN_PCIDEV_FRONTEND=y -CONFIG_HT_IRQ=y -CONFIG_PCI_ATS=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -CONFIG_PCI_LABEL=y -# CONFIG_PCI_HYPERV is not set -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -CONFIG_HOTPLUG_PCI_CPCI=y -# CONFIG_HOTPLUG_PCI_CPCI_ZT5550 is not set -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m -CONFIG_HOTPLUG_PCI_SHPC=m - -# -# DesignWare PCI Core Support -# -# CONFIG_PCIE_DW_PLAT is not set - -# -# PCI host controller drivers -# -# CONFIG_VMD is not set - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# CONFIG_ISA_BUS is not set -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -# CONFIG_PCCARD is not set -# CONFIG_RAPIDIO is not set -# CONFIG_X86_SYSFB is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=m -# CONFIG_X86_X32 is not set -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=m -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -# CONFIG_SMC is not set -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -# CONFIG_IP_FIB_TRIE_STATS is not set -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -CONFIG_NET_FOU=m -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_ESP_OFFLOAD=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m 
-CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -# CONFIG_TCP_CONG_NV is not set -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -# CONFIG_TCP_CONG_CDG is not set -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=m -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_IPV6_ILA=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_FOU=m -CONFIG_IPV6_FOU_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_IPV6_SEG6_LWTUNNEL=y -CONFIG_IPV6_SEG6_HMAC=y -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_COMMON=m -CONFIG_NF_LOG_NETDEV=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=m -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m -CONFIG_NFT_EXTHDR=m -CONFIG_NFT_META=m -CONFIG_NFT_RT=m -# CONFIG_NFT_NUMGEN is not set -CONFIG_NFT_CT=m -CONFIG_NFT_SET_RBTREE=m -CONFIG_NFT_SET_HASH=m -CONFIG_NFT_SET_BITMAP=m -CONFIG_NFT_COUNTER=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_OBJREF=m -CONFIG_NFT_QUEUE=m -# CONFIG_NFT_QUOTA is not set 
-CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -# CONFIG_NF_DUP_NETDEV is not set -# CONFIG_NFT_DUP_NETDEV is not set -# CONFIG_NFT_FWD_NETDEV is not set -CONFIG_NFT_FIB_NETDEV=m -CONFIG_NETFILTER_XTABLES=m - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPCOMP=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m 
-CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TABLES_IPV4=m -CONFIG_NFT_CHAIN_ROUTE_IPV4=m -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_IPV4=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_NF_NAT_MASQUERADE_IPV4=m -CONFIG_NFT_MASQ_IPV4=m -CONFIG_NFT_REDIR_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TABLES_IPV6=m -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_NFT_MASQ_IPV6=m -CONFIG_NFT_REDIR_IPV6=m -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_NF_NAT_IPV6=m -CONFIG_NF_NAT_MASQUERADE_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -# CONFIG_IP6_NF_TARGET_NPT is not set -CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NFT_BRIDGE_META=m -CONFIG_NFT_BRIDGE_REJECT=m -CONFIG_NF_LOG_BRIDGE=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m 
-CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_DCCP=m -CONFIG_INET_DCCP_DIAG=m - -# -# DCCP CCIDs Configuration -# -# CONFIG_IP_DCCP_CCID2_DEBUG is not set -CONFIG_IP_DCCP_CCID3=y -# CONFIG_IP_DCCP_CCID3_DEBUG is not set -CONFIG_IP_DCCP_TFRC_LIB=y - -# -# DCCP Kernel Hacking -# -# CONFIG_IP_DCCP_DEBUG is not set -CONFIG_NET_DCCPPROBE=m -CONFIG_IP_SCTP=m -# CONFIG_NET_SCTPPROBE is not set -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -CONFIG_RDS=m -# CONFIG_RDS_RDMA is not set -CONFIG_RDS_TCP=m -# CONFIG_RDS_DEBUG is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_UDP=y -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -# CONFIG_DECNET is not set -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=m -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -# CONFIG_NET_CLS_MATCHALL is not set -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_IPSET=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -CONFIG_NET_ACT_CONNMARK=m -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -# CONFIG_NET_ACT_TUNNEL_KEY is not set -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VMWARE_VMCI_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_HYPERV_VSOCKETS=m -# CONFIG_NETLINK_DIAG is not set -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=m -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -# CONFIG_NET_NSH is not set -CONFIG_HSR=m 
-# CONFIG_NET_SWITCHDEV is not set -# CONFIG_NET_L3_MASTER_DEV is not set -# CONFIG_NET_NCSI is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -# CONFIG_NET_TCPPROBE is not set -# CONFIG_NET_DROP_MONITOR is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -CONFIG_BT=m -CONFIG_BT_BREDR=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=m -CONFIG_BT_HS=y -CONFIG_BT_LE=y -# CONFIG_BT_SELFTEST is not set -CONFIG_BT_DEBUGFS=y - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_BCM=m -CONFIG_BT_RTL=m -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTUSB_BCM=y -CONFIG_BT_HCIBTUSB_RTL=y -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_SERDEV=y -CONFIG_BT_HCIUART_H4=y -# CONFIG_BT_HCIUART_NOKIA is not set -CONFIG_BT_HCIUART_BCSP=y -# CONFIG_BT_HCIUART_ATH3K is not set -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIUART_3WIRE=y -CONFIG_BT_HCIUART_INTEL=y -CONFIG_BT_HCIUART_BCM=y -# CONFIG_BT_HCIUART_QCA is not set -# CONFIG_BT_HCIUART_AG6XX is not set -# CONFIG_BT_HCIUART_MRVL is not set -# CONFIG_BT_HCIBCM203X is not set -# CONFIG_BT_HCIBPA10X is not set -# CONFIG_BT_HCIBFUSB is not set -CONFIG_BT_HCIVHCI=m -# CONFIG_BT_MRVL is not set -# CONFIG_BT_ATH3K is not set -CONFIG_AF_RXRPC=m -# CONFIG_AF_RXRPC_IPV6 is not set -# CONFIG_AF_RXRPC_INJECT_LOSS is not set -# CONFIG_AF_RXRPC_DEBUG is not set -# CONFIG_RXKAD is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -CONFIG_FIB_RULES=y -# CONFIG_WIRELESS is not set -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -CONFIG_NET_IFE=m -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -# CONFIG_NET_DEVLINK is not set -CONFIG_MAY_USE_DEVLINK=y -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -CONFIG_DEBUG_DEVRES=y -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_SYS_HYPERVISOR=y -# CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set - -# -# Bus devices -# -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -# CONFIG_OF is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -# CONFIG_BLK_DEV_FD is not set -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -# CONFIG_ZRAM_WRITEBACK is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set 
-CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m -# CONFIG_DRBD_FAULT_INJECTION is not set -CONFIG_BLK_DEV_NBD=m -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -CONFIG_ATA_OVER_ETH=m -CONFIG_XEN_BLKDEV_FRONTEND=y -CONFIG_XEN_BLKDEV_BACKEND=m -CONFIG_VIRTIO_BLK=y -# CONFIG_VIRTIO_BLK_SCSI is not set -CONFIG_BLK_DEV_RBD=m -# CONFIG_BLK_DEV_RSXX is not set -CONFIG_NVME_CORE=y -CONFIG_BLK_DEV_NVME=y -# CONFIG_NVME_RDMA is not set -# CONFIG_NVME_FC is not set -# CONFIG_NVME_TARGET is not set - -# -# Misc devices -# -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -CONFIG_VMWARE_BALLOON=m -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_SRAM is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_CB710_CORE is not set - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module -# -# CONFIG_ALTERA_STAPL is not set -# CONFIG_INTEL_MEI is not set -# CONFIG_INTEL_MEI_ME is not set -# CONFIG_INTEL_MEI_TXE is not set -CONFIG_VMWARE_VMCI=m - -# -# Intel MIC Bus Driver -# -# CONFIG_INTEL_MIC_BUS is not set - -# -# SCIF Bus Driver -# -# CONFIG_SCIF_BUS is not set - -# -# VOP Bus Driver -# -# CONFIG_VOP_BUS is not set - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# - -# -# VOP Driver -# -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_CXL_BASE is not set -# CONFIG_CXL_AFU_DRIVER_OPS is not set -# CONFIG_CXL_LIB is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=m -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=m -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_MQ_DEFAULT=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -# CONFIG_BLK_DEV_SR_VENDOR is not set -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -# CONFIG_SCSI_SAS_LIBSAS is not set -# CONFIG_SCSI_SRP_ATTRS is not set -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_SCSI_BNX2X_FCOE is not set -# CONFIG_BE2ISCSI is not set -# 
CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_DPT_I2O is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -# CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -# CONFIG_SCSI_SMARTPQI is not set -# CONFIG_SCSI_UFSHCD is not set -# CONFIG_SCSI_HPTIOP is not set -CONFIG_SCSI_BUSLOGIC=m -# CONFIG_SCSI_FLASHPOINT is not set -CONFIG_VMWARE_PVSCSI=m -CONFIG_XEN_SCSI_FRONTEND=m -CONFIG_HYPERV_STORAGE=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set -# CONFIG_FCOE_FNIC is not set -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_EATA is not set -# CONFIG_SCSI_FUTURE_DOMAIN is not set -# CONFIG_SCSI_GDTH is not set -# CONFIG_SCSI_ISCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_FC is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_LPFC is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -# CONFIG_SCSI_DH is not set -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=m -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -# CONFIG_SATA_AHCI_PLATFORM is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# 
CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -# CONFIG_ATA_GENERIC is not set -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -# CONFIG_MD_CLUSTER is not set -CONFIG_BCACHE=m -# CONFIG_BCACHE_DEBUG is not set -# CONFIG_BCACHE_CLOSURES_DEBUG is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_MQ_DEFAULT is not set -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=y -CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y -# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -# CONFIG_DM_ERA is not set -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -CONFIG_DM_INIT=y -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=y -# CONFIG_DM_VERITY_FEC is not set -# CONFIG_DM_SWITCH is not set -# CONFIG_DM_LOG_WRITES is not set -CONFIG_DM_INTEGRITY=m -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -# CONFIG_TCM_PSCSI is not set -# CONFIG_TCM_USER2 is not set -CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set -CONFIG_ISCSI_TARGET=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -# CONFIG_FUSION_LOGGING is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -# CONFIG_NET_FC is not set -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -# CONFIG_NET_TEAM_MODE_RANDOM is not set -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_GTP is not set -# CONFIG_MACSEC is not set -CONFIG_NETCONSOLE=m -# CONFIG_NETCONSOLE_DYNAMIC is not set -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=y -CONFIG_NLMON=m -# CONFIG_VSOCKMON is not set -# CONFIG_ARCNET is not set - -# -# CAIF transport drivers -# - -# -# Distributed Switch Architecture drivers -# -CONFIG_ETHERNET=y -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# 
CONFIG_ALTERA_TSE is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CISCO is not set -# CONFIG_CX_ECAT is not set -# CONFIG_DNET is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_EXAR is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -# CONFIG_E1000E_HWTS is not set -# CONFIG_IGB is not set -# CONFIG_IGBVF is not set -# CONFIG_IXGB is not set -# CONFIG_IXGBE is not set -CONFIG_IXGBEVF=m -# CONFIG_I40E is not set -# CONFIG_I40EVF is not set -# CONFIG_FM10K is not set -# CONFIG_NET_VENDOR_I825XX is not set -# CONFIG_JME is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_ETHOC is not set -# CONFIG_NET_PACKET_ENGINE is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -# CONFIG_8139TOO_8129 is not set -# CONFIG_8139_OLD_RX_RESET is not set -# CONFIG_R8169 is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_RDC is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_MDIO_DEVICE=m -CONFIG_MDIO_BUS=m -# CONFIG_MDIO_BITBANG is not set -# CONFIG_MDIO_THUNDER is not set -CONFIG_PHYLIB=m - -# -# MII PHY device drivers -# -# CONFIG_AMD_PHY is not set -# CONFIG_AQUANTIA_PHY is not set -# CONFIG_AT803X_PHY is not set -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_BROADCOM_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_CORTINA_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_DP83848_PHY is not set -# CONFIG_DP83867_PHY is not set -# CONFIG_FIXED_PHY is not set -# CONFIG_ICPLUS_PHY is not set -# CONFIG_INTEL_XWAY_PHY is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_LXT_PHY is not set -# CONFIG_MARVELL_PHY is not set -# CONFIG_MARVELL_10G_PHY is not set -# CONFIG_MICREL_PHY is not set -# CONFIG_MICROCHIP_PHY is not set -# CONFIG_MICROSEMI_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_QSEMI_PHY is not set -# 
CONFIG_REALTEK_PHY is not set -# CONFIG_ROCKCHIP_PHY is not set -# CONFIG_SMSC_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_TERANETICS_PHY is not set -# CONFIG_VITESSE_PHY is not set -# CONFIG_XILINX_GMII2RGMII is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set - -# -# Host-side USB support is needed for USB Network Adapter support -# -CONFIG_USB_NET_DRIVERS=m -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -# CONFIG_USB_RTL8152 is not set -# CONFIG_USB_LAN78XX is not set -# CONFIG_USB_USBNET is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_WLAN is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -CONFIG_XEN_NETDEV_FRONTEND=y -CONFIG_XEN_NETDEV_BACKEND=m -CONFIG_VMXNET3=m -# CONFIG_FUJITSU_ES is not set -CONFIG_HYPERV_NET=m -# CONFIG_ISDN is not set -# CONFIG_NVM is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=m -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=m -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_LIFEBOOK is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set -# CONFIG_MOUSE_PS2_VMMOUSE is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -# CONFIG_MOUSE_ELAN_I2C is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -# CONFIG_MOUSE_SYNAPTICS_USB is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -# CONFIG_INPUT_PCSPKR is not set -# CONFIG_INPUT_MMA8450 is not set -# 
CONFIG_INPUT_ATLAS_BTNS is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is not set -# CONFIG_INPUT_POWERMATE is not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_CMA3000 is not set -CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y -# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -# CONFIG_RMI4_CORE is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -CONFIG_SERIO_I8042=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -CONFIG_HYPERV_KEYBOARD=m -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_NOZOMI is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_DEVMEM=y -# CONFIG_DEVKMEM is not set - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=m -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -# CONFIG_SERIAL_8250_EXTENDED is not set -# CONFIG_SERIAL_8250_FSL is not set -# CONFIG_SERIAL_8250_DW is not set -# CONFIG_SERIAL_8250_RT288X is not set -# CONFIG_SERIAL_8250_LPSS is not set -# CONFIG_SERIAL_8250_MID is not set -# CONFIG_SERIAL_8250_MOXA is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -CONFIG_SERIAL_ARC=m -CONFIG_SERIAL_ARC_NR_PORTS=1 -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -CONFIG_SERIAL_DEV_BUS=y -CONFIG_SERIAL_DEV_CTRL_TTYPORT=y -# CONFIG_TTY_PRINTK is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_IRQ=y -CONFIG_HVC_XEN=y -CONFIG_HVC_XEN_FRONTEND=y -CONFIG_VIRTIO_CONSOLE=m -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=m -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_HW_RANDOM_TPM=m -CONFIG_NVRAM=m -# CONFIG_R3964 is not set -# CONFIG_APPLICOM is not set -# CONFIG_MWAVE is not set -CONFIG_RAW_DRIVER=m -CONFIG_MAX_RAW_DEVS=256 -CONFIG_HPET=y -CONFIG_HPET_MMAP=y -# CONFIG_HPET_MMAP_DEFAULT is not set -CONFIG_HANGCHECK_TIMER=m -CONFIG_TCG_TPM=y -CONFIG_TCG_TIS_CORE=y -CONFIG_TCG_TIS=y -# CONFIG_TCG_TIS_I2C_ATMEL is not set -# CONFIG_TCG_TIS_I2C_INFINEON is not set -# CONFIG_TCG_TIS_I2C_NUVOTON is not set -# CONFIG_TCG_NSC is not set -# CONFIG_TCG_ATMEL is not set -# CONFIG_TCG_INFINEON is not set -CONFIG_TCG_XEN=m -CONFIG_TCG_CRB=y -# 
CONFIG_TCG_VTPM_PROXY is not set -# CONFIG_TCG_TIS_ST33ZP24_I2C is not set -CONFIG_TELCLOCK=m -CONFIG_DEVPORT=y -# CONFIG_XILLYBUS is not set - -# -# I2C support -# -CONFIG_I2C=m -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -# CONFIG_I2C_CHARDEV is not set -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_ISMT is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_MLXCPLD is not set -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=m -CONFIG_PPS_DEBUG=y - -# -# PPS clients support -# -CONFIG_PPS_CLIENT_KTIMER=m -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=m -# CONFIG_DP83640_PHY is not set -CONFIG_PTP_1588_CLOCK_KVM=m -# CONFIG_GPIOLIB is not set -# CONFIG_W1 is not set -# CONFIG_POWER_AVS is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -CONFIG_HWMON=m -# CONFIG_HWMON_VID is not set -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_ABITUGURU is not set -# CONFIG_SENSORS_ABITUGURU3 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set 
-# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_K8TEMP is not set -# CONFIG_SENSORS_K10TEMP is not set -# CONFIG_SENSORS_FAM15H_POWER is not set -# CONFIG_SENSORS_APPLESMC is not set -# CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ASPEED is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -CONFIG_SENSORS_DELL_SMM=m -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_FTSTEUTATES is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_I5500 is not set -# CONFIG_SENSORS_CORETEMP is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4222 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4260 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SCH5627 is not set -# CONFIG_SENSORS_SCH5636 is not set -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# 
CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA_CPUTEMP is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -# CONFIG_SENSORS_XGENE is not set - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -# CONFIG_SENSORS_ATK0110 is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_INTEL_POWERCLAMP is not set -CONFIG_X86_PKG_TEMP_THERMAL=m -# CONFIG_INTEL_SOC_DTS_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# -# CONFIG_INT340X_THERMAL is not set -# CONFIG_INTEL_PCH_THERMAL is not set -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_SYSFS=y - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_WDAT_WDT=m -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -# CONFIG_ALIM1535_WDT is not set -# CONFIG_ALIM7101_WDT is not set -# CONFIG_F71808E_WDT is not set -# CONFIG_SP5100_TCO is not set -# CONFIG_SBC_FITPC2_WATCHDOG is not set -# CONFIG_EUROTECH_WDT is not set -# CONFIG_IB700_WDT is not set -# CONFIG_IBMASR is not set -# CONFIG_WAFER_WDT is not set -# CONFIG_I6300ESB_WDT is not set -# CONFIG_IE6XX_WDT is not set -# CONFIG_ITCO_WDT is not set -# CONFIG_IT8712F_WDT is not set -# CONFIG_IT87_WDT is not set -# CONFIG_HP_WATCHDOG is not set -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -# CONFIG_NV_TCO is not set -# CONFIG_60XX_WDT is not set -# CONFIG_CPU5_WDT is not set -# CONFIG_SMSC_SCH311X_WDT is not set -# CONFIG_SMSC37B787_WDT is not set -# CONFIG_VIA_WDT is not set -# CONFIG_W83627HF_WDT is not set -# CONFIG_W83877F_WDT is not set -# CONFIG_W83977F_WDT is not set -# CONFIG_MACHZ_WDT is not set -# CONFIG_SBC_EPX_C3_WATCHDOG is not set -# CONFIG_NI903X_WDT is not set -# CONFIG_NIC7018_WDT is not set -# CONFIG_XEN_WDT is not set - -# -# PCI-based Watchdog Cards -# -# CONFIG_PCIPCWATCHDOG is not set -# CONFIG_WDTPCI is not set - -# -# USB-based Watchdog Cards -# -# CONFIG_USBPCWATCHDOG is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=m -CONFIG_SSB_SPROM=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -# CONFIG_SSB_B43_PCI_BRIDGE is not set -# CONFIG_SSB_SILENT is not set -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y 
-CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_BCMA_POSSIBLE=y -# CONFIG_BCMA is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=m -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CROS_EC is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set -CONFIG_LPC_ICH=m -CONFIG_LPC_SCH=m -# CONFIG_MFD_INTEL_LPSS_ACPI is not set -# CONFIG_MFD_INTEL_LPSS_PCI is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RTSX_USB is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_REGULATOR is not set -# CONFIG_RC_CORE is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -# CONFIG_AGP is not set -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -# CONFIG_VGA_SWITCHEROO is not set -CONFIG_DRM=m -# CONFIG_DRM_DP_AUX_CHARDEV is not set -# CONFIG_DRM_DEBUG_MM_SELFTEST is not set -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_KMS_FB_HELPER=y -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set -CONFIG_DRM_TTM=m - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_RADEON is not set -# CONFIG_DRM_AMDGPU is not set - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_NOUVEAU is not set -# CONFIG_DRM_I915 is not set -# CONFIG_DRM_VGEM is not set -# CONFIG_DRM_VMWGFX is not set -# CONFIG_DRM_GMA500 is not set -# CONFIG_DRM_UDL is not set -# CONFIG_DRM_AST is not set -# CONFIG_DRM_MGAG200 is not set -# CONFIG_DRM_CIRRUS_QEMU is not set -# CONFIG_DRM_QXL is not set -# CONFIG_DRM_BOCHS is not set -# CONFIG_DRM_VIRTIO_GPU is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_HISI_HIBMC is not set -# CONFIG_DRM_TINYDRM is not set -# CONFIG_DRM_LEGACY is not set -# CONFIG_DRM_LIB_RANDOM is not set - -# -# Frame buffer Devices -# -CONFIG_FB=m -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=m 
-CONFIG_FB_CFB_COPYAREA=m -CONFIG_FB_CFB_IMAGEBLIT=m -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_XEN_FBDEV_FRONTEND is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_FB_AUO_K190X is not set -# CONFIG_FB_HYPERV is not set -# CONFIG_FB_SM712 is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_PLATFORM is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=m -CONFIG_BACKLIGHT_GENERIC=m -# CONFIG_BACKLIGHT_APPLE is not set -# CONFIG_BACKLIGHT_PM8941_WLED is not set -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3639 is not set -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# CONFIG_VGASTATE is not set -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -# CONFIG_VGACON_SOFT_SCROLLBACK is not set -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -# CONFIG_FRAMEBUFFER_CONSOLE is not set -# CONFIG_LOGO is not set -# CONFIG_SOUND is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=m - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACCUTOUCH is not set -# CONFIG_HID_ACRUX is not set -# CONFIG_HID_APPLE is not set -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_BETOP_FF is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_GEMBIRD is not set -# CONFIG_HID_GFRM is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# 
CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_ITE is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MAGICMOUSE is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTI is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PLANTRONICS is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_RETRODE is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -CONFIG_HID_HYPERV_MOUSE=m -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set -# CONFIG_HID_ALPS is not set - -# -# USB HID support -# -CONFIG_USB_HID=m -# CONFIG_HID_PID is not set -# CONFIG_USB_HIDDEV is not set - -# -# USB HID Boot Protocol drivers -# -# CONFIG_USB_KBD is not set -# CONFIG_USB_MOUSE is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set - -# -# Intel ISH HID support -# -# CONFIG_INTEL_ISH_HID is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=m -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=m -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -CONFIG_USB_MON=m -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=m -CONFIG_USB_XHCI_PCI=m -CONFIG_USB_XHCI_PLATFORM=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=m -CONFIG_USB_EHCI_HCD_PLATFORM=m -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PCI=m -# CONFIG_USB_OHCI_HCD_SSB is not set -CONFIG_USB_OHCI_HCD_PLATFORM=m -CONFIG_USB_UHCI_HCD=m -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_SSB is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# 
CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -CONFIG_USBIP_CORE=m -CONFIG_USBIP_VHCI_HCD=m -CONFIG_USBIP_VHCI_HC_PORTS=8 -CONFIG_USBIP_VHCI_NR_HCS=1 -CONFIG_USBIP_HOST=m -# CONFIG_USBIP_DEBUG is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -# CONFIG_USB_SERIAL is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HUB_USB251XB is not set -# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set - -# -# USB Physical Layer drivers -# -# CONFIG_USB_PHY is not set -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_GADGET is not set - -# -# USB Power Delivery and Type-C drivers -# -# CONFIG_TYPEC_UCSI is not set -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_UWB is not set -# CONFIG_MMC is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -# CONFIG_INFINIBAND_USER_MAD is not set -CONFIG_INFINIBAND_USER_ACCESS=m -# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_MLX4_INFINIBAND is not set -# CONFIG_INFINIBAND_NES is not set -# CONFIG_INFINIBAND_OCRDMA is not set -# CONFIG_INFINIBAND_VMWARE_PVRDMA is not set -# CONFIG_INFINIBAND_USNIC is not set -# CONFIG_INFINIBAND_IPOIB is not set -# CONFIG_INFINIBAND_SRP is not set -# CONFIG_INFINIBAND_SRPT is not set -# CONFIG_INFINIBAND_ISER is not set -# CONFIG_INFINIBAND_ISERT is not set -# CONFIG_INFINIBAND_OPA_VNIC is not set -# CONFIG_INFINIBAND_RDMAVT is not set -# CONFIG_RDMA_RXE is not set -# CONFIG_INFINIBAND_BNXT_RE is not set -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=y -CONFIG_EDAC_AMD64=m -# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_IE31200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_EDAC_SBRIDGE=m -CONFIG_EDAC_SKX=m -CONFIG_EDAC_PND2=m -CONFIG_RTC_LIB=y 
-CONFIG_RTC_MC146818_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -# CONFIG_RTC_NVMEM is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABX80X is not set -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8010 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV8803 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_I2C_AND_SPI=m - -# -# SPI and I2C RTC drivers -# -# CONFIG_RTC_DRV_DS3232 is not set -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1685_FAMILY is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_FTRTC010 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_INTEL_IDMA64 is not set -CONFIG_INTEL_IOATDMA=m -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set - -# -# DMA Clients -# -# CONFIG_ASYNC_TX_DMA is not set -CONFIG_DMATEST=m -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -CONFIG_DCA=m -CONFIG_AUXDISPLAY=y -# CONFIG_IMG_ASCII_LCD is not set -CONFIG_UIO=m -# CONFIG_UIO_CIF is not set -CONFIG_UIO_PDRV_GENIRQ=m -CONFIG_UIO_DMEM_GENIRQ=m -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_UIO_HV_GENERIC=m -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_VIRQFD=m -CONFIG_VFIO=m -# CONFIG_VFIO_NOIOMMU is not set -CONFIG_VFIO_PCI=m -# CONFIG_VFIO_PCI_VGA is not set -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI_IGD=y -# CONFIG_VFIO_MDEV is not set -CONFIG_IRQ_BYPASS_MANAGER=m -CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=y - -# -# Virtio drivers -# -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -# CONFIG_VIRTIO_BALLOON is not set -# CONFIG_VIRTIO_INPUT is not set 
-CONFIG_VIRTIO_MMIO=m -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set - -# -# Microsoft Hyper-V guest support -# -CONFIG_HYPERV=m -CONFIG_HYPERV_TSCPAGE=y -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_BALLOON=m - -# -# Xen driver support -# -# CONFIG_XEN_BALLOON is not set -CONFIG_XEN_DEV_EVTCHN=m -CONFIG_XEN_BACKEND=y -CONFIG_XENFS=m -CONFIG_XEN_COMPAT_XENFS=y -CONFIG_XEN_SYS_HYPERVISOR=y -CONFIG_XEN_XENBUS_FRONTEND=y -CONFIG_XEN_GNTDEV=m -CONFIG_XEN_GRANT_DEV_ALLOC=m -CONFIG_SWIOTLB_XEN=y -CONFIG_XEN_TMEM=m -CONFIG_XEN_PCIDEV_BACKEND=m -# CONFIG_XEN_PVCALLS_BACKEND is not set -# CONFIG_XEN_SCSI_BACKEND is not set -CONFIG_XEN_PRIVCMD=m -# CONFIG_XEN_ACPI_PROCESSOR is not set -# CONFIG_XEN_MCE_LOG is not set -CONFIG_XEN_HAVE_PVMMU=y -CONFIG_XEN_AUTO_XLATE=y -CONFIG_XEN_ACPI=y -# CONFIG_XEN_SYMS is not set -CONFIG_XEN_HAVE_VPMU=y -CONFIG_STAGING=y -# CONFIG_IRDA is not set -# CONFIG_COMEDI is not set -# CONFIG_RTS5208 is not set -# CONFIG_FB_SM750 is not set -# CONFIG_FB_XGI is not set - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set -# CONFIG_STAGING_MEDIA is not set - -# -# Android -# -# CONFIG_LTE_GDM724X is not set -# CONFIG_LNET is not set -# CONFIG_DGNC is not set -# CONFIG_GS_FPGABOOT is not set -# CONFIG_CRYPTO_SKEIN is not set -# CONFIG_UNISYSSPAR is not set -# CONFIG_MOST is not set -# CONFIG_GREYBUS is not set - -# -# USB Power Delivery and Type-C drivers -# -# CONFIG_TYPEC_TCPM is not set -CONFIG_DRM_VBOXVIDEO=m -CONFIG_X86_PLATFORM_DEVICES=y -# CONFIG_ACERHDF is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_DELL_LAPTOP is not set -# CONFIG_DELL_SMO8800 is not set -# CONFIG_FUJITSU_LAPTOP is not set -# CONFIG_FUJITSU_TABLET is not set -# CONFIG_HP_ACCEL is not set -# CONFIG_HP_WIRELESS is not set -# CONFIG_PANASONIC_LAPTOP is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_SENSORS_HDAPS is not set -# CONFIG_INTEL_MENLOW is not set -# CONFIG_EEEPC_LAPTOP is not set -# CONFIG_ASUS_WIRELESS is not set -# CONFIG_ACPI_WMI is not set -# CONFIG_TOPSTAR_LAPTOP is not set -# CONFIG_TOSHIBA_BT_RFKILL is not set -# CONFIG_TOSHIBA_HAPS is not set -# CONFIG_ACPI_CMPC is not set -# CONFIG_INTEL_CHT_INT33FE is not set -# CONFIG_INTEL_HID_EVENT is not set -# CONFIG_INTEL_VBTN is not set -# CONFIG_INTEL_IPS is not set -# CONFIG_INTEL_PMC_CORE is not set -# CONFIG_IBM_RTL is not set -# CONFIG_SAMSUNG_LAPTOP is not set -# CONFIG_SAMSUNG_Q10 is not set -# CONFIG_APPLE_GMUX is not set -# CONFIG_INTEL_RST is not set -# CONFIG_INTEL_SMARTCONNECT is not set -# CONFIG_PVPANIC is not set -# CONFIG_INTEL_PMC_IPC is not set -# CONFIG_SURFACE_PRO3_BUTTON is not set -# CONFIG_INTEL_PUNIT_IPC is not set -# CONFIG_MLX_PLATFORM is not set -# CONFIG_MLX_CPLD_PLATFORM is not set -CONFIG_INTEL_TURBO_MAX_3=y -CONFIG_PMC_ATOM=y -# CONFIG_CHROME_PLATFORMS is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Common Clock Framework -# -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_NXP is not set -# CONFIG_COMMON_CLK_PXA is not set -# CONFIG_COMMON_CLK_PIC32 is not set -CONFIG_HWSPINLOCK=m - -# -# Clock Source drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -# CONFIG_ATMEL_PIT is not set -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -CONFIG_MAILBOX=y -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# 
Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IOVA=y -# CONFIG_AMD_IOMMU is not set -CONFIG_DMAR_TABLE=y -CONFIG_INTEL_IOMMU=y -# CONFIG_INTEL_IOMMU_SVM is not set -# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set -CONFIG_INTEL_IOMMU_FLOPPY_WA=y -CONFIG_IRQ_REMAP=y - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# - -# -# Broadcom SoC drivers -# - -# -# i.MX SoC drivers -# - -# -# Qualcomm SoC drivers -# -# CONFIG_SUNXI_SRAM is not set -# CONFIG_SOC_TI is not set -# CONFIG_PM_DEVFREQ is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -# CONFIG_PWM is not set -CONFIG_ARM_GIC_MAX_NR=1 -# CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set -# CONFIG_FMC is not set - -# -# PHY Subsystem -# -# CONFIG_GENERIC_PHY is not set -# CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -CONFIG_RAS=y -# CONFIG_THUNDERBOLT is not set - -# -# Android -# -# CONFIG_ANDROID is not set -# CONFIG_LIBNVDIMM is not set -CONFIG_DAX=y -# CONFIG_DEV_DAX is not set -# CONFIG_NVMEM is not set -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set -# CONFIG_FPGA is not set - -# -# FSI support -# -CONFIG_FSI=m -CONFIG_FSI_MASTER_HUB=m -CONFIG_FSI_SCOM=m - -# -# Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=m -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -# CONFIG_ISCSI_IBFT_FIND is not set -# CONFIG_FW_CFG_SYSFS is not set -# CONFIG_GOOGLE_FIRMWARE is not set -CONFIG_UEFI_CPER=y -# CONFIG_EFI_DEV_PATH_PARSER is not set - -# -# Tegra firmware driver -# - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_FS_IOMAP=y -CONFIG_EXT2_FS=m -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=m -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_DEBUG=y -CONFIG_JBD2=y -CONFIG_JBD2_DEBUG=y -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -# CONFIG_JFS_DEBUG is not set -CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -CONFIG_NILFS2_FS=m -# CONFIG_F2FS_FS is not set -# CONFIG_FS_DAX is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_FILE_LOCKING=y -CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=m -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=m 
-CONFIG_QUOTACTL=y -CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS4_FS=m -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_OVERLAY_FS=y -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -# CONFIG_OVERLAY_FS_INDEX is not set - -# -# Caches -# -CONFIG_FSCACHE=m -# CONFIG_FSCACHE_STATS is not set -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -CONFIG_NTFS_RW=y - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=m -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -CONFIG_ECRYPT_FS=m -# CONFIG_ECRYPT_FS_MESSAGING is not set -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -# CONFIG_HFSPLUS_FS_POSIX_ACL is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_SQUASHFS_ZSTD=y -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -CONFIG_ROMFS_FS=m -CONFIG_ROMFS_BACKED_BY_BLOCK=y -CONFIG_ROMFS_ON_BLOCK=y -CONFIG_PSTORE=y -CONFIG_PSTORE_ZLIB_COMPRESS=y -# CONFIG_PSTORE_LZO_COMPRESS is not set -# CONFIG_PSTORE_LZ4_COMPRESS is not set -# CONFIG_PSTORE_CONSOLE is not set -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=m -# CONFIG_SYSV_FS is not set -CONFIG_UFS_FS=m -# CONFIG_UFS_FS_WRITE is not set -# CONFIG_UFS_DEBUG is not set -# CONFIG_EXOFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V2=m -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -# CONFIG_NFSD_SCSILAYOUT is not set -# CONFIG_NFSD_FLEXFILELAYOUT is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_GRACE_PERIOD=m 
-CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_SUNRPC_DEBUG=y -# CONFIG_SUNRPC_XPRT_RDMA is not set -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -CONFIG_CIFS_STATS2=y -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_ACL=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SMB311 is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -CONFIG_AFS_FS=m -# CONFIG_AFS_DEBUG is not set -CONFIG_AFS_FSCACHE=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -CONFIG_UNUSED_SYMBOLS=y -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_STACK_VALIDATION=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE 
is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -# CONFIG_KCOV is not set -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -CONFIG_WQ_WATCHDOG=y -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_TIMEKEEPING is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_PROVE_RCU is not set -# CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_PERF_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=59 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_LATENCYTOP=y -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_RING_BUFFER_ALLOW_SWAP=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -CONFIG_SCHED_TRACER=y -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENTS=y -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -# CONFIG_FUNCTION_PROFILER is not 
set -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -CONFIG_TRACE_EVAL_MAP_FILE=y -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_DMA_API_DEBUG is not set - -# -# Runtime Testing -# -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -CONFIG_INTERVAL_TREE_TEST=m -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_ASYNC_RAID6_TEST is not set -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_MEMTEST is not set -CONFIG_BUG_ON_DATA_CORRUPTION=y -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_SERIAL_CONSOLE=m -# CONFIG_KGDB_TESTS is not set -CONFIG_KGDB_LOW_LEVEL_TRAP=y -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x1 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set -# CONFIG_UBSAN is not set -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_EARLY_PRINTK_DBGP is not set -# CONFIG_EARLY_PRINTK_USB_XDBC is not set -# CONFIG_X86_PTDUMP_CORE is not set -# CONFIG_X86_PTDUMP is not set -# CONFIG_DEBUG_WX is not set -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -# CONFIG_X86_DECODER_SELFTEST is not set -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -# CONFIG_X86_DEBUG_FPU is not set -# CONFIG_PUNIT_ATOM_DEBUG is not set -CONFIG_UNWINDER_ORC=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# CONFIG_UNWINDER_GUESS is not set - -# -# Security options -# -CONFIG_KEYS=y -CONFIG_KEYS_COMPAT=y -CONFIG_PERSISTENT_KEYRINGS=y -# CONFIG_BIG_KEYS is not set -CONFIG_TRUSTED_KEYS=m -CONFIG_ENCRYPTED_KEYS=m -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITY_WRITABLE_HOOKS=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_PAGE_TABLE_ISOLATION=y -CONFIG_SECURITY_INFINIBAND=y -CONFIG_SECURITY_NETWORK_XFRM=y -# CONFIG_SECURITY_PATH is not set -CONFIG_INTEL_TXT=y -CONFIG_LSM_MMAP_MIN_ADDR=65536 -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -# CONFIG_HARDENED_USERCOPY is not set -CONFIG_FORTIFY_SOURCE=y -# 
CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -# CONFIG_SECURITY_YAMA is not set -CONFIG_INTEGRITY=y -# CONFIG_INTEGRITY_SIGNATURE is not set -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -# CONFIG_IMA_TEMPLATE is not set -CONFIG_IMA_NG_TEMPLATE=y -# CONFIG_IMA_SIG_TEMPLATE is not set -CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" -CONFIG_IMA_DEFAULT_HASH_SHA1=y -# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -CONFIG_IMA_DEFAULT_HASH="sha1" -# CONFIG_IMA_WRITE_POLICY is not set -# CONFIG_IMA_READ_POLICY is not set -# CONFIG_IMA_APPRAISE is not set -# CONFIG_EVM is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=y -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -CONFIG_CRYPTO_ECDH=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_ABLK_HELPER=m -CONFIG_CRYPTO_SIMD=m -CONFIG_CRYPTO_GLUE_HELPER_X86=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=m - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_KEYWRAP=m - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRC32_PCLMUL=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y -CONFIG_CRYPTO_GHASH=m -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_POLY1305_X86_64=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_SSSE3=y -CONFIG_CRYPTO_SHA256_SSSE3=y -CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SHA1_MB=m -CONFIG_CRYPTO_SHA256_MB=m -CONFIG_CRYPTO_SHA512_MB=m -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m - -# -# Ciphers -# 
-CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_TI=y -CONFIG_CRYPTO_AES_X86_64=m -CONFIG_CRYPTO_AES_NI_INTEL=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_BLOWFISH_X86_64=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAMELLIA_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST5_AVX_X86_64=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_CAST6_AVX_X86_64=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_DES3_EDE_X86_64=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CHACHA20_X86_64=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_X86_64=m -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=m -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_USER_API=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_CRYPTO_HASH_INFO=y -# CONFIG_CRYPTO_HW is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y -CONFIG_PKCS7_TEST_KEY=m -CONFIG_SIGNED_PE_FILE_VERIFICATION=y - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_MMU_AUDIT=y -CONFIG_VHOST_NET=m -# CONFIG_VHOST_SCSI is not set -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -# CONFIG_HAVE_ARCH_BITREVERSE is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_IO=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -CONFIG_CRC32_SELFTEST=y -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC4=m -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -# CONFIG_CRC8 is not set -CONFIG_XXHASH=m -# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set -# 
CONFIG_RANDOM32_SELFTEST is not set -CONFIG_842_COMPRESS=m -CONFIG_842_DECOMPRESS=m -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=m -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -# CONFIG_XZ_DEC_POWERPC is not set -# CONFIG_XZ_DEC_IA64 is not set -# CONFIG_XZ_DEC_ARM is not set -# CONFIG_XZ_DEC_ARMTHUMB is not set -# CONFIG_XZ_DEC_SPARC is not set -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=m -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_INTERVAL_TREE=y -CONFIG_RADIX_TREE_MULTIORDER=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -# CONFIG_DMA_NOOP_OPS is not set -# CONFIG_DMA_VIRT_OPS is not set -CONFIG_CPUMASK_OFFSTACK=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_CLZ_TAB=y -# CONFIG_CORDIC is not set -# CONFIG_DDR is not set -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_OID_REGISTRY=y -# CONFIG_SG_SPLIT is not set -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_SBITMAP=y -# CONFIG_STRING_SELFTEST is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=y diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index c067676b..088d3459 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,17 +1,21 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 4.14.102 +Version: 4.19.58 Release: 1%{?dist} Summary: The Linux kernel License: GPLv2 and Redistributable, no modification permitted URL: https://www.kernel.org/ -Source0: https://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz -Source100: config-%{_cross_arch} -Patch1000: dm-add-support-to-directly-boot-to-a-mapped-device.patch -Patch1001: selinux-use-kernel-linux-socket.h-for-genheaders-and.patch -Patch1002: random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch -Patch1003: random-make-CPU-trust-a-boot-parameter.patch +# Use latest-srpm-url.sh to get this. +Source0: https://cdn.amazonlinux.com/blobstore/f768a50d6e52a712310bc97ddb087e19df240972cc99a59c7921b90901874521/kernel-4.19.58-21.57.amzn2.src.rpm +Source100: config-thar +Patch0001: 0001-dm-add-support-to-directly-boot-to-a-mapped-device.patch +Patch0002: 0002-dm-init-fix-const-confusion-for-dm_allowed_targets-a.patch +Patch0003: 0003-dm-init-fix-max-devices-targets-checks.patch +Patch0004: 0004-dm-ioctl-fix-hang-in-early-create-error-condition.patch +Patch0005: 0005-dm-init-fix-incorrect-uses-of-kstrndup.patch +Patch0006: 0006-dm-init-remove-trailing-newline-from-calls-to-DMERR-.patch +Patch0007: 0007-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: gcc-%{_cross_target} @@ -35,8 +39,18 @@ Summary: Header files for the Linux kernel for use by glibc %{summary}. 
%prep -%autosetup -n linux-%{version} -p1 -cp %{SOURCE100} "arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" +rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" +tar -xof linux-%{version}.tar; rm linux-%{version}.tar +%setup -TDn linux-%{version} +# Patches from the Source0 SRPM +for patch in ../*.patch; do + patch -p1 <"$patch" +done +# Patches listed in this spec (Patch0001...) +%autopatch -p1 +KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ + scripts/kconfig/merge_config.sh ../config-%{_cross_arch} %{SOURCE100} +rm -f ../config-%{_cross_arch} ../*.patch %global kmake \ make -s\\\ diff --git a/packages/kernel/latest-srpm-url.sh b/packages/kernel/latest-srpm-url.sh new file mode 100755 index 00000000..3913cef9 --- /dev/null +++ b/packages/kernel/latest-srpm-url.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-ng >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' diff --git a/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch b/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch deleted file mode 100644 index 0cfa7508..00000000 --- a/packages/kernel/random-add-a-config-option-to-trust-the-CPU-s-hwrng.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 538b177b9031dcc46bc14ae358525892c35e2581 Mon Sep 17 00:00:00 2001 -From: Theodore Ts'o -Date: Tue, 17 Jul 2018 18:24:27 -0400 -Subject: [PATCH 1/2] random: add a config option to trust the CPU's hwrng - -This gives the user building their own kernel (or a Linux -distribution) the option of deciding whether or not to trust the CPU's -hardware random number generator (e.g., RDRAND for x86 CPU's) as being -correctly implemented and not having a back door introduced (perhaps -courtesy of a Nation State's law enforcement or intelligence -agencies). - -This will prevent getrandom(2) from blocking, if there is a -willingness to trust the CPU manufacturer. - -Signed-off-by: Theodore Ts'o ---- - drivers/char/Kconfig | 14 ++++++++++++++ - drivers/char/random.c | 11 ++++++++++- - 2 files changed, 24 insertions(+), 1 deletion(-) - -diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index c28dca0c613d..b732016921a2 100644 ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -590,3 +590,17 @@ source "drivers/char/xillybus/Kconfig" - - endmenu - -+config RANDOM_TRUST_CPU -+ bool "Trust the CPU manufacturer to initialize Linux's CRNG" -+ depends on X86 || S390 || PPC -+ default n -+ help -+ Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or -+ RDRAND, IBM for the S390 and Power PC architectures) is trustworthy -+ for the purposes of initializing Linux's CRNG. Since this is not -+ something that can be independently audited, this amounts to trusting -+ that CPU manufacturer (perhaps with the insistence or mandate -+ of a Nation State's intelligence or law enforcement agencies) -+ has not installed a hidden back door to compromise the CPU's -+ random number generation facilities. 
-+ -diff --git a/drivers/char/random.c b/drivers/char/random.c -index 8ad92707e45f..efdb37fa18ed 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -770,6 +770,7 @@ static void invalidate_batched_entropy(void); - static void crng_initialize(struct crng_state *crng) - { - int i; -+ int arch_init = 1; - unsigned long rv; - - memcpy(&crng->state[0], "expand 32-byte k", 16); -@@ -780,10 +781,18 @@ static void crng_initialize(struct crng_state *crng) - _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); - for (i = 4; i < 16; i++) { - if (!arch_get_random_seed_long(&rv) && -- !arch_get_random_long(&rv)) -+ !arch_get_random_long(&rv)) { - rv = random_get_entropy(); -+ arch_init = 0; -+ } - crng->state[i] ^= rv; - } -+#ifdef CONFIG_RANDOM_TRUST_CPU -+ if (arch_init) { -+ crng_init = 2; -+ pr_notice("random: crng done (trusting CPU's manufacturer)\n"); -+ } -+#endif - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; - } - --- -2.21.0 - diff --git a/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch b/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch deleted file mode 100644 index 7a90b5bd..00000000 --- a/packages/kernel/random-make-CPU-trust-a-boot-parameter.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 8438d10a15a63e3ebe7f73b134423c988712eec8 Mon Sep 17 00:00:00 2001 -From: Kees Cook -Date: Mon, 27 Aug 2018 14:51:54 -0700 -Subject: [PATCH 2/2] random: make CPU trust a boot parameter - -Instead of forcing a distro or other system builder to choose -at build time whether the CPU is trusted for CRNG seeding via -CONFIG_RANDOM_TRUST_CPU, provide a boot-time parameter for end users to -control the choice. The CONFIG will set the default state instead. - -Signed-off-by: Kees Cook -Signed-off-by: Theodore Ts'o ---- - Documentation/admin-guide/kernel-parameters.txt | 6 ++++++ - drivers/char/Kconfig | 4 ++-- - drivers/char/random.c | 11 ++++++++--- - 3 files changed, 16 insertions(+), 5 deletions(-) - -diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 05496622b4ef..915013dab0bf 100644 ---- a/Documentation/admin-guide/kernel-parameters.txt -+++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -3267,6 +3267,12 @@ - ramdisk_size= [RAM] Sizes of RAM disks in kilobytes - See Documentation/blockdev/ramdisk.txt. - -+ random.trust_cpu={on,off} -+ [KNL] Enable or disable trusting the use of the -+ CPU's random number generator (if available) to -+ fully seed the kernel's CRNG. Default is controlled -+ by CONFIG_RANDOM_TRUST_CPU. -+ - ras=option[,option,...] [KNL] RAS-specific options - - cec_disable [X86] -diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index b732016921a2..6328b571b4b8 100644 ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -602,5 +602,5 @@ config RANDOM_TRUST_CPU - that CPU manufacturer (perhaps with the insistence or mandate - of a Nation State's intelligence or law enforcement agencies) - has not installed a hidden back door to compromise the CPU's -- random number generation facilities. -- -+ random number generation facilities. This can also be configured -+ at boot with "random.trust_cpu=on/off". 
-diff --git a/drivers/char/random.c b/drivers/char/random.c -index efdb37fa18ed..5f3955220487 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -767,6 +767,13 @@ static struct crng_state **crng_node_pool __read_mostly; - - static void invalidate_batched_entropy(void); - -+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); -+static int __init parse_trust_cpu(char *arg) -+{ -+ return kstrtobool(arg, &trust_cpu); -+} -+early_param("random.trust_cpu", parse_trust_cpu); -+ - static void crng_initialize(struct crng_state *crng) - { - int i; -@@ -787,12 +794,10 @@ static void crng_initialize(struct crng_state *crng) - } - crng->state[i] ^= rv; - } --#ifdef CONFIG_RANDOM_TRUST_CPU -- if (arch_init) { -+ if (trust_cpu && arch_init) { - crng_init = 2; - pr_notice("random: crng done (trusting CPU's manufacturer)\n"); - } --#endif - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; - } - --- -2.21.0 - diff --git a/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch b/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch deleted file mode 100644 index 725c82e0..00000000 --- a/packages/kernel/selinux-use-kernel-linux-socket.h-for-genheaders-and.patch +++ /dev/null @@ -1,70 +0,0 @@ -From dfbd199a7cfe3e3cd8531e1353cdbd7175bfbc5e Mon Sep 17 00:00:00 2001 -From: Paulo Alcantara -Date: Sun, 24 Feb 2019 21:55:28 -0300 -Subject: [PATCH] selinux: use kernel linux/socket.h for genheaders and mdp - -When compiling genheaders and mdp from a newer host kernel, the -following error happens: - - In file included from scripts/selinux/genheaders/genheaders.c:18: - ./security/selinux/include/classmap.h:238:2: error: #error New - address family defined, please update secclass_map. #error New - address family defined, please update secclass_map. ^~~~~ - make[3]: *** [scripts/Makefile.host:107: - scripts/selinux/genheaders/genheaders] Error 1 make[2]: *** - [scripts/Makefile.build:599: scripts/selinux/genheaders] Error 2 - make[1]: *** [scripts/Makefile.build:599: scripts/selinux] Error 2 - make[1]: *** Waiting for unfinished jobs.... - -Instead of relying on the host definition, include linux/socket.h in -classmap.h to have PF_MAX. 
- -Cc: stable@vger.kernel.org -Signed-off-by: Paulo Alcantara -Acked-by: Stephen Smalley -[PM: manually merge in mdp.c, subject line tweaks] -Signed-off-by: Paul Moore ---- - scripts/selinux/genheaders/genheaders.c | 1 - - scripts/selinux/mdp/mdp.c | 1 - - security/selinux/include/classmap.h | 1 + - 3 files changed, 1 insertion(+), 2 deletions(-) - -diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c -index 1ceedea847dd..544ca126a8a8 100644 ---- a/scripts/selinux/genheaders/genheaders.c -+++ b/scripts/selinux/genheaders/genheaders.c -@@ -9,7 +9,6 @@ - #include - #include - #include --#include - - struct security_class_mapping { - const char *name; -diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c -index 073fe7537f6c..6d51b74bc679 100644 ---- a/scripts/selinux/mdp/mdp.c -+++ b/scripts/selinux/mdp/mdp.c -@@ -32,7 +32,6 @@ - #include - #include - #include --#include - - static void usage(char *name) - { -diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h -index bd5fe0d3204a..201f7e588a29 100644 ---- a/security/selinux/include/classmap.h -+++ b/security/selinux/include/classmap.h -@@ -1,5 +1,6 @@ - /* SPDX-License-Identifier: GPL-2.0 */ - #include -+#include - - #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ - "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" --- -2.21.0 - diff --git a/packages/kernel/sources b/packages/kernel/sources index 2f90cb91..d4383402 100644 --- a/packages/kernel/sources +++ b/packages/kernel/sources @@ -1 +1 @@ -SHA512 (linux-4.14.102.tar.xz) = bdc387dcaa6a585ca01cfc2bf04bf93024d8512dce1a5921c6ce6b55847d663b0d1bf24cd18e87ae200d9713eefd0ea2f866577b1a236e928ca0bfbc49589a53 +SHA512 (kernel-4.19.58-21.57.amzn2.src.rpm) = b72bb8574078bd1ff58bd53013a608bdb4bc5070eaf8bd55b898bd946d3628ce4e1a25f8b663914a01df35817d22380e060141b17700811eef0cc26a8d04e27f From f1c4cbf0846432f0adf2effed34acfa1d7973558 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 6 Aug 2019 17:49:00 +0000 Subject: [PATCH 0051/1356] grub: update to 2.04 Signed-off-by: iliana destroyer of worlds --- packages/grub/.gitignore | 2 +- ...-root-device-argument-to-grub-setup.patch} | 102 +- ...reat-R_X86_64_PLT32-as-R_X86_64_PC32.patch | 74 - .../grub/0002-gpt-start-new-GPT-module.patch | 920 +++ ...name-misnamed-header-location-fields.patch | 68 + ...-record-size-of-of-the-entries-table.patch | 134 + ...t-consolidate-crc32-computation-code.patch | 116 + ...ir-function-to-sync-up-primary-and-b.patch | 214 + ...write-function-and-gptrepair-command.patch | 364 ++ ...0008-gpt-add-a-new-generic-GUID-type.patch | 122 + ...next-command-for-selecting-priority-.patch | 526 ++ ...gpt-split-out-checksum-recomputation.patch | 86 + ...d-printing-function-to-common-librar.patch | 77 + ...tch-partition-names-to-a-16-bit-type.patch | 24 + ...partitions-to-the-gpt-unit-test-data.patch | 124 + ...by-partition-label-and-uuid-commands.patch | 443 ++ ...n-up-little-endian-crc32-computation.patch | 113 + packages/grub/0016-gpt-minor-cleanup.patch | 66 + ...-gpt-add-search-by-disk-uuid-command.patch | 269 + ...isk-sizes-GRUB-will-reject-as-invali.patch | 64 + .../0019-gpt-add-verbose-debug-logging.patch | 251 + ...pt-improve-validation-of-GPT-headers.patch | 101 + ...0021-gpt-refuse-to-write-to-sector-0.patch | 31 + ...rly-detect-and-repair-invalid-tables.patch | 39 + ...repair_test-fix-typo-in-cleanup-trap.patch | 22 + ...eck-GPT-is-repaired-when-appropriate.patch | 107 + 
...tition-table-indexing-and-validation.patch | 323 + ...-disk-size-from-header-over-firmware.patch | 114 + ...dd-helper-for-picking-a-valid-header.patch | 68 + .../0028-gptrepair-fix-status-checking.patch | 64 + ...e-functions-for-checking-status-bits.patch | 133 + ...30-gpt-allow-repair-function-to-noop.patch | 42 + ...ot-use-an-enum-for-status-bit-values.patch | 46 + ...der-and-entries-status-bits-together.patch | 46 + ...reful-about-relocating-backup-header.patch | 51 + ...ectively-update-fields-during-repair.patch | 75 + ...evalidate-when-recomputing-checksums.patch | 72 + ...backup-in-sync-check-in-revalidation.patch | 37 + ...-table-at-the-same-time-as-the-heade.patch | 131 + ...8-gpt-report-all-revalidation-errors.patch | 37 + ...pdate-documentation-for-grub_gpt_upd.patch | 68 + ...ackup-GPT-first-skip-if-inaccessible.patch | 69 + ...io-Use-Thar-boot-partition-type-GUID.patch | 55 + packages/grub/gpt.patch | 5725 ----------------- packages/grub/grub.spec | 47 +- packages/grub/sources | 2 +- 46 files changed, 5819 insertions(+), 5845 deletions(-) rename packages/grub/{100-grub_setup_root.patch => 0001-setup-Add-root-device-argument-to-grub-setup.patch} (73%) delete mode 100644 packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch create mode 100644 packages/grub/0002-gpt-start-new-GPT-module.patch create mode 100644 packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch create mode 100644 packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch create mode 100644 packages/grub/0005-gpt-consolidate-crc32-computation-code.patch create mode 100644 packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch create mode 100644 packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch create mode 100644 packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch create mode 100644 packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch create mode 100644 packages/grub/0010-gpt-split-out-checksum-recomputation.patch create mode 100644 packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch create mode 100644 packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch create mode 100644 packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch create mode 100644 packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch create mode 100644 packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch create mode 100644 packages/grub/0016-gpt-minor-cleanup.patch create mode 100644 packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch create mode 100644 packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch create mode 100644 packages/grub/0019-gpt-add-verbose-debug-logging.patch create mode 100644 packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch create mode 100644 packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch create mode 100644 packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch create mode 100644 packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch create mode 100644 packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch create mode 100644 packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch create mode 100644 packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch create mode 100644 packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch create mode 100644 
packages/grub/0028-gptrepair-fix-status-checking.patch create mode 100644 packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch create mode 100644 packages/grub/0030-gpt-allow-repair-function-to-noop.patch create mode 100644 packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch create mode 100644 packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch create mode 100644 packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch create mode 100644 packages/grub/0034-gpt-selectively-update-fields-during-repair.patch create mode 100644 packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch create mode 100644 packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch create mode 100644 packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch create mode 100644 packages/grub/0038-gpt-report-all-revalidation-errors.patch create mode 100644 packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch create mode 100644 packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch create mode 100644 packages/grub/0041-gptprio-Use-Thar-boot-partition-type-GUID.patch delete mode 100644 packages/grub/gpt.patch diff --git a/packages/grub/.gitignore b/packages/grub/.gitignore index b619e359..6407ec63 100644 --- a/packages/grub/.gitignore +++ b/packages/grub/.gitignore @@ -1 +1 @@ -grub-2.02.tar.xz +grub-2.04.tar.xz diff --git a/packages/grub/100-grub_setup_root.patch b/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch similarity index 73% rename from packages/grub/100-grub_setup_root.patch rename to packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch index f053ba95..7d15b4b7 100644 --- a/packages/grub/100-grub_setup_root.patch +++ b/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch @@ -1,3 +1,61 @@ +From 9ddb865df4ea2fb79edaf6c3a8b0122796800014 Mon Sep 17 00:00:00 2001 +From: iliana destroyer of worlds +Date: Tue, 6 Aug 2019 17:37:19 +0000 +Subject: [PATCH] setup: Add root device argument to grub-setup + +This patch originates from the OpenWRT tree: +https://github.com/openwrt/openwrt/blob/65c8f2890ca4f41f5b933b5bc1e43de86cc1bd54/package/boot/grub2/patches/100-grub_setup_root.patch +--- + include/grub/util/install.h | 4 ++-- + util/grub-install.c | 4 ++-- + util/grub-setup.c | 12 +++++++++++- + util/setup.c | 10 +++++++--- + 4 files changed, 22 insertions(+), 8 deletions(-) + +diff --git a/include/grub/util/install.h b/include/grub/util/install.h +index 2631b1074..ff02c365c 100644 +--- a/include/grub/util/install.h ++++ b/include/grub/util/install.h +@@ -191,13 +191,13 @@ grub_install_get_image_target (const char *arg); + void + grub_util_bios_setup (const char *dir, + const char *boot_file, const char *core_file, +- const char *dest, int force, ++ const char *root, const char *dest, int force, + int fs_probe, int allow_floppy, + int add_rs_codes); + void + grub_util_sparc_setup (const char *dir, + const char *boot_file, const char *core_file, +- const char *dest, int force, ++ const char *root, const char *dest, int force, + int fs_probe, int allow_floppy, + int add_rs_codes); + +diff --git a/util/grub-install.c b/util/grub-install.c +index 8a55ad4b8..c0a2c5ec0 100644 +--- a/util/grub-install.c ++++ b/util/grub-install.c +@@ -1712,7 +1712,7 @@ main (int argc, char *argv[]) + /* Now perform the installation. 
*/ + if (install_bootsector) + grub_util_bios_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, add_rs_codes); + break; + } +@@ -1738,7 +1738,7 @@ main (int argc, char *argv[]) + /* Now perform the installation. */ + if (install_bootsector) + grub_util_sparc_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, + 0 /* unused */ ); + break; +diff --git a/util/grub-setup.c b/util/grub-setup.c +index 42b98ad3c..ae1f98f75 100644 --- a/util/grub-setup.c +++ b/util/grub-setup.c @@ -87,6 +87,8 @@ static struct argp_option options[] = { @@ -40,9 +98,11 @@ arguments.fs_probe, arguments.allow_floppy, arguments.add_rs_codes); +diff --git a/util/setup.c b/util/setup.c +index 6f88f3cc4..bd7bb9c86 100644 --- a/util/setup.c +++ b/util/setup.c -@@ -247,13 +247,12 @@ identify_partmap (grub_disk_t disk __attribute__ ((unused)), +@@ -252,13 +252,12 @@ identify_partmap (grub_disk_t disk __attribute__ ((unused)), void SETUP (const char *dir, const char *boot_file, const char *core_file, @@ -55,8 +115,8 @@ char *boot_img, *core_img, *boot_path; - char *root = 0; size_t boot_size, core_size; - #ifdef GRUB_SETUP_BIOS grub_uint16_t core_sectors; + grub_device_t root_dev = 0, dest_dev, core_dev; @@ -307,7 +306,10 @@ SETUP (const char *dir, core_dev = dest_dev; @@ -78,41 +138,3 @@ drive = grub_util_get_grub_dev (*cur); if (!drive) continue; ---- a/include/grub/util/install.h -+++ b/include/grub/util/install.h -@@ -184,13 +184,13 @@ grub_install_get_image_target (const char *arg); - void - grub_util_bios_setup (const char *dir, - const char *boot_file, const char *core_file, -- const char *dest, int force, -+ const char *root, const char *dest, int force, - int fs_probe, int allow_floppy, - int add_rs_codes); - void - grub_util_sparc_setup (const char *dir, - const char *boot_file, const char *core_file, -- const char *dest, int force, -+ const char *root, const char *dest, int force, - int fs_probe, int allow_floppy, - int add_rs_codes); - ---- a/util/grub-install.c -+++ b/util/grub-install.c -@@ -1673,7 +1673,7 @@ main (int argc, char *argv[]) - /* Now perform the installation. */ - if (install_bootsector) - grub_util_bios_setup (platdir, "boot.img", "core.img", -- install_drive, force, -+ NULL, install_drive, force, - fs_probe, allow_floppy, add_rs_codes); - break; - } -@@ -1699,7 +1699,7 @@ main (int argc, char *argv[]) - /* Now perform the installation. */ - if (install_bootsector) - grub_util_sparc_setup (platdir, "boot.img", "core.img", -- install_drive, force, -+ NULL, install_drive, force, - fs_probe, allow_floppy, - 0 /* unused */ ); - break; diff --git a/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch b/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch deleted file mode 100644 index cd8b5e73..00000000 --- a/packages/grub/0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 842c390469e2c2e10b5aa36700324cd3bde25875 Mon Sep 17 00:00:00 2001 -From: "H.J. Lu" -Date: Sat, 17 Feb 2018 06:47:28 -0800 -Subject: [PATCH] x86-64: Treat R_X86_64_PLT32 as R_X86_64_PC32 - -Starting from binutils commit bd7ab16b4537788ad53521c45469a1bdae84ad4a: - -https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=bd7ab16b4537788ad53521c45469a1bdae84ad4a - -x86-64 assembler generates R_X86_64_PLT32, instead of R_X86_64_PC32, for -32-bit PC-relative branches. Grub2 should treat R_X86_64_PLT32 as -R_X86_64_PC32. 
- -Signed-off-by: H.J. Lu -Reviewed-by: Daniel Kiper -Signed-off-by: Romain Naour ---- - grub-core/efiemu/i386/loadcore64.c | 1 + - grub-core/kern/x86_64/dl.c | 1 + - util/grub-mkimagexx.c | 1 + - util/grub-module-verifier.c | 1 + - 4 files changed, 4 insertions(+) - -diff --git a/grub-core/efiemu/i386/loadcore64.c b/grub-core/efiemu/i386/loadcore64.c -index e49d0b6..18facf4 100644 ---- a/grub-core/efiemu/i386/loadcore64.c -+++ b/grub-core/efiemu/i386/loadcore64.c -@@ -98,6 +98,7 @@ grub_arch_efiemu_relocate_symbols64 (grub_efiemu_segment_t segs, - break; - - case R_X86_64_PC32: -+ case R_X86_64_PLT32: - err = grub_efiemu_write_value (addr, - *addr32 + rel->r_addend - + sym.off -diff --git a/grub-core/kern/x86_64/dl.c b/grub-core/kern/x86_64/dl.c -index 4406906..3a73e6e 100644 ---- a/grub-core/kern/x86_64/dl.c -+++ b/grub-core/kern/x86_64/dl.c -@@ -70,6 +70,7 @@ grub_arch_dl_relocate_symbols (grub_dl_t mod, void *ehdr, - break; - - case R_X86_64_PC32: -+ case R_X86_64_PLT32: - { - grub_int64_t value; - value = ((grub_int32_t) *addr32) + rel->r_addend + sym->st_value - -diff --git a/util/grub-mkimagexx.c b/util/grub-mkimagexx.c -index a2bb054..39d7efb 100644 ---- a/util/grub-mkimagexx.c -+++ b/util/grub-mkimagexx.c -@@ -841,6 +841,7 @@ SUFFIX (relocate_addresses) (Elf_Ehdr *e, Elf_Shdr *sections, - break; - - case R_X86_64_PC32: -+ case R_X86_64_PLT32: - { - grub_uint32_t *t32 = (grub_uint32_t *) target; - *t32 = grub_host_to_target64 (grub_target_to_host32 (*t32) -diff --git a/util/grub-module-verifier.c b/util/grub-module-verifier.c -index 9179285..a79271f 100644 ---- a/util/grub-module-verifier.c -+++ b/util/grub-module-verifier.c -@@ -19,6 +19,7 @@ struct grub_module_verifier_arch archs[] = { - -1 - }, (int[]){ - R_X86_64_PC32, -+ R_X86_64_PLT32, - -1 - } - }, --- -2.7.4 - diff --git a/packages/grub/0002-gpt-start-new-GPT-module.patch b/packages/grub/0002-gpt-start-new-GPT-module.patch new file mode 100644 index 00000000..d850692f --- /dev/null +++ b/packages/grub/0002-gpt-start-new-GPT-module.patch @@ -0,0 +1,920 @@ +From 46217d1569b652fe38169a788a079b373cf6c91f Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sun, 28 Sep 2014 21:26:21 -0700 +Subject: [PATCH] gpt: start new GPT module + +This module is a new implementation for reading GUID Partition Tables +which is much stricter than the existing part_gpt module and exports GPT +data directly instead of the generic grub_partition structure. It will +be the basis for modules that need to read/write/update GPT data. + +The current code does nothing more than read and verify the table. 
+--- + Makefile.util.def | 16 ++ + grub-core/Makefile.core.def | 5 + + grub-core/lib/gpt.c | 288 +++++++++++++++++++++ + include/grub/gpt_partition.h | 60 +++++ + tests/gpt_unit_test.c | 467 +++++++++++++++++++++++++++++++++++ + 5 files changed, 836 insertions(+) + create mode 100644 grub-core/lib/gpt.c + create mode 100644 tests/gpt_unit_test.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index 969d32f00..af8a008e2 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1270,6 +1270,22 @@ program = { + ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; + }; + ++program = { ++ testcase; ++ name = gpt_unit_test; ++ common = tests/gpt_unit_test.c; ++ common = tests/lib/unit_test.c; ++ common = grub-core/disk/host.c; ++ common = grub-core/kern/emu/hostfs.c; ++ common = grub-core/lib/gpt.c; ++ common = grub-core/tests/lib/test.c; ++ ldadd = libgrubmods.a; ++ ldadd = libgrubgcry.a; ++ ldadd = libgrubkern.a; ++ ldadd = grub-core/gnulib/libgnu.a; ++ ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; ++}; ++ + program = { + name = grub-menulst2cfg; + mansection = 1; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 474a63e68..95dba2d26 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -893,6 +893,11 @@ module = { + common = commands/gptsync.c; + }; + ++module = { ++ name = gpt; ++ common = lib/gpt.c; ++}; ++ + module = { + name = halt; + nopc = commands/halt.c; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +new file mode 100644 +index 000000000..a308e8537 +--- /dev/null ++++ b/grub-core/lib/gpt.c +@@ -0,0 +1,288 @@ ++/* gpt.c - Read/Verify/Write GUID Partition Tables (GPT). */ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2002,2005,2006,2007,2008 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; ++ ++ ++static grub_err_t ++grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) ++{ ++ grub_uint8_t *crc32_context; ++ grub_uint32_t old; ++ ++ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); ++ if (!crc32_context) ++ return grub_errno; ++ ++ /* crc32 must be computed with the field cleared. */ ++ old = gpt->crc32; ++ gpt->crc32 = 0; ++ GRUB_MD_CRC32->init (crc32_context); ++ GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); ++ GRUB_MD_CRC32->final (crc32_context); ++ gpt->crc32 = old; ++ ++ /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ ++ *crc = grub_swap_bytes32 (*(grub_uint32_t *) ++ GRUB_MD_CRC32->read (crc32_context)); ++ ++ grub_free (crc32_context); ++ ++ return GRUB_ERR_NONE; ++} ++ ++/* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ ++grub_err_t ++grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) ++{ ++ unsigned int i; ++ ++ if (mbr->signature != ++ grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid MBR signature"); ++ ++ for (i = 0; i < sizeof (mbr->entries); i++) ++ if (mbr->entries[i].type == GRUB_PC_PARTITION_TYPE_GPT_DISK) ++ return GRUB_ERR_NONE; ++ ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); ++} ++ ++grub_err_t ++grub_gpt_header_check (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size) ++{ ++ grub_uint32_t crc = 0, size; ++ ++ if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); ++ ++ if (gpt->version != GRUB_GPT_HEADER_VERSION) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); ++ ++ if (grub_gpt_header_crc32 (gpt, &crc)) ++ return grub_errno; ++ ++ if (gpt->crc32 != crc) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); ++ ++ /* The header size must be between 92 and the sector size. */ ++ size = grub_le_to_cpu32 (gpt->headersize); ++ if (size < 92U || size > (1U << log_sector_size)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); ++ ++ /* The partition entry size must be a multiple of 128. */ ++ size = grub_le_to_cpu32 (gpt->partentry_size); ++ if (size < 128 || size % 128) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); ++ ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_disk_addr_t addr; ++ ++ /* TODO: The gpt partmap module searches for the primary header instead ++ * of relying on the disk's sector size. For now trust the disk driver ++ * but eventually this code should match the existing behavior. */ ++ gpt->log_sector_size = disk->log_sector_size; ++ ++ addr = grub_gpt_sector_to_addr (gpt, 1); ++ if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) ++ return grub_errno; ++ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_uint64_t sector; ++ grub_disk_addr_t addr; ++ ++ /* Assumes gpt->log_sector_size == disk->log_sector_size */ ++ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ sector = disk->total_sectors - 1; ++ else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ sector = grub_le_to_cpu64 (gpt->primary.backup); ++ else ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "Unable to locate backup GPT"); ++ ++ addr = grub_gpt_sector_to_addr (gpt, sector); ++ if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) ++ return grub_errno; ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static struct grub_gpt_partentry * ++grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header) ++{ ++ struct grub_gpt_partentry *entries = NULL; ++ grub_uint8_t *crc32_context = NULL; ++ grub_uint32_t count, size, crc; ++ grub_disk_addr_t addr; ++ grub_size_t entries_size; ++ ++ /* Grub doesn't include calloc, hence the manual overflow check. 
*/ ++ count = grub_le_to_cpu32 (header->maxpart); ++ size = grub_le_to_cpu32 (header->partentry_size); ++ entries_size = count *size; ++ if (size && entries_size / size != count) ++ { ++ grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("out of memory")); ++ goto fail; ++ } ++ ++ entries = grub_malloc (entries_size); ++ if (!entries) ++ goto fail; ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (grub_disk_read (disk, addr, 0, entries_size, entries)) ++ goto fail; ++ ++ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); ++ if (!crc32_context) ++ goto fail; ++ ++ GRUB_MD_CRC32->init (crc32_context); ++ GRUB_MD_CRC32->write (crc32_context, entries, entries_size); ++ GRUB_MD_CRC32->final (crc32_context); ++ ++ crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); ++ if (grub_swap_bytes32 (crc) != header->partentry_crc32) ++ { ++ grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); ++ goto fail; ++ } ++ ++ grub_free (crc32_context); ++ return entries; ++ ++fail: ++ grub_free (entries); ++ grub_free (crc32_context); ++ return NULL; ++} ++ ++grub_gpt_t ++grub_gpt_read (grub_disk_t disk) ++{ ++ grub_gpt_t gpt; ++ struct grub_gpt_partentry *backup_entries; ++ ++ gpt = grub_zalloc (sizeof (*gpt)); ++ if (!gpt) ++ goto fail; ++ ++ if (grub_disk_read (disk, 0, 0, sizeof (gpt->mbr), &gpt->mbr)) ++ goto fail; ++ ++ /* Check the MBR but errors aren't reported beyond the status bit. */ ++ if (grub_gpt_pmbr_check (&gpt->mbr)) ++ grub_errno = GRUB_ERR_NONE; ++ else ++ gpt->status |= GRUB_GPT_PROTECTIVE_MBR; ++ ++ /* If both the primary and backup fail report the primary's error. */ ++ if (grub_gpt_read_primary (disk, gpt)) ++ { ++ grub_error_push (); ++ grub_gpt_read_backup (disk, gpt); ++ grub_error_pop (); ++ } ++ else ++ grub_gpt_read_backup (disk, gpt); ++ ++ /* If either succeeded clear any possible error from the other. */ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || ++ gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ grub_errno = GRUB_ERR_NONE; ++ else ++ goto fail; ++ ++ /* Same error handling scheme for the entry tables. 
*/ ++ gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); ++ if (!gpt->entries) ++ { ++ grub_error_push (); ++ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); ++ grub_error_pop (); ++ } ++ else ++ { ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); ++ } ++ ++ if (backup_entries) ++ { ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) ++ grub_free (backup_entries); ++ else ++ gpt->entries = backup_entries; ++ } ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) ++ { ++ grub_errno = GRUB_ERR_NONE; ++ return gpt; ++ } ++ ++fail: ++ grub_gpt_free (gpt); ++ return NULL; ++} ++ ++void ++grub_gpt_free (grub_gpt_t gpt) ++{ ++ if (!gpt) ++ return; ++ ++ grub_free (gpt->entries); ++ grub_free (gpt); ++} +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 7a93f4329..ee9eb0b95 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -21,6 +21,7 @@ + + #include + #include ++#include + + struct grub_gpt_part_guid + { +@@ -50,6 +51,12 @@ typedef struct grub_gpt_part_guid grub_gpt_part_guid_t; + { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ + } + ++#define GRUB_GPT_HEADER_MAGIC \ ++ { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } ++ ++#define GRUB_GPT_HEADER_VERSION \ ++ grub_cpu_to_le32_compile_time (0x00010000U) ++ + struct grub_gpt_header + { + grub_uint8_t magic[8]; +@@ -78,10 +85,63 @@ struct grub_gpt_partentry + char name[72]; + } GRUB_PACKED; + ++/* Basic GPT partmap module. */ + grub_err_t + grub_gpt_partition_map_iterate (grub_disk_t disk, + grub_partition_iterate_hook_t hook, + void *hook_data); + ++/* Advanced GPT library. */ ++typedef enum grub_gpt_status ++ { ++ GRUB_GPT_PROTECTIVE_MBR = 0x01, ++ GRUB_GPT_HYBRID_MBR = 0x02, ++ GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, ++ GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, ++ GRUB_GPT_BACKUP_HEADER_VALID = 0x10, ++ GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, ++ } grub_gpt_status_t; ++ ++#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) ++ ++/* UEFI requires the entries table to be at least 16384 bytes for a ++ * total of 128 entries given the standard 128 byte entry size. */ ++#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 ++ ++struct grub_gpt ++{ ++ /* Bit field indicating which structures on disk are valid. */ ++ grub_gpt_status_t status; ++ ++ /* Protective or hybrid MBR. */ ++ struct grub_msdos_partition_mbr mbr; ++ ++ /* Each of the two GPT headers. */ ++ struct grub_gpt_header primary; ++ struct grub_gpt_header backup; ++ ++ /* Only need one entries table, on disk both copies are identical. */ ++ struct grub_gpt_partentry *entries; ++ ++ /* Logarithm of sector size, in case GPT and disk driver disagree. */ ++ unsigned int log_sector_size; ++}; ++typedef struct grub_gpt *grub_gpt_t; ++ ++/* Translate GPT sectors to GRUB's 512 byte block addresses. */ ++static inline grub_disk_addr_t ++grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) ++{ ++ return (sector << (gpt->log_sector_size - GRUB_DISK_SECTOR_BITS)); ++} ++ ++/* Allocates and fills new grub_gpt structure, free with grub_gpt_free. 
*/ ++grub_gpt_t grub_gpt_read (grub_disk_t disk); ++ ++void grub_gpt_free (grub_gpt_t gpt); ++ ++grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); ++grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size); + + #endif /* ! GRUB_GPT_PARTITION_HEADER */ +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +new file mode 100644 +index 000000000..a824cd967 +--- /dev/null ++++ b/tests/gpt_unit_test.c +@@ -0,0 +1,467 @@ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* from gnulib */ ++#include ++ ++ ++/* GPT section sizes. */ ++#define HEADER_SIZE (sizeof (struct grub_gpt_header)) ++#define HEADER_PAD (GRUB_DISK_SECTOR_SIZE - HEADER_SIZE) ++#define ENTRY_SIZE (sizeof (struct grub_gpt_partentry)) ++#define TABLE_ENTRIES 0x80 ++#define TABLE_SIZE (TABLE_ENTRIES * ENTRY_SIZE) ++#define TABLE_SECTORS (TABLE_SIZE / GRUB_DISK_SECTOR_SIZE) ++ ++/* Double check that the table size calculation was valid. */ ++verify (TABLE_SECTORS * GRUB_DISK_SECTOR_SIZE == TABLE_SIZE); ++ ++/* GPT section locations for a 1MiB disk. */ ++#define DISK_SECTORS 0x800 ++#define DISK_SIZE (GRUB_DISK_SECTOR_SIZE * DISK_SECTORS) ++#define PRIMARY_HEADER_SECTOR 0x1 ++#define PRIMARY_TABLE_SECTOR 0x2 ++#define BACKUP_HEADER_SECTOR (DISK_SECTORS - 0x1) ++#define BACKUP_TABLE_SECTOR (BACKUP_HEADER_SECTOR - TABLE_SECTORS) ++ ++#define DATA_START_SECTOR (PRIMARY_TABLE_SECTOR + TABLE_SECTORS) ++#define DATA_END_SECTOR (BACKUP_TABLE_SECTOR - 0x1) ++#define DATA_SECTORS (BACKUP_TABLE_SECTOR - DATA_START_SECTOR) ++#define DATA_SIZE (GRUB_DISK_SECTOR_SIZE * DATA_SECTORS) ++ ++struct test_disk ++{ ++ struct grub_msdos_partition_mbr mbr; ++ ++ struct grub_gpt_header primary_header; ++ grub_uint8_t primary_header_pad[HEADER_PAD]; ++ struct grub_gpt_partentry primary_entries[TABLE_ENTRIES]; ++ ++ grub_uint8_t data[DATA_SIZE]; ++ ++ struct grub_gpt_partentry backup_entries[TABLE_ENTRIES]; ++ struct grub_gpt_header backup_header; ++ grub_uint8_t backup_header_pad[HEADER_PAD]; ++} GRUB_PACKED; ++ ++/* Sanity check that all the above ugly math was correct. */ ++verify (sizeof (struct test_disk) == DISK_SIZE); ++ ++struct test_data ++{ ++ int fd; ++ grub_device_t dev; ++ struct test_disk *raw; ++}; ++ ++ ++/* Sample primary GPT header for an empty 1MB disk. 
*/ ++static const struct grub_gpt_header example_primary = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), ++ .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++}; ++ ++/* And the backup header. */ ++static const struct grub_gpt_header example_backup = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), ++ .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++}; ++ ++/* Sample protective MBR for the same 1MB disk. Note, this matches ++ * parted and fdisk behavior. The UEFI spec uses different values. */ ++static const struct grub_msdos_partition_mbr example_pmbr = { ++ .entries = {{.flag = 0x00, ++ .start_head = 0x00, ++ .start_sector = 0x01, ++ .start_cylinder = 0x00, ++ .type = 0xee, ++ .end_head = 0xfe, ++ .end_sector = 0xff, ++ .end_cylinder = 0xff, ++ .start = grub_cpu_to_le32_compile_time (0x1), ++ .length = grub_cpu_to_le32_compile_time (DISK_SECTORS - 0x1), ++ }}, ++ .signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE), ++}; ++ ++/* If errors are left in grub's error stack things can get confused. */ ++static void ++assert_error_stack_empty (void) ++{ ++ do ++ { ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "error on stack: %s", grub_errmsg); ++ } ++ while (grub_error_pop ()); ++} ++ ++static grub_err_t ++execute_command2 (const char *name, const char *arg1, const char *arg2) ++{ ++ grub_command_t cmd; ++ grub_err_t err; ++ char *argv[2]; ++ ++ cmd = grub_command_find (name); ++ if (!cmd) ++ grub_fatal ("can't find command %s", name); ++ ++ argv[0] = strdup (arg1); ++ argv[1] = strdup (arg2); ++ err = (cmd->func) (cmd, 2, argv); ++ free (argv[0]); ++ free (argv[1]); ++ ++ return err; ++} ++ ++static void ++sync_disk (struct test_data *data) ++{ ++ if (msync (data->raw, DISK_SIZE, MS_SYNC | MS_INVALIDATE) < 0) ++ grub_fatal ("Syncing disk failed: %s", strerror (errno)); ++ ++ grub_disk_cache_invalidate_all (); ++} ++ ++static void ++reset_disk (struct test_data *data) ++{ ++ memset (data->raw, 0, DISK_SIZE); ++ ++ /* Initialize image with valid example tables. 
*/ ++ memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); ++ memcpy (&data->raw->primary_header, &example_primary, ++ sizeof (data->raw->primary_header)); ++ memcpy (&data->raw->backup_header, &example_backup, ++ sizeof (data->raw->backup_header)); ++ ++ sync_disk (data); ++} ++ ++static void ++open_disk (struct test_data *data) ++{ ++ const char *loop = "loop0"; ++ char template[] = "/tmp/grub_gpt_test.XXXXXX"; ++ char host[sizeof ("(host)") + sizeof (template)]; ++ ++ data->fd = mkstemp (template); ++ if (data->fd < 0) ++ grub_fatal ("Creating %s failed: %s", template, strerror (errno)); ++ ++ if (ftruncate (data->fd, DISK_SIZE) < 0) ++ { ++ int err = errno; ++ unlink (template); ++ grub_fatal ("Resizing %s failed: %s", template, strerror (err)); ++ } ++ ++ data->raw = mmap (NULL, DISK_SIZE, PROT_READ | PROT_WRITE, ++ MAP_SHARED, data->fd, 0); ++ if (data->raw == MAP_FAILED) ++ { ++ int err = errno; ++ unlink (template); ++ grub_fatal ("Maping %s failed: %s", template, strerror (err)); ++ } ++ ++ snprintf (host, sizeof (host), "(host)%s", template); ++ if (execute_command2 ("loopback", loop, host) != GRUB_ERR_NONE) ++ { ++ unlink (template); ++ grub_fatal ("loopback %s %s failed: %s", loop, host, grub_errmsg); ++ } ++ ++ if (unlink (template) < 0) ++ grub_fatal ("Unlinking %s failed: %s", template, strerror (errno)); ++ ++ reset_disk (data); ++ ++ data->dev = grub_device_open (loop); ++ if (!data->dev) ++ grub_fatal ("Opening %s failed: %s", loop, grub_errmsg); ++} ++ ++static void ++close_disk (struct test_data *data) ++{ ++ char *loop; ++ ++ assert_error_stack_empty (); ++ ++ if (munmap (data->raw, DISK_SIZE) || close (data->fd)) ++ grub_fatal ("Closing disk image failed: %s", strerror (errno)); ++ ++ loop = strdup (data->dev->disk->name); ++ grub_test_assert (grub_device_close (data->dev) == GRUB_ERR_NONE, ++ "Closing disk device failed: %s", grub_errmsg); ++ ++ grub_test_assert (execute_command2 ("loopback", "-d", loop) == ++ GRUB_ERR_NONE, "loopback -d %s failed: %s", loop, ++ grub_errmsg); ++ ++ free (loop); ++} ++ ++static grub_gpt_t ++read_disk (struct test_data *data) ++{ ++ grub_gpt_t gpt; ++ ++ gpt = grub_gpt_read (data->dev->disk); ++ if (gpt == NULL) ++ { ++ grub_print_error (); ++ grub_fatal ("grub_gpt_read failed"); ++ } ++ ++ ++ return gpt; ++} ++ ++static void ++pmbr_test (void) ++{ ++ struct grub_msdos_partition_mbr mbr; ++ ++ memset (&mbr, 0, sizeof (mbr)); ++ ++ /* Empty is invalid. */ ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* A table without a protective partition is invalid. */ ++ mbr.signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE); ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* A table with a protective type is ok. */ ++ memcpy (&mbr, &example_pmbr, sizeof (mbr)); ++ grub_gpt_pmbr_check (&mbr); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ ++static void ++header_test (void) ++{ ++ struct grub_gpt_header primary, backup; ++ ++ /* Example headers should be valid. 
*/ ++ memcpy (&primary, &example_primary, sizeof (primary)); ++ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ memcpy (&backup, &example_backup, sizeof (backup)); ++ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ /* Twiddle the GUID to invalidate the CRC. */ ++ primary.guid[0] = 0; ++ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ backup.guid[0] = 0; ++ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ ++static void ++read_valid_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | ++ GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID), ++ "unexpected status: 0x%02x", gpt->status); ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ ++static void ++read_invalid_entries_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Corrupt the first entry in both tables. */ ++ memset (&data.raw->primary_entries[0], 0x55, ++ sizeof (data.raw->primary_entries[0])); ++ memset (&data.raw->backup_entries[0], 0x55, ++ sizeof (data.raw->backup_entries[0])); ++ sync_disk (&data); ++ ++ gpt = grub_gpt_read (data.dev->disk); ++ grub_test_assert (gpt == NULL, "no error reported for corrupt entries"); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++static void ++read_fallback_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Corrupt the primary header. */ ++ memset (&data.raw->primary_header.guid, 0x55, ++ sizeof (data.raw->primary_header.guid)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) == 0, ++ "unreported corrupt primary header"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the backup header. */ ++ memset (&data.raw->backup_header.guid, 0x55, ++ sizeof (data.raw->backup_header.guid)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, ++ "unreported corrupt backup header"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the primary entry table. */ ++ memset (&data.raw->primary_entries[0], 0x55, ++ sizeof (data.raw->primary_entries[0])); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) == 0, ++ "unreported corrupt primary entries table"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Corrupt the backup entry table. 
*/ ++ memset (&data.raw->backup_entries[0], 0x55, ++ sizeof (data.raw->backup_entries[0])); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) == 0, ++ "unreported corrupt backup entries table"); ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* If primary is corrupt and disk size is unknown fallback fails. */ ++ memset (&data.raw->primary_header.guid, 0x55, ++ sizeof (data.raw->primary_header.guid)); ++ sync_disk (&data); ++ data.dev->disk->total_sectors = GRUB_DISK_SIZE_UNKNOWN; ++ gpt = grub_gpt_read (data.dev->disk); ++ grub_test_assert (gpt == NULL, "no error reported"); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++void ++grub_unit_test_init (void) ++{ ++ grub_init_all (); ++ grub_hostfs_init (); ++ grub_host_init (); ++ grub_test_register ("gpt_pmbr_test", pmbr_test); ++ grub_test_register ("gpt_header_test", header_test); ++ grub_test_register ("gpt_read_valid_test", read_valid_test); ++ grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); ++ grub_test_register ("gpt_read_fallback_test", read_fallback_test); ++} ++ ++void ++grub_unit_test_fini (void) ++{ ++ grub_test_unregister ("gpt_pmbr_test"); ++ grub_test_unregister ("gpt_header_test"); ++ grub_test_unregister ("gpt_read_valid_test"); ++ grub_test_unregister ("gpt_read_invalid_test"); ++ grub_test_unregister ("gpt_read_fallback_test"); ++ grub_fini_all (); ++} diff --git a/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch b/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch new file mode 100644 index 00000000..ff06baf7 --- /dev/null +++ b/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch @@ -0,0 +1,68 @@ +From eb194ecfc9137233703e530fa0411bc86405469b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 15:39:13 -0700 +Subject: [PATCH] gpt: rename misnamed header location fields + +The header location fields refer to 'this header' and 'alternate header' +respectively, not 'primary header' and 'backup header'. The previous +field names are backwards for the backup header. 
+--- + grub-core/lib/gpt.c | 2 +- + include/grub/gpt_partition.h | 4 ++-- + tests/gpt_unit_test.c | 8 ++++---- + 3 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index a308e8537..705bd77f9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -137,7 +137,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) + sector = disk->total_sectors - 1; + else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- sector = grub_le_to_cpu64 (gpt->primary.backup); ++ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); + else + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "Unable to locate backup GPT"); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index ee9eb0b95..6d678fae2 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -64,8 +64,8 @@ struct grub_gpt_header + grub_uint32_t headersize; + grub_uint32_t crc32; + grub_uint32_t unused1; +- grub_uint64_t primary; +- grub_uint64_t backup; ++ grub_uint64_t header_lba; ++ grub_uint64_t alternate_lba; + grub_uint64_t start; + grub_uint64_t end; + grub_uint8_t guid[16]; +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index a824cd967..4d70868af 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -94,8 +94,8 @@ static const struct grub_gpt_header example_primary = { + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), + .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), +- .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), +- .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), + .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +@@ -112,8 +112,8 @@ static const struct grub_gpt_header example_backup = { + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), + .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), +- .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), +- .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), + .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, diff --git a/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch b/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch new file mode 100644 index 00000000..dabd5064 --- /dev/null +++ b/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch @@ -0,0 +1,134 @@ +From b78d543b0ab493f6bd20f2e5d101fc11c9357faf Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 16:46:17 -0700 +Subject: [PATCH] gpt: record size of of the entries table + +The size of the entries table will be needed later when writing it back +to disk. Restructure the entries reading code to flow a little better. 
+--- + grub-core/lib/gpt.c | 53 ++++++++++++++++-------------------- + include/grub/gpt_partition.h | 5 +++- + 2 files changed, 27 insertions(+), 31 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 705bd77f9..01df7f3e8 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -153,7 +153,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return GRUB_ERR_NONE; + } + +-static struct grub_gpt_partentry * ++static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { +@@ -173,6 +173,10 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + goto fail; + } + ++ /* Double check that the header was validated properly. */ ++ if (entries_size < GRUB_GPT_DEFAULT_ENTRIES_SIZE) ++ return grub_error (GRUB_ERR_BUG, "invalid GPT entries table size"); ++ + entries = grub_malloc (entries_size); + if (!entries) + goto fail; +@@ -197,19 +201,21 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + } + + grub_free (crc32_context); +- return entries; ++ grub_free (gpt->entries); ++ gpt->entries = entries; ++ gpt->entries_size = entries_size; ++ return GRUB_ERR_NONE; + + fail: + grub_free (entries); + grub_free (crc32_context); +- return NULL; ++ return grub_errno; + } + + grub_gpt_t + grub_gpt_read (grub_disk_t disk) + { + grub_gpt_t gpt; +- struct grub_gpt_partentry *backup_entries; + + gpt = grub_zalloc (sizeof (*gpt)); + if (!gpt) +@@ -241,36 +247,23 @@ grub_gpt_read (grub_disk_t disk) + else + goto fail; + +- /* Same error handling scheme for the entry tables. */ +- gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); +- if (!gpt->entries) +- { +- grub_error_push (); +- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); +- grub_error_pop (); +- } +- else +- { +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; +- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); +- } ++ /* Similarly, favor the value or error from the primary table. */ ++ if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && ++ !grub_gpt_read_entries (disk, gpt, &gpt->backup)) ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; + +- if (backup_entries) +- { +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; +- +- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) +- grub_free (backup_entries); +- else +- gpt->entries = backup_entries; +- } ++ grub_errno = GRUB_ERR_NONE; ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && ++ !grub_gpt_read_entries (disk, gpt, &gpt->primary)) ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; + + if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || + gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) +- { +- grub_errno = GRUB_ERR_NONE; +- return gpt; +- } ++ grub_errno = GRUB_ERR_NONE; ++ else ++ goto fail; ++ ++ return gpt; + + fail: + grub_gpt_free (gpt); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 6d678fae2..451b02a89 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -106,7 +106,9 @@ typedef enum grub_gpt_status + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +-#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 ++#define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 ++#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH \ ++ (GRUB_GPT_DEFAULT_ENTRIES_SIZE / sizeof (struct grub_gpt_partentry)) + + struct grub_gpt + { +@@ -122,6 +124,7 @@ struct grub_gpt + + /* Only need one entries table, on disk both copies are identical. 
*/ + struct grub_gpt_partentry *entries; ++ grub_size_t entries_size; + + /* Logarithm of sector size, in case GPT and disk driver disagree. */ + unsigned int log_sector_size; diff --git a/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch b/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch new file mode 100644 index 00000000..8246377c --- /dev/null +++ b/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch @@ -0,0 +1,116 @@ +From 39351efcb1aab82bb86052d21f07308429414a83 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 18:18:17 -0700 +Subject: [PATCH] gpt: consolidate crc32 computation code + +The gcrypt API is overly verbose, wrap it up in a helper function to +keep this rather common operation easy to use. +--- + grub-core/lib/gpt.c | 43 ++++++++++++++++++++++++------------------- + 1 file changed, 24 insertions(+), 19 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 01df7f3e8..43a150942 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -32,22 +32,17 @@ static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + + static grub_err_t +-grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) ++grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) + { + grub_uint8_t *crc32_context; +- grub_uint32_t old; + + crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); + if (!crc32_context) + return grub_errno; + +- /* crc32 must be computed with the field cleared. */ +- old = gpt->crc32; +- gpt->crc32 = 0; + GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); ++ GRUB_MD_CRC32->write (crc32_context, data, len); + GRUB_MD_CRC32->final (crc32_context); +- gpt->crc32 = old; + + /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ + *crc = grub_swap_bytes32 (*(grub_uint32_t *) +@@ -58,6 +53,25 @@ grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) + return GRUB_ERR_NONE; + } + ++static grub_err_t ++grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) ++{ ++ grub_uint32_t old, new; ++ grub_err_t err; ++ ++ /* crc32 must be computed with the field cleared. */ ++ old = header->crc32; ++ header->crc32 = 0; ++ err = grub_gpt_lecrc32 (header, sizeof (*header), &new); ++ header->crc32 = old; ++ ++ if (err) ++ return err; ++ ++ *crc = new; ++ return GRUB_ERR_NONE; ++} ++ + /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ + grub_err_t + grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) +@@ -87,7 +101,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->version != GRUB_GPT_HEADER_VERSION) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); + +- if (grub_gpt_header_crc32 (gpt, &crc)) ++ if (grub_gpt_header_lecrc32 (gpt, &crc)) + return grub_errno; + + if (gpt->crc32 != crc) +@@ -158,7 +172,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { + struct grub_gpt_partentry *entries = NULL; +- grub_uint8_t *crc32_context = NULL; + grub_uint32_t count, size, crc; + grub_disk_addr_t addr; + grub_size_t entries_size; +@@ -185,22 +198,15 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); +- if (!crc32_context) ++ if (grub_gpt_lecrc32 (entries, entries_size, &crc)) + goto fail; + +- GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, entries, entries_size); +- GRUB_MD_CRC32->final (crc32_context); +- +- crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); +- if (grub_swap_bytes32 (crc) != header->partentry_crc32) ++ if (crc != header->partentry_crc32) + { + grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); + goto fail; + } + +- grub_free (crc32_context); + grub_free (gpt->entries); + gpt->entries = entries; + gpt->entries_size = entries_size; +@@ -208,7 +214,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + + fail: + grub_free (entries); +- grub_free (crc32_context); + return grub_errno; + } + diff --git a/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch b/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch new file mode 100644 index 00000000..8bf78901 --- /dev/null +++ b/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch @@ -0,0 +1,214 @@ +From 5e9049af888cd344990fd031f93d7189ef340805 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 18 Oct 2014 18:21:07 -0700 +Subject: [PATCH] gpt: add new repair function to sync up primary and backup + tables. 
+ +--- + grub-core/lib/gpt.c | 90 ++++++++++++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 3 ++ + tests/gpt_unit_test.c | 49 ++++++++++++++++++++ + 3 files changed, 142 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 43a150942..2d61df488 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -31,6 +31,20 @@ GRUB_MOD_LICENSE ("GPLv3+"); + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + ++static grub_uint64_t ++grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) ++{ ++ unsigned int sector_size; ++ grub_uint64_t sectors; ++ ++ sector_size = 1U << gpt->log_sector_size; ++ sectors = size / sector_size; ++ if (size % sector_size) ++ sectors++; ++ ++ return sectors; ++} ++ + static grub_err_t + grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) + { +@@ -275,6 +289,82 @@ fail: + return NULL; + } + ++grub_err_t ++grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ grub_uint64_t backup_header, backup_entries; ++ grub_uint32_t crc; ++ ++ if (disk->log_sector_size != gpt->log_sector_size) ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "GPT sector size must match disk sector size"); ++ ++ if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) ++ return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ { ++ backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); ++ } ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ { ++ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); ++ } ++ else ++ return grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ ++ /* Relocate backup to end if disk whenever possible. */ ++ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ backup_header = disk->total_sectors - 1; ++ ++ backup_entries = backup_header - ++ grub_gpt_size_to_sectors (gpt, gpt->entries_size); ++ ++ /* Update/fixup header and partition table locations. */ ++ gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); ++ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); ++ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); ++ gpt->backup.header_lba = gpt->primary.alternate_lba; ++ gpt->backup.alternate_lba = gpt->primary.header_lba; ++ gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); ++ ++ /* Writing headers larger than our header structure are unsupported. */ ++ gpt->primary.headersize = ++ grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); ++ gpt->backup.headersize = ++ grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); ++ ++ /* Recompute checksums. */ ++ if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) ++ return grub_errno; ++ ++ gpt->primary.partentry_crc32 = crc; ++ gpt->backup.partentry_crc32 = crc; ++ ++ if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) ++ return grub_errno; ++ ++ if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) ++ return grub_errno; ++ ++ /* Sanity check. 
*/ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ ++ return GRUB_ERR_NONE; ++} ++ + void + grub_gpt_free (grub_gpt_t gpt) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 451b02a89..f367fe50d 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -141,6 +141,9 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) + /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ + grub_gpt_t grub_gpt_read (grub_disk_t disk); + ++/* Sync up primary and backup headers, recompute checksums. */ ++grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); ++ + void grub_gpt_free (grub_gpt_t gpt); + + grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 4d70868af..83198bebf 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -442,6 +443,52 @@ read_fallback_test (void) + close_disk (&data); + } + ++static void ++repair_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Erase/Repair primary. */ ++ memset (&data.raw->primary_header, 0, sizeof (data.raw->primary_header)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ if (memcmp (&gpt->primary, &example_primary, sizeof (gpt->primary))) ++ { ++ printf ("Invalid restored primary header:\n"); ++ hexdump (16, (char*)&gpt->primary, sizeof (gpt->primary)); ++ printf ("Expected primary header:\n"); ++ hexdump (16, (char*)&example_primary, sizeof (example_primary)); ++ grub_test_assert (0, "repair did not restore primary header"); ++ } ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ /* Erase/Repair backup. 
*/ ++ memset (&data.raw->backup_header, 0, sizeof (data.raw->backup_header)); ++ sync_disk (&data); ++ gpt = read_disk (&data); ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ if (memcmp (&gpt->backup, &example_backup, sizeof (gpt->backup))) ++ { ++ printf ("Invalid restored backup header:\n"); ++ hexdump (16, (char*)&gpt->backup, sizeof (gpt->backup)); ++ printf ("Expected backup header:\n"); ++ hexdump (16, (char*)&example_backup, sizeof (example_backup)); ++ grub_test_assert (0, "repair did not restore backup header"); ++ } ++ grub_gpt_free (gpt); ++ reset_disk (&data); ++ ++ close_disk (&data); ++} + void + grub_unit_test_init (void) + { +@@ -453,6 +500,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_valid_test", read_valid_test); + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); ++ grub_test_register ("gpt_repair_test", repair_test); + } + + void +@@ -463,5 +511,6 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_valid_test"); + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); ++ grub_test_unregister ("gpt_repair_test"); + grub_fini_all (); + } diff --git a/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch new file mode 100644 index 00000000..ae331c19 --- /dev/null +++ b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch @@ -0,0 +1,364 @@ +From cb285d07b1b20c2ae081913cd3002cd97c8dd385 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sun, 19 Oct 2014 14:21:29 -0700 +Subject: [PATCH] gpt: add write function and gptrepair command + +The first hint of something practical, a command that can restore any of +the GPT structures from the alternate location. New test case must run +under QEMU because the loopback device used by the other unit tests does +not support writing. +--- + Makefile.util.def | 6 ++ + grub-core/Makefile.core.def | 5 ++ + grub-core/commands/gptrepair.c | 116 +++++++++++++++++++++++++++++++++ + grub-core/lib/gpt.c | 44 +++++++++++-- + include/grub/gpt_partition.h | 8 +++ + tests/gptrepair_test.in | 102 +++++++++++++++++++++++++++++ + 6 files changed, 277 insertions(+), 4 deletions(-) + create mode 100644 grub-core/commands/gptrepair.c + create mode 100644 tests/gptrepair_test.in + +diff --git a/Makefile.util.def b/Makefile.util.def +index af8a008e2..6ed541c1c 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1175,6 +1175,12 @@ script = { + common = tests/grub_cmd_tr.in; + }; + ++script = { ++ testcase; ++ name = gptrepair_test; ++ common = tests/gptrepair_test.in; ++}; ++ + script = { + testcase; + name = file_filter_test; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 95dba2d26..43ce166db 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -893,6 +893,11 @@ module = { + common = commands/gptsync.c; + }; + ++module = { ++ name = gptrepair; ++ common = commands/gptrepair.c; ++}; ++ + module = { + name = gpt; + common = lib/gpt.c; +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +new file mode 100644 +index 000000000..38392fd8f +--- /dev/null ++++ b/grub-core/commands/gptrepair.c +@@ -0,0 +1,116 @@ ++/* gptrepair.c - verify and restore GPT info from alternate location. 
*/ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2009 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static char * ++trim_dev_name (char *name) ++{ ++ grub_size_t len = grub_strlen (name); ++ if (len && name[0] == '(' && name[len - 1] == ')') ++ { ++ name[len - 1] = '\0'; ++ name = name + 1; ++ } ++ return name; ++} ++ ++static grub_err_t ++grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), ++ int argc, char **args) ++{ ++ grub_device_t dev = NULL; ++ grub_gpt_t gpt = NULL; ++ char *dev_name; ++ grub_uint32_t primary_crc, backup_crc; ++ enum grub_gpt_status old_status; ++ ++ if (argc != 1 || !grub_strlen(args[0])) ++ return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); ++ ++ dev_name = trim_dev_name (args[0]); ++ dev = grub_device_open (dev_name); ++ if (!dev) ++ goto done; ++ ++ if (!dev->disk) ++ { ++ grub_error (GRUB_ERR_BAD_ARGUMENT, "not a disk"); ++ goto done; ++ } ++ ++ gpt = grub_gpt_read (dev->disk); ++ if (!gpt) ++ goto done; ++ ++ primary_crc = gpt->primary.crc32; ++ backup_crc = gpt->backup.crc32; ++ old_status = gpt->status; ++ ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; ++ ++ if (primary_crc == gpt->primary.crc32 && ++ backup_crc == gpt->backup.crc32 && ++ old_status && gpt->status) ++ { ++ grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); ++ goto done; ++ } ++ ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; ++ ++ if (!(old_status & GRUB_GPT_PRIMARY_VALID)) ++ grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); ++ ++ if (!(old_status & GRUB_GPT_BACKUP_VALID)) ++ grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); ++ ++done: ++ if (gpt) ++ grub_gpt_free (gpt); ++ ++ if (dev) ++ grub_device_close (dev); ++ ++ return grub_errno; ++} ++ ++static grub_command_t cmd; ++ ++GRUB_MOD_INIT(gptrepair) ++{ ++ cmd = grub_register_command ("gptrepair", grub_cmd_gptrepair, ++ N_("DEVICE"), ++ N_("Verify and repair GPT on drive DEVICE.")); ++} ++ ++GRUB_MOD_FINI(gptrepair) ++{ ++ grub_unregister_command (cmd); ++} +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 2d61df488..67ffdf703 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -357,10 +357,46 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + +- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | +- GRUB_GPT_PRIMARY_ENTRIES_VALID | +- GRUB_GPT_BACKUP_HEADER_VALID | +- GRUB_GPT_BACKUP_ENTRIES_VALID); ++ gpt->status |= GRUB_GPT_BOTH_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header) ++{ ++ grub_disk_addr_t addr; ++ ++ if 
(grub_le_to_cpu32 (header->headersize) != sizeof (*header)) ++ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, ++ "Header size is %u, must be %u", ++ grub_le_to_cpu32 (header->headersize), ++ sizeof (*header)); ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); ++ if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) ++ return grub_errno; ++ ++ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) ++{ ++ /* TODO: update/repair protective MBRs too. */ ++ ++ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); ++ ++ if (grub_gpt_write_table (disk, gpt, &gpt->primary)) ++ return grub_errno; ++ ++ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) ++ return grub_errno; + + return GRUB_ERR_NONE; + } +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index f367fe50d..a483f710a 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -103,6 +103,11 @@ typedef enum grub_gpt_status + } grub_gpt_status_t; + + #define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) ++#define GRUB_GPT_PRIMARY_VALID \ ++ (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) ++#define GRUB_GPT_BACKUP_VALID \ ++ (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) ++#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +@@ -144,6 +149,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + ++/* Write headers and entry tables back to disk. */ ++grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); ++ + void grub_gpt_free (grub_gpt_t gpt); + + grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); +diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in +new file mode 100644 +index 000000000..80b2de633 +--- /dev/null ++++ b/tests/gptrepair_test.in +@@ -0,0 +1,102 @@ ++#! /bin/sh ++set -e ++ ++# Copyright (C) 2010 Free Software Foundation, Inc. ++# Copyright (C) 2014 CoreOS, Inc. ++# ++# GRUB is free software: you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation, either version 3 of the License, or ++# (at your option) any later version. ++# ++# GRUB is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GRUB. If not, see . ++ ++parted=parted ++grubshell=@builddir@/grub-shell ++ ++. 
"@builddir@/grub-core/modinfo.sh" ++ ++case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in ++ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) ++ disk=ata0 ++ ;; ++ powerpc-ieee1275) ++ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ sparc64-ieee1275) ++ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ i386-ieee1275) ++ disk=ieee1275/d ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ mips-arc) ++ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. ++ exit 0 ;; ++ mipsel-arc) ++ disk=arc/scsi0/disk0/rdisk0 ++ ;; ++ *) ++ disk=hd0 ++ ;; ++esac ++img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++trap "rm -f '${img1}' '${ing2}'" EXIT ++ ++create_disk_image () { ++ size=$1 ++ rm -f "${img1}" ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none ++ ${parted} -a none -s "${img1}" mklabel gpt ++ cp "${img1}" "${img2}" ++} ++ ++wipe_disk_area () { ++ sector=$1 ++ size=$2 ++ dd if=/dev/zero of="${img2}" bs=512 count=${size} seek=${sector} conv=notrunc status=none ++} ++ ++do_repair () { ++ output="`echo "gptrepair ($disk)" | "${grubshell}" --disk="${img2}"`" ++ if echo "${output}" | grep ^error; then ++ return 1 ++ fi ++ if echo "${output}" | grep -v GPT; then ++ echo "Unexpected output ${output}" ++ return 1 ++ fi ++ echo "${output}" ++} ++ ++echo "Nothing to repair:" ++create_disk_image 100 ++do_repair ++cmp "${img1}" "${img2}" ++echo ++ ++echo "Repair primary (MBR left intact)" ++create_disk_image 100 ++wipe_disk_area 1 1 ++do_repair ++cmp "${img1}" "${img2}" ++echo ++ ++echo "Repair backup" ++create_disk_image 100 ++wipe_disk_area 99 1 ++do_repair ++cmp "${img1}" "${img2}" ++echo diff --git a/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch b/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch new file mode 100644 index 00000000..5f57e963 --- /dev/null +++ b/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch @@ -0,0 +1,122 @@ +From 14d7fb113f2c89b3df8b6ed7b0fa001084704d6b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 30 Oct 2014 20:55:21 -0700 +Subject: [PATCH] gpt: add a new generic GUID type + +In order to do anything with partition GUIDs they need to be stored in a +proper structure like the partition type GUIDs. Additionally add an +initializer macro to simplify defining both GUID types. 
+ +[iweller: use new type name from a16f4a822] +Signed-off-by: iliana destroyer of worlds +--- + include/grub/gpt_partition.h | 34 ++++++++++++++++++---------------- + tests/gpt_unit_test.c | 12 ++++++------ + 2 files changed, 24 insertions(+), 22 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index a483f710a..8183a1f30 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -23,33 +23,35 @@ + #include + #include + +-struct grub_gpt_part_guid ++struct grub_gpt_guid + { + grub_uint32_t data1; + grub_uint16_t data2; + grub_uint16_t data3; + grub_uint8_t data4[8]; + } GRUB_PACKED; +-typedef struct grub_gpt_part_guid grub_gpt_part_guid_t; ++typedef struct grub_gpt_guid grub_gpt_guid_t; ++typedef struct grub_gpt_guid grub_gpt_part_guid_t; ++ ++#define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ ++ { \ ++ grub_cpu_to_le32_compile_time (a), \ ++ grub_cpu_to_le16_compile_time (b), \ ++ grub_cpu_to_le16_compile_time (c), \ ++ { d1, d2, d3, d4, d5, d6, d7, d8 } \ ++ } + + #define GRUB_GPT_PARTITION_TYPE_EMPTY \ +- { 0x0, 0x0, 0x0, \ +- { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 } \ +- } ++ GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ ++ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) + + #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ +- { grub_cpu_to_le32_compile_time (0x21686148), \ +- grub_cpu_to_le16_compile_time (0x6449), \ +- grub_cpu_to_le16_compile_time (0x6e6f), \ +- { 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49 } \ +- } ++ GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ ++ 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) + + #define GRUB_GPT_PARTITION_TYPE_LDM \ +- { grub_cpu_to_le32_compile_time (0x5808C8AAU),\ +- grub_cpu_to_le16_compile_time (0x7E8F), \ +- grub_cpu_to_le16_compile_time (0x42E0), \ +- { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ +- } ++ GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ ++ 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } +@@ -68,7 +70,7 @@ struct grub_gpt_header + grub_uint64_t alternate_lba; + grub_uint64_t start; + grub_uint64_t end; +- grub_uint8_t guid[16]; ++ grub_gpt_guid_t guid; + grub_uint64_t partitions; + grub_uint32_t maxpart; + grub_uint32_t partentry_size; +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 83198bebf..86e4364a5 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -99,8 +99,8 @@ static const struct grub_gpt_header example_primary = { + .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), +- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), + .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +@@ -117,8 +117,8 @@ static const struct grub_gpt_header example_backup = { + .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), + .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), +- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, ++ .guid = 
GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), + .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +@@ -326,13 +326,13 @@ header_test (void) + grub_errno = GRUB_ERR_NONE; + + /* Twiddle the GUID to invalidate the CRC. */ +- primary.guid[0] = 0; ++ primary.guid.data1 = 0; + grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); + grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, + "unexpected error: %s", grub_errmsg); + grub_errno = GRUB_ERR_NONE; + +- backup.guid[0] = 0; ++ backup.guid.data1 = 0; + grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); + grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, + "unexpected error: %s", grub_errmsg); diff --git a/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch new file mode 100644 index 00000000..6de6693a --- /dev/null +++ b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch @@ -0,0 +1,526 @@ +From 821cdddd98bdc3879fb7580c225c3a89282873e7 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 3 Nov 2014 17:14:37 -0800 +Subject: [PATCH] gpt: new gptprio.next command for selecting priority based + partitions + +Basic usage would look something like this: + + gptprio.next -d usr_dev -u usr_uuid + linuxefi ($usr_dev)/boot/vmlinuz mount.usr=PARTUUID=$usr_uuid + +After booting the system should set the 'successful' bit on the +partition that was used. + +[iweller: use new type name from a16f4a822] +Signed-off-by: iliana destroyer of worlds +--- + Makefile.util.def | 6 + + grub-core/Makefile.core.def | 5 + + grub-core/commands/gptprio.c | 238 +++++++++++++++++++++++++++++++++++ + include/grub/gpt_partition.h | 49 ++++++++ + tests/gptprio_test.in | 150 ++++++++++++++++++++++ + 5 files changed, 448 insertions(+) + create mode 100644 grub-core/commands/gptprio.c + create mode 100644 tests/gptprio_test.in + +diff --git a/Makefile.util.def b/Makefile.util.def +index 6ed541c1c..a2b84ec4b 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1181,6 +1181,12 @@ script = { + common = tests/gptrepair_test.in; + }; + ++script = { ++ testcase; ++ name = gptprio_test; ++ common = tests/gptprio_test.in; ++}; ++ + script = { + testcase; + name = file_filter_test; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 43ce166db..615b00226 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -898,6 +898,11 @@ module = { + common = commands/gptrepair.c; + }; + ++module = { ++ name = gptprio; ++ common = commands/gptprio.c; ++}; ++ + module = { + name = gpt; + common = lib/gpt.c; +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +new file mode 100644 +index 000000000..1e2e06cef +--- /dev/null ++++ b/grub-core/commands/gptprio.c +@@ -0,0 +1,238 @@ ++/* gptprio.c - manage priority based partition selection. */ ++/* ++ * GRUB -- GRand Unified Bootloader ++ * Copyright (C) 2009 Free Software Foundation, Inc. ++ * Copyright (C) 2014 CoreOS, Inc. ++ * ++ * GRUB is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. 
++ * ++ * GRUB is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GRUB. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++GRUB_MOD_LICENSE ("GPLv3+"); ++ ++static const struct grub_arg_option options_next[] = { ++ {"set-device", 'd', 0, ++ N_("Set a variable to the name of selected partition."), ++ N_("VARNAME"), ARG_TYPE_STRING}, ++ {"set-uuid", 'u', 0, ++ N_("Set a variable to the GPT UUID of selected partition."), ++ N_("VARNAME"), ARG_TYPE_STRING}, ++ {0, 0, 0, 0, 0, 0} ++}; ++ ++enum options_next ++{ ++ NEXT_SET_DEVICE, ++ NEXT_SET_UUID, ++}; ++ ++static unsigned int ++grub_gptprio_priority (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY, 4); ++} ++ ++static unsigned int ++grub_gptprio_tries_left (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); ++} ++ ++static void ++grub_gptprio_set_tries_left (struct grub_gpt_partentry *entry, ++ unsigned int tries_left) ++{ ++ grub_gpt_entry_set_attribute ++ (entry, tries_left, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); ++} ++ ++static unsigned int ++grub_gptprio_successful (struct grub_gpt_partentry *entry) ++{ ++ return (unsigned int) grub_gpt_entry_attribute ++ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL, 1); ++} ++ ++static grub_err_t ++grub_find_next (const char *disk_name, ++ const grub_gpt_part_guid_t *part_type, ++ char **part_name, char **part_guid) ++{ ++ struct grub_gpt_partentry *part_found = NULL; ++ grub_device_t dev = NULL; ++ grub_gpt_t gpt = NULL; ++ grub_uint32_t i, part_index; ++ ++ dev = grub_device_open (disk_name); ++ if (!dev) ++ goto done; ++ ++ gpt = grub_gpt_read (dev->disk); ++ if (!gpt) ++ goto done; ++ ++ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; ++ ++ for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) ++ { ++ struct grub_gpt_partentry *part = &gpt->entries[i]; ++ ++ if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) ++ { ++ unsigned int priority, tries_left, successful, old_priority = 0; ++ ++ priority = grub_gptprio_priority (part); ++ tries_left = grub_gptprio_tries_left (part); ++ successful = grub_gptprio_successful (part); ++ ++ if (part_found) ++ old_priority = grub_gptprio_priority (part_found); ++ ++ if ((tries_left || successful) && priority > old_priority) ++ { ++ part_index = i; ++ part_found = part; ++ } ++ } ++ } ++ ++ if (!part_found) ++ { ++ grub_error (GRUB_ERR_UNKNOWN_DEVICE, N_("no such partition")); ++ goto done; ++ } ++ ++ if (grub_gptprio_tries_left (part_found)) ++ { ++ unsigned int tries_left = grub_gptprio_tries_left (part_found); ++ ++ grub_gptprio_set_tries_left (part_found, tries_left - 1); ++ ++ if (grub_gpt_update_checksums (gpt)) ++ goto done; ++ ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; ++ } ++ ++ *part_name = grub_xasprintf ("%s,gpt%u", disk_name, part_index + 1); ++ if (!*part_name) ++ goto done; ++ ++ *part_guid = ++ grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", ++ grub_le_to_cpu32 (part_found->guid.data1), ++ grub_le_to_cpu16 
(part_found->guid.data2), ++ grub_le_to_cpu16 (part_found->guid.data3), ++ part_found->guid.data4[0], ++ part_found->guid.data4[1], ++ part_found->guid.data4[2], ++ part_found->guid.data4[3], ++ part_found->guid.data4[4], ++ part_found->guid.data4[5], ++ part_found->guid.data4[6], ++ part_found->guid.data4[7]); ++ if (!*part_name) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++done: ++ grub_gpt_free (gpt); ++ ++ if (dev) ++ grub_device_close (dev); ++ ++ return grub_errno; ++} ++ ++ ++ ++static grub_err_t ++grub_cmd_next (grub_extcmd_context_t ctxt, int argc, char **args) ++{ ++ struct grub_arg_list *state = ctxt->state; ++ char *p, *root = NULL, *part_name = NULL, *part_guid = NULL; ++ ++ /* TODO: Add a uuid parser and a command line flag for providing type. */ ++ grub_gpt_part_guid_t part_type = GRUB_GPT_PARTITION_TYPE_USR_X86_64; ++ ++ if (!state[NEXT_SET_DEVICE].set || !state[NEXT_SET_UUID].set) ++ { ++ grub_error (GRUB_ERR_INVALID_COMMAND, N_("-d and -u are required")); ++ goto done; ++ } ++ ++ if (argc == 0) ++ root = grub_strdup (grub_env_get ("root")); ++ else if (argc == 1) ++ root = grub_strdup (args[0]); ++ else ++ { ++ grub_error (GRUB_ERR_BAD_ARGUMENT, N_("unexpected arguments")); ++ goto done; ++ } ++ ++ if (!root) ++ goto done; ++ ++ /* To make using $root practical strip off the partition name. */ ++ p = grub_strchr (root, ','); ++ if (p) ++ *p = '\0'; ++ ++ if (grub_find_next (root, &part_type, &part_name, &part_guid)) ++ goto done; ++ ++ if (grub_env_set (state[NEXT_SET_DEVICE].arg, part_name)) ++ goto done; ++ ++ if (grub_env_set (state[NEXT_SET_UUID].arg, part_guid)) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++done: ++ grub_free (root); ++ grub_free (part_name); ++ grub_free (part_guid); ++ ++ return grub_errno; ++} ++ ++static grub_extcmd_t cmd_next; ++ ++GRUB_MOD_INIT(gptprio) ++{ ++ cmd_next = grub_register_extcmd ("gptprio.next", grub_cmd_next, 0, ++ N_("-d VARNAME -u VARNAME [DEVICE]"), ++ N_("Select next partition to boot."), ++ options_next); ++} ++ ++GRUB_MOD_FINI(gptprio) ++{ ++ grub_unregister_extcmd (cmd_next); ++} +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 8183a1f30..8a6e56af4 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -53,6 +53,10 @@ typedef struct grub_gpt_guid grub_gpt_part_guid_t; + GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ + 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + ++#define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ ++ GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ ++ 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) ++ + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } + +@@ -87,6 +91,51 @@ struct grub_gpt_partentry + char name[72]; + } GRUB_PACKED; + ++enum grub_gpt_part_attr_offset ++{ ++ /* Standard partition attribute bits defined by UEFI. */ ++ GRUB_GPT_PART_ATTR_OFFSET_REQUIRED = 0, ++ GRUB_GPT_PART_ATTR_OFFSET_NO_BLOCK_IO_PROTOCOL = 1, ++ GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE = 2, ++ ++ /* De facto standard attribute bits defined by Microsoft and reused by ++ * http://www.freedesktop.org/wiki/Specifications/DiscoverablePartitionsSpec */ ++ GRUB_GPT_PART_ATTR_OFFSET_READ_ONLY = 60, ++ GRUB_GPT_PART_ATTR_OFFSET_NO_AUTO = 63, ++ ++ /* Partition attributes for priority based selection, ++ * Currently only valid for PARTITION_TYPE_USR_X86_64. ++ * TRIES_LEFT and PRIORITY are 4 bit wide fields. 
*/ ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY = 48, ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT = 52, ++ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL = 56, ++}; ++ ++/* Helpers for reading/writing partition attributes. */ ++static inline grub_uint64_t ++grub_gpt_entry_attribute (struct grub_gpt_partentry *entry, ++ enum grub_gpt_part_attr_offset offset, ++ unsigned int bits) ++{ ++ grub_uint64_t attrib = grub_le_to_cpu64 (entry->attrib); ++ ++ return (attrib >> offset) & ((1ULL << bits) - 1); ++} ++ ++static inline void ++grub_gpt_entry_set_attribute (struct grub_gpt_partentry *entry, ++ grub_uint64_t value, ++ enum grub_gpt_part_attr_offset offset, ++ unsigned int bits) ++{ ++ grub_uint64_t attrib, mask; ++ ++ mask = (((1ULL << bits) - 1) << offset); ++ attrib = grub_le_to_cpu64 (entry->attrib) & ~mask; ++ attrib |= ((value << offset) & mask); ++ entry->attrib = grub_cpu_to_le64 (attrib); ++} ++ + /* Basic GPT partmap module. */ + grub_err_t + grub_gpt_partition_map_iterate (grub_disk_t disk, +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +new file mode 100644 +index 000000000..f4aea0dc9 +--- /dev/null ++++ b/tests/gptprio_test.in +@@ -0,0 +1,150 @@ ++#! /bin/bash ++set -e ++ ++# Copyright (C) 2010 Free Software Foundation, Inc. ++# Copyright (C) 2014 CoreOS, Inc. ++# ++# GRUB is free software: you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation, either version 3 of the License, or ++# (at your option) any later version. ++# ++# GRUB is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GRUB. If not, see . ++ ++sgdisk=sgdisk ++grubshell=@builddir@/grub-shell ++ ++if ! which "${sgdisk}" >/dev/null 2>&1; then ++ echo "sgdisk not installed; cannot test gptprio." ++ exit 77 ++fi ++ ++. "@builddir@/grub-core/modinfo.sh" ++ ++case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in ++ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) ++ disk=ata0 ++ ;; ++ powerpc-ieee1275) ++ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ sparc64-ieee1275) ++ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ i386-ieee1275) ++ disk=ieee1275/d ++ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. ++ exit 0 ++ ;; ++ mips-arc) ++ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. 
++ exit 0 ;; ++ mipsel-arc) ++ disk=arc/scsi0/disk0/rdisk0 ++ ;; ++ *) ++ disk=hd0 ++ ;; ++esac ++img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 ++trap "rm -f '${img1}'" EXIT ++ ++prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" ++declare -a prio_uuid ++prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" ++prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" ++prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" ++ ++create_disk_image () { ++ rm -f "${img1}" ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none ++ ${sgdisk} \ ++ -n 1:0:+1 -c 1:ESP -t 1:ef00 \ ++ -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ ++ -n 3:0:+1 -c 3:B -t 3:"${prio_type}" -u 3:"${prio_uuid[3]}" \ ++ -n 4:0:+1 -c 4:C -t 4:"${prio_type}" -u 4:"${prio_uuid[4]}" \ ++ "${img1}" >/dev/null ++} ++ ++ ++fmt_prio () { ++ priority=$(( ( $1 & 15 ) << 48 )) ++ tries=$(( ( $2 & 15 ) << 52 )) ++ success=$(( ( $3 & 1 ) << 56 )) ++ printf %016x $(( priority | tries | success )) ++} ++ ++set_prio () { ++ part="$1" ++ attr=$(fmt_prio $2 $3 $4) ++ ${sgdisk} -A "${part}:=:${attr}" "${img1}" >/dev/null ++} ++ ++check_prio () { ++ part="$1" ++ expect=$(fmt_prio $2 $3 $4) ++ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ ++ | awk '/^Attribute flags: / {print $3}') ++ if [[ "${expect}" != "${result}" ]]; then ++ echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 ++ exit 1 ++ fi ++} ++ ++run_next() { ++ "${grubshell}" --disk="${img1}" --modules=gptprio < +Date: Sat, 15 Nov 2014 13:27:13 -0800 +Subject: [PATCH] gpt: split out checksum recomputation + +For basic data modifications the full repair function is overkill. +--- + grub-core/lib/gpt.c | 30 ++++++++++++++++++++---------- + include/grub/gpt_partition.h | 3 +++ + 2 files changed, 23 insertions(+), 10 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 67ffdf703..198234071 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -293,7 +293,6 @@ grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; +- grub_uint32_t crc; + + if (disk->log_sector_size != gpt->log_sector_size) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, +@@ -331,13 +330,32 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + gpt->backup.alternate_lba = gpt->primary.header_lba; + gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); + ++ /* Recompute checksums. */ ++ if (grub_gpt_update_checksums (gpt)) ++ return grub_errno; ++ ++ /* Sanity check. */ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= GRUB_GPT_BOTH_VALID; ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_update_checksums (grub_gpt_t gpt) ++{ ++ grub_uint32_t crc; ++ + /* Writing headers larger than our header structure are unsupported. */ + gpt->primary.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); + gpt->backup.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); + +- /* Recompute checksums. */ + if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) + return grub_errno; + +@@ -350,14 +368,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) + return grub_errno; + +- /* Sanity check. 
*/ +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); +- +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); +- +- gpt->status |= GRUB_GPT_BOTH_VALID; + return GRUB_ERR_NONE; + } + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 8a6e56af4..f5197b8ae 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -200,6 +200,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + ++/* Recompute checksums, must be called after modifying GPT data. */ ++grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); ++ + /* Write headers and entry tables back to disk. */ + grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); + diff --git a/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch b/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch new file mode 100644 index 00000000..2fa53107 --- /dev/null +++ b/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch @@ -0,0 +1,77 @@ +From 415fc53d5282e848e64096aeb90a628488ab2c94 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 12:55:53 -0800 +Subject: [PATCH] gpt: move gpt guid printing function to common library + +--- + grub-core/commands/gptprio.c | 16 ++-------------- + grub-core/lib/gpt.c | 13 +++++++++++++ + include/grub/gpt_partition.h | 4 ++++ + 3 files changed, 19 insertions(+), 14 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 1e2e06cef..24157477c 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -141,20 +141,8 @@ grub_find_next (const char *disk_name, + if (!*part_name) + goto done; + +- *part_guid = +- grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", +- grub_le_to_cpu32 (part_found->guid.data1), +- grub_le_to_cpu16 (part_found->guid.data2), +- grub_le_to_cpu16 (part_found->guid.data3), +- part_found->guid.data4[0], +- part_found->guid.data4[1], +- part_found->guid.data4[2], +- part_found->guid.data4[3], +- part_found->guid.data4[4], +- part_found->guid.data4[5], +- part_found->guid.data4[6], +- part_found->guid.data4[7]); +- if (!*part_name) ++ *part_guid = grub_gpt_guid_to_str (&part_found->guid); ++ if (!*part_guid) + goto done; + + grub_errno = GRUB_ERR_NONE; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 198234071..9a1835b84 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -31,6 +31,19 @@ GRUB_MOD_LICENSE ("GPLv3+"); + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + + ++char * ++grub_gpt_guid_to_str (grub_gpt_guid_t *guid) ++{ ++ return grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", ++ grub_le_to_cpu32 (guid->data1), ++ grub_le_to_cpu16 (guid->data2), ++ grub_le_to_cpu16 (guid->data3), ++ guid->data4[0], guid->data4[1], ++ guid->data4[2], guid->data4[3], ++ guid->data4[4], guid->data4[5], ++ guid->data4[6], guid->data4[7]); ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index f5197b8ae..f2b3630e4 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -33,6 +33,10 
@@ struct grub_gpt_guid + typedef struct grub_gpt_guid grub_gpt_guid_t; + typedef struct grub_gpt_guid grub_gpt_part_guid_t; + ++/* Format the raw little-endian GUID as a newly allocated string. */ ++char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); ++ ++ + #define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ + { \ + grub_cpu_to_le32_compile_time (a), \ diff --git a/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch b/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch new file mode 100644 index 00000000..b23a4ae9 --- /dev/null +++ b/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch @@ -0,0 +1,24 @@ +From de8ed5eacc471e74e32a70257261ef83cb8b9adc Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 14:54:27 -0800 +Subject: [PATCH] gpt: switch partition names to a 16 bit type + +In UEFI/GPT strings are UTF-16 so use a uint16 to make dealing with the +string practical. +--- + include/grub/gpt_partition.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index f2b3630e4..1d065df99 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -92,7 +92,7 @@ struct grub_gpt_partentry + grub_uint64_t start; + grub_uint64_t end; + grub_uint64_t attrib; +- char name[72]; ++ grub_uint16_t name[36]; + } GRUB_PACKED; + + enum grub_gpt_part_attr_offset diff --git a/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch b/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch new file mode 100644 index 00000000..a787e50b --- /dev/null +++ b/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch @@ -0,0 +1,124 @@ +From 19f4319c4f68b416433265a104e49520486419cc Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 15:49:57 -0800 +Subject: [PATCH] tests: add some partitions to the gpt unit test data + +--- + tests/gpt_unit_test.c | 65 ++++++++++++++++++++++++++++++++++++------- + 1 file changed, 55 insertions(+), 10 deletions(-) + +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 86e4364a5..5692a5a52 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -89,12 +89,12 @@ struct test_data + }; + + +-/* Sample primary GPT header for an empty 1MB disk. */ ++/* Sample primary GPT header for a 1MB disk. 
*/ + static const struct grub_gpt_header example_primary = { + .magic = GRUB_GPT_HEADER_MAGIC, + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), +- .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), ++ .crc32 = grub_cpu_to_le32_compile_time (0xb985abe0), + .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), +@@ -104,7 +104,52 @@ static const struct grub_gpt_header example_primary = { + .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), ++}; ++ ++static const struct grub_gpt_partentry example_entries[TABLE_ENTRIES] = { ++ { ++ .type = GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM, ++ .guid = GRUB_GPT_GUID_INIT (0xa0f1792e, 0xb4ce, 0x4136, 0xbc, 0xf2, ++ 0x1a, 0xfc, 0x13, 0x3c, 0x28, 0x28), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (0x3f), ++ .attrib = 0x0, ++ .name = { ++ grub_cpu_to_le16_compile_time ('E'), ++ grub_cpu_to_le16_compile_time ('F'), ++ grub_cpu_to_le16_compile_time ('I'), ++ grub_cpu_to_le16_compile_time (' '), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time ('Y'), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time ('T'), ++ grub_cpu_to_le16_compile_time ('E'), ++ grub_cpu_to_le16_compile_time ('M'), ++ 0x0, ++ } ++ }, ++ { ++ .type = GRUB_GPT_PARTITION_TYPE_BIOS_BOOT, ++ .guid = GRUB_GPT_GUID_INIT (0x876c898d, 0x1b40, 0x4727, 0xa1, 0x61, ++ 0xed, 0xf9, 0xb5, 0x48, 0x66, 0x74), ++ .start = grub_cpu_to_le64_compile_time (0x40), ++ .end = grub_cpu_to_le64_compile_time (0x7f), ++ .attrib = grub_cpu_to_le64_compile_time ( ++ 1ULL << GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE), ++ .name = { ++ grub_cpu_to_le16_compile_time ('B'), ++ grub_cpu_to_le16_compile_time ('I'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('S'), ++ grub_cpu_to_le16_compile_time (' '), ++ grub_cpu_to_le16_compile_time ('B'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('O'), ++ grub_cpu_to_le16_compile_time ('T'), ++ 0x0, ++ } ++ }, + }; + + /* And the backup header. */ +@@ -112,7 +157,7 @@ static const struct grub_gpt_header example_backup = { + .magic = GRUB_GPT_HEADER_MAGIC, + .version = GRUB_GPT_HEADER_VERSION, + .headersize = sizeof (struct grub_gpt_header), +- .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), ++ .crc32 = grub_cpu_to_le32_compile_time (0x0af785eb), + .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), + .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), + .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), +@@ -122,7 +167,7 @@ static const struct grub_gpt_header example_backup = { + .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), + .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), + .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), +- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), + }; + + /* Sample protective MBR for the same 1MB disk. 
Note, this matches +@@ -192,6 +237,10 @@ reset_disk (struct test_data *data) + memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); + memcpy (&data->raw->primary_header, &example_primary, + sizeof (data->raw->primary_header)); ++ memcpy (&data->raw->primary_entries, &example_entries, ++ sizeof (data->raw->primary_entries)); ++ memcpy (&data->raw->backup_entries, &example_entries, ++ sizeof (data->raw->backup_entries)); + memcpy (&data->raw->backup_header, &example_backup, + sizeof (data->raw->backup_header)); + +@@ -270,11 +319,7 @@ read_disk (struct test_data *data) + + gpt = grub_gpt_read (data->dev->disk); + if (gpt == NULL) +- { +- grub_print_error (); +- grub_fatal ("grub_gpt_read failed"); +- } +- ++ grub_fatal ("grub_gpt_read failed: %s", grub_errmsg); + + return gpt; + } diff --git a/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch new file mode 100644 index 00000000..b4563654 --- /dev/null +++ b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch @@ -0,0 +1,443 @@ +From 5ca51d1d60b6a692fc679b3cc0236cec0a66e1aa Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 27 Nov 2014 16:34:21 -0800 +Subject: [PATCH] gpt: add search by partition label and uuid commands + +Builds on the existing filesystem search code. Only for GPT right now. +--- + Makefile.util.def | 2 + + grub-core/Makefile.core.def | 10 +++ + grub-core/commands/search.c | 49 +++++++++++++++ + grub-core/commands/search_part_label.c | 5 ++ + grub-core/commands/search_part_uuid.c | 5 ++ + grub-core/commands/search_wrap.c | 12 ++++ + grub-core/lib/gpt.c | 64 ++++++++++++++++++++ + include/grub/gpt_partition.h | 16 +++++ + include/grub/search.h | 4 ++ + tests/gpt_unit_test.c | 84 ++++++++++++++++++++++++++ + 10 files changed, 251 insertions(+) + create mode 100644 grub-core/commands/search_part_label.c + create mode 100644 grub-core/commands/search_part_uuid.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index a2b84ec4b..b63a2963c 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1287,6 +1287,8 @@ program = { + name = gpt_unit_test; + common = tests/gpt_unit_test.c; + common = tests/lib/unit_test.c; ++ common = grub-core/commands/search_part_label.c; ++ common = grub-core/commands/search_part_uuid.c; + common = grub-core/disk/host.c; + common = grub-core/kern/emu/hostfs.c; + common = grub-core/lib/gpt.c; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 615b00226..9964d42fe 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -1095,6 +1095,16 @@ module = { + common = commands/search_label.c; + }; + ++module = { ++ name = search_part_uuid; ++ common = commands/search_part_uuid.c; ++}; ++ ++module = { ++ name = search_part_label; ++ common = commands/search_part_label.c; ++}; ++ + module = { + name = setpci; + common = commands/setpci.c; +diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c +index ed090b3af..4ad72c5b4 100644 +--- a/grub-core/commands/search.c ++++ b/grub-core/commands/search.c +@@ -30,6 +30,9 @@ + #include + #include + #include ++#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) ++#include ++#endif + + GRUB_MOD_LICENSE ("GPLv3+"); + +@@ -90,6 +93,44 @@ iterate_device (const char *name, void *data) + } + grub_free (buf); + } ++#elif defined(DO_SEARCH_PART_UUID) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) 
++ { ++ if (grub_gpt_part_uuid (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcasecmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ ++ grub_device_close (dev); ++ } ++ } ++#elif defined(DO_SEARCH_PART_LABEL) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) ++ { ++ if (grub_gpt_part_label (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ ++ grub_device_close (dev); ++ } ++ } + #else + { + /* SEARCH_FS_UUID or SEARCH_LABEL */ +@@ -313,6 +354,10 @@ static grub_command_t cmd; + + #ifdef DO_SEARCH_FILE + GRUB_MOD_INIT(search_fs_file) ++#elif defined(DO_SEARCH_PART_UUID) ++GRUB_MOD_INIT(search_part_uuid) ++#elif defined(DO_SEARCH_PART_LABEL) ++GRUB_MOD_INIT(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_INIT(search_fs_uuid) + #else +@@ -327,6 +372,10 @@ GRUB_MOD_INIT(search_label) + + #ifdef DO_SEARCH_FILE + GRUB_MOD_FINI(search_fs_file) ++#elif defined(DO_SEARCH_PART_UUID) ++GRUB_MOD_FINI(search_part_uuid) ++#elif defined(DO_SEARCH_PART_LABEL) ++GRUB_MOD_FINI(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_FINI(search_fs_uuid) + #else +diff --git a/grub-core/commands/search_part_label.c b/grub-core/commands/search_part_label.c +new file mode 100644 +index 000000000..ca906cbd9 +--- /dev/null ++++ b/grub-core/commands/search_part_label.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_PART_LABEL 1 ++#define FUNC_NAME grub_search_part_label ++#define COMMAND_NAME "search.part_label" ++#define HELP_MESSAGE N_("Search devices by partition label. If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_part_uuid.c b/grub-core/commands/search_part_uuid.c +new file mode 100644 +index 000000000..2d1d3d0d7 +--- /dev/null ++++ b/grub-core/commands/search_part_uuid.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_PART_UUID 1 ++#define FUNC_NAME grub_search_part_uuid ++#define COMMAND_NAME "search.part_uuid" ++#define HELP_MESSAGE N_("Search devices by partition UUID. 
If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c +index d7fd26b94..e3ff756df 100644 +--- a/grub-core/commands/search_wrap.c ++++ b/grub-core/commands/search_wrap.c +@@ -36,6 +36,10 @@ static const struct grub_arg_option options[] = + 0, 0}, + {"fs-uuid", 'u', 0, N_("Search devices by a filesystem UUID."), + 0, 0}, ++ {"part-label", 'L', 0, N_("Search devices by a partition label."), ++ 0, 0}, ++ {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), ++ 0, 0}, + {"set", 's', GRUB_ARG_OPTION_OPTIONAL, + N_("Set a variable to the first device found."), N_("VARNAME"), + ARG_TYPE_STRING}, +@@ -71,6 +75,8 @@ enum options + SEARCH_FILE, + SEARCH_LABEL, + SEARCH_FS_UUID, ++ SEARCH_PART_LABEL, ++ SEARCH_PART_UUID, + SEARCH_SET, + SEARCH_NO_FLOPPY, + SEARCH_HINT, +@@ -186,6 +192,12 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + else if (state[SEARCH_FS_UUID].set) + grub_search_fs_uuid (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); ++ else if (state[SEARCH_PART_LABEL].set) ++ grub_search_part_label (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); ++ else if (state[SEARCH_PART_UUID].set) ++ grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); + else if (state[SEARCH_FILE].set) + grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 9a1835b84..10a4b852d 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -18,7 +18,9 @@ + * along with GRUB. If not, see . + */ + ++#include + #include ++#include + #include + #include + #include +@@ -44,6 +46,68 @@ grub_gpt_guid_to_str (grub_gpt_guid_t *guid) + guid->data4[6], guid->data4[7]); + } + ++static grub_err_t ++grub_gpt_device_partentry (grub_device_t device, ++ struct grub_gpt_partentry *entry) ++{ ++ grub_disk_t disk = device->disk; ++ grub_partition_t p; ++ grub_err_t err; ++ ++ if (!disk || !disk->partition) ++ return grub_error (GRUB_ERR_BUG, "not a partition"); ++ ++ if (grub_strcmp (disk->partition->partmap->name, "gpt")) ++ return grub_error (GRUB_ERR_BAD_ARGUMENT, "not a GPT partition"); ++ ++ p = disk->partition; ++ disk->partition = p->parent; ++ err = grub_disk_read (disk, p->offset, p->index, sizeof (*entry), entry); ++ disk->partition = p; ++ ++ return err; ++} ++ ++grub_err_t ++grub_gpt_part_label (grub_device_t device, char **label) ++{ ++ struct grub_gpt_partentry entry; ++ const grub_size_t name_len = ARRAY_SIZE (entry.name); ++ const grub_size_t label_len = name_len * GRUB_MAX_UTF8_PER_UTF16 + 1; ++ grub_size_t i; ++ grub_uint8_t *end; ++ ++ if (grub_gpt_device_partentry (device, &entry)) ++ return grub_errno; ++ ++ *label = grub_malloc (label_len); ++ if (!*label) ++ return grub_errno; ++ ++ for (i = 0; i < name_len; i++) ++ entry.name[i] = grub_le_to_cpu16 (entry.name[i]); ++ ++ end = grub_utf16_to_utf8 ((grub_uint8_t *) *label, entry.name, name_len); ++ *end = '\0'; ++ ++ return GRUB_ERR_NONE; ++} ++ ++grub_err_t ++grub_gpt_part_uuid (grub_device_t device, char **uuid) ++{ ++ struct grub_gpt_partentry entry; ++ ++ if (grub_gpt_device_partentry (device, &entry)) ++ return grub_errno; ++ ++ *uuid = grub_gpt_guid_to_str (&entry.guid); ++ if (!*uuid) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h 
b/include/grub/gpt_partition.h +index 1d065df99..dc2dec43a 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -49,6 +49,10 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); + GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) + ++#define GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM \ ++ GRUB_GPT_GUID_INIT (0xc12a7328, 0xf81f, 0x11d2, \ ++ 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b) ++ + #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ + GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ + 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) +@@ -216,4 +220,16 @@ grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); + grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size); + ++ ++/* Utilities for simple partition data lookups, usage is intended to ++ * be similar to fs->label and fs->uuid functions. */ ++ ++/* Return the partition label of the device DEVICE in LABEL. ++ * The label is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_part_label (grub_device_t device, char **label); ++ ++/* Return the partition uuid of the device DEVICE in UUID. ++ * The label is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); ++ + #endif /* ! GRUB_GPT_PARTITION_HEADER */ +diff --git a/include/grub/search.h b/include/grub/search.h +index d80347df3..c2f40abe9 100644 +--- a/include/grub/search.h ++++ b/include/grub/search.h +@@ -25,5 +25,9 @@ void grub_search_fs_uuid (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); + void grub_search_label (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); ++void grub_search_part_uuid (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); ++void grub_search_part_label (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); + + #endif +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 5692a5a52..deb55a926 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -21,10 +21,12 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include + + #include +@@ -534,6 +536,84 @@ repair_test (void) + + close_disk (&data); + } ++ ++static void ++search_label_test (void) ++{ ++ struct test_data data; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("EFI SYSTEM", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("BIOS BOOT", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ grub_search_part_label ("bogus name", "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", 
test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ ++static void ++search_uuid_test (void) ++{ ++ struct test_data data; ++ const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; ++ const char gpt2_uuid[] = "876c898d-1b40-4727-a161-edf9b5486674"; ++ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_uuid (gpt1_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_part_uuid (gpt2_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ grub_search_part_uuid (bogus_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ + void + grub_unit_test_init (void) + { +@@ -546,6 +626,8 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); ++ grub_test_register ("gpt_search_label_test", search_label_test); ++ grub_test_register ("gpt_search_uuid_test", search_uuid_test); + } + + void +@@ -557,5 +639,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); ++ grub_test_unregister ("gpt_search_label_test"); ++ grub_test_unregister ("gpt_search_uuid_test"); + grub_fini_all (); + } diff --git a/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch b/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch new file mode 100644 index 00000000..079c7428 --- /dev/null +++ b/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch @@ -0,0 +1,113 @@ +From ea2059a5db3f2c74216d30e4d509f477edaf0a42 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Fri, 31 Jul 2015 15:03:11 -0700 +Subject: [PATCH] gpt: clean up little-endian crc32 computation + + - Remove problematic cast from *uint8_t to *uint32_t (alignment issue). + - Remove dynamic allocation and associated error handling paths. + - Match parameter ordering to existing grub_crypto_hash function. 
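For reference, the byte-order rule this cleanup preserves (the patch keeps the comment "GRUB_MD_CRC32 always uses big endian, gpt is always little") can be shown with a small standalone sketch in plain C. This is not GRUB code: swap32 below is only a stand-in for grub_swap_bytes32, and the digest value is made up.

/* Plain C sketch, not GRUB code: the CRC32 digest comes back from the
 * hash API in big-endian byte order, but GPT stores every checksum
 * little-endian, hence the byte swap before the value is written out. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
swap32 (uint32_t v)
{
  return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8)
       | ((v & 0x00ff0000U) >> 8)  | ((v & 0xff000000U) >> 24);
}

int
main (void)
{
  uint32_t be_digest = 0x12345678U;   /* example digest in big-endian order */
  printf ("stored in the GPT header as: %08x\n", swap32 (be_digest));
  return 0;
}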
+--- + grub-core/lib/gpt.c | 51 ++++++++++++--------------------------------- + 1 file changed, 13 insertions(+), 38 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 10a4b852d..aedc4f7a1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -122,45 +122,29 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + return sectors; + } + +-static grub_err_t +-grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) ++static void ++grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) + { +- grub_uint8_t *crc32_context; +- +- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); +- if (!crc32_context) +- return grub_errno; ++ grub_uint32_t crc32_val; + +- GRUB_MD_CRC32->init (crc32_context); +- GRUB_MD_CRC32->write (crc32_context, data, len); +- GRUB_MD_CRC32->final (crc32_context); ++ grub_crypto_hash (GRUB_MD_CRC32, &crc32_val, data, len); + + /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ +- *crc = grub_swap_bytes32 (*(grub_uint32_t *) +- GRUB_MD_CRC32->read (crc32_context)); +- +- grub_free (crc32_context); +- +- return GRUB_ERR_NONE; ++ *crc = grub_swap_bytes32 (crc32_val); + } + +-static grub_err_t +-grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) ++static void ++grub_gpt_header_lecrc32 (grub_uint32_t *crc, struct grub_gpt_header *header) + { + grub_uint32_t old, new; +- grub_err_t err; + + /* crc32 must be computed with the field cleared. */ + old = header->crc32; + header->crc32 = 0; +- err = grub_gpt_lecrc32 (header, sizeof (*header), &new); ++ grub_gpt_lecrc32 (&new, header, sizeof (*header)); + header->crc32 = old; + +- if (err) +- return err; +- + *crc = new; +- return GRUB_ERR_NONE; + } + + /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ +@@ -192,9 +176,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->version != GRUB_GPT_HEADER_VERSION) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); + +- if (grub_gpt_header_lecrc32 (gpt, &crc)) +- return grub_errno; +- ++ grub_gpt_header_lecrc32 (&crc, gpt); + if (gpt->crc32 != crc) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); + +@@ -289,9 +271,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +- if (grub_gpt_lecrc32 (entries, entries_size, &crc)) +- goto fail; +- ++ grub_gpt_lecrc32 (&crc, entries, entries_size); + if (crc != header->partentry_crc32) + { + grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); +@@ -433,17 +413,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + gpt->backup.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); + +- if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) +- return grub_errno; +- ++ grub_gpt_lecrc32 (&crc, gpt->entries, gpt->entries_size); + gpt->primary.partentry_crc32 = crc; + gpt->backup.partentry_crc32 = crc; + +- if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) +- return grub_errno; +- +- if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) +- return grub_errno; ++ grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); ++ grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + + return GRUB_ERR_NONE; + } diff --git a/packages/grub/0016-gpt-minor-cleanup.patch b/packages/grub/0016-gpt-minor-cleanup.patch new file mode 100644 index 00000000..940106cd --- /dev/null +++ b/packages/grub/0016-gpt-minor-cleanup.patch @@ -0,0 +1,66 @@ +From 025b3591c95200132256b44d15048a26bf558c40 Mon Sep 17 00:00:00 2001 +From: Alex Crawford +Date: Mon, 31 Aug 2015 15:23:39 -0700 +Subject: [PATCH] gpt: minor cleanup + +--- + include/grub/gpt_partition.h | 2 +- + tests/gpt_unit_test.c | 12 ++++++------ + 2 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index dc2dec43a..ae72b026c 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -229,7 +229,7 @@ grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, + grub_err_t grub_gpt_part_label (grub_device_t device, char **label); + + /* Return the partition uuid of the device DEVICE in UUID. +- * The label is in a new buffer and should be freed by the caller. */ ++ * The uuid is in a new buffer and should be freed by the caller. */ + grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); + + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index deb55a926..7a1af46e1 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -538,7 +538,7 @@ repair_test (void) + } + + static void +-search_label_test (void) ++search_part_label_test (void) + { + struct test_data data; + const char *test_result; +@@ -575,7 +575,7 @@ search_label_test (void) + } + + static void +-search_uuid_test (void) ++search_part_uuid_test (void) + { + struct test_data data; + const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; +@@ -626,8 +626,8 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); +- grub_test_register ("gpt_search_label_test", search_label_test); +- grub_test_register ("gpt_search_uuid_test", search_uuid_test); ++ grub_test_register ("gpt_search_part_label_test", search_part_label_test); ++ grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + } + + void +@@ -639,7 +639,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); +- grub_test_unregister ("gpt_search_label_test"); +- grub_test_unregister ("gpt_search_uuid_test"); ++ grub_test_unregister ("gpt_search_part_label_test"); ++ grub_test_unregister ("gpt_search_part_uuid_test"); + grub_fini_all (); + } diff --git a/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch new file mode 100644 index 00000000..cd4b4cbc --- /dev/null +++ b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch @@ -0,0 +1,269 @@ +From aa6b435ee13658a7eced13ebbe9be25567ee019a Mon Sep 17 00:00:00 2001 +From: Alex Crawford +Date: Mon, 31 Aug 2015 15:15:48 -0700 +Subject: [PATCH] gpt: add search by disk uuid command + +--- + Makefile.util.def | 1 + + grub-core/Makefile.core.def | 5 ++++ + grub-core/commands/search.c | 28 +++++++++++++++++++++-- + grub-core/commands/search_disk_uuid.c | 5 ++++ + grub-core/commands/search_wrap.c | 6 +++++ + grub-core/lib/gpt.c | 21 +++++++++++++++++ + include/grub/gpt_partition.h | 4 ++++ + include/grub/search.h | 2 ++ + tests/gpt_unit_test.c | 33 +++++++++++++++++++++++++++ + 9 files changed, 103 insertions(+), 2 deletions(-) + create mode 100644 grub-core/commands/search_disk_uuid.c + +diff --git a/Makefile.util.def b/Makefile.util.def +index b63a2963c..65cbfc081 100644 +--- a/Makefile.util.def ++++ b/Makefile.util.def +@@ -1289,6 +1289,7 @@ program = { + common = tests/lib/unit_test.c; + common = grub-core/commands/search_part_label.c; + common = grub-core/commands/search_part_uuid.c; ++ common = grub-core/commands/search_disk_uuid.c; + common = grub-core/disk/host.c; + common = grub-core/kern/emu/hostfs.c; + common = grub-core/lib/gpt.c; +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 9964d42fe..79b24c187 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -1105,6 +1105,11 @@ module = { + common = commands/search_part_label.c; + }; + ++module = { ++ name = search_disk_uuid; ++ common = commands/search_disk_uuid.c; ++}; ++ + module = { + name = setpci; + common = commands/setpci.c; +diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c +index 4ad72c5b4..fd411ce3e 100644 +--- a/grub-core/commands/search.c ++++ 
b/grub-core/commands/search.c +@@ -30,7 +30,8 @@ + #include + #include + #include +-#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) ++#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) || \ ++ defined(DO_SEARCH_DISK_UUID) + #include + #endif + +@@ -69,7 +70,7 @@ iterate_device (const char *name, void *data) + name[0] == 'f' && name[1] == 'd' && name[2] >= '0' && name[2] <= '9') + return 1; + +-#ifdef DO_SEARCH_FS_UUID ++#if defined(DO_SEARCH_FS_UUID) || defined(DO_SEARCH_DISK_UUID) + #define compare_fn grub_strcasecmp + #else + #define compare_fn grub_strcmp +@@ -128,6 +129,25 @@ iterate_device (const char *name, void *data) + grub_free (quid); + } + ++ grub_device_close (dev); ++ } ++ } ++#elif defined(DO_SEARCH_DISK_UUID) ++ { ++ grub_device_t dev; ++ char *quid; ++ ++ dev = grub_device_open (name); ++ if (dev) ++ { ++ if (grub_gpt_disk_uuid (dev, &quid) == GRUB_ERR_NONE) ++ { ++ if (grub_strcmp (quid, ctx->key) == 0) ++ found = 1; ++ ++ grub_free (quid); ++ } ++ + grub_device_close (dev); + } + } +@@ -360,6 +380,8 @@ GRUB_MOD_INIT(search_part_uuid) + GRUB_MOD_INIT(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_INIT(search_fs_uuid) ++#elif defined (DO_SEARCH_DISK_UUID) ++GRUB_MOD_INIT(search_disk_uuid) + #else + GRUB_MOD_INIT(search_label) + #endif +@@ -378,6 +400,8 @@ GRUB_MOD_FINI(search_part_uuid) + GRUB_MOD_FINI(search_part_label) + #elif defined (DO_SEARCH_FS_UUID) + GRUB_MOD_FINI(search_fs_uuid) ++#elif defined (DO_SEARCH_DISK_UUID) ++GRUB_MOD_FINI(search_disk_uuid) + #else + GRUB_MOD_FINI(search_label) + #endif +diff --git a/grub-core/commands/search_disk_uuid.c b/grub-core/commands/search_disk_uuid.c +new file mode 100644 +index 000000000..fba96f6b8 +--- /dev/null ++++ b/grub-core/commands/search_disk_uuid.c +@@ -0,0 +1,5 @@ ++#define DO_SEARCH_DISK_UUID 1 ++#define FUNC_NAME grub_search_disk_uuid ++#define COMMAND_NAME "search.disk_uuid" ++#define HELP_MESSAGE N_("Search devices by disk UUID. 
If VARIABLE is specified, the first device found is set to a variable.") ++#include "search.c" +diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c +index e3ff756df..d931c56c5 100644 +--- a/grub-core/commands/search_wrap.c ++++ b/grub-core/commands/search_wrap.c +@@ -40,6 +40,8 @@ static const struct grub_arg_option options[] = + 0, 0}, + {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), + 0, 0}, ++ {"disk-uuid", 'U', 0, N_("Search devices by a disk UUID."), ++ 0, 0}, + {"set", 's', GRUB_ARG_OPTION_OPTIONAL, + N_("Set a variable to the first device found."), N_("VARNAME"), + ARG_TYPE_STRING}, +@@ -77,6 +79,7 @@ enum options + SEARCH_FS_UUID, + SEARCH_PART_LABEL, + SEARCH_PART_UUID, ++ SEARCH_DISK_UUID, + SEARCH_SET, + SEARCH_NO_FLOPPY, + SEARCH_HINT, +@@ -198,6 +201,9 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + else if (state[SEARCH_PART_UUID].set) + grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); ++ else if (state[SEARCH_DISK_UUID].set) ++ grub_search_disk_uuid (id, var, state[SEARCH_NO_FLOPPY].set, ++ hints, nhints); + else if (state[SEARCH_FILE].set) + grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, + hints, nhints); +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index aedc4f7a1..e162bafd3 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -108,6 +108,27 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) + return GRUB_ERR_NONE; + } + ++grub_err_t ++grub_gpt_disk_uuid (grub_device_t device, char **uuid) ++{ ++ grub_gpt_t gpt = grub_gpt_read (device->disk); ++ if (!gpt) ++ goto done; ++ ++ grub_errno = GRUB_ERR_NONE; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); ++ else ++ grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ ++done: ++ grub_gpt_free (gpt); ++ return grub_errno; ++} ++ + static grub_uint64_t + grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index ae72b026c..2ca063ad9 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -232,4 +232,8 @@ grub_err_t grub_gpt_part_label (grub_device_t device, char **label); + * The uuid is in a new buffer and should be freed by the caller. */ + grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); + ++/* Return the disk uuid of the device DEVICE in UUID. ++ * The uuid is in a new buffer and should be freed by the caller. */ ++grub_err_t grub_gpt_disk_uuid (grub_device_t device, char **uuid); ++ + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ +diff --git a/include/grub/search.h b/include/grub/search.h +index c2f40abe9..7f69d25d1 100644 +--- a/include/grub/search.h ++++ b/include/grub/search.h +@@ -29,5 +29,7 @@ void grub_search_part_uuid (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); + void grub_search_part_label (const char *key, const char *var, int no_floppy, + char **hints, unsigned nhints); ++void grub_search_disk_uuid (const char *key, const char *var, int no_floppy, ++ char **hints, unsigned nhints); + + #endif +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 7a1af46e1..60f601729 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -614,6 +614,37 @@ search_part_uuid_test (void) + close_disk (&data); + } + ++static void ++search_disk_uuid_test (void) ++{ ++ struct test_data data; ++ const char disk_uuid[] = "69c131ad-67d6-46c6-93c4-124c755256ac"; ++ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; ++ const char *test_result; ++ char *expected_result; ++ ++ open_disk (&data); ++ ++ expected_result = grub_xasprintf ("%s", data.dev->disk->name); ++ grub_env_unset ("test_result"); ++ grub_search_disk_uuid (disk_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, ++ "wrong device: %s (%s)", test_result, expected_result); ++ grub_free (expected_result); ++ ++ grub_env_unset ("test_result"); ++ grub_search_disk_uuid (bogus_uuid, "test_result", 0, NULL, 0); ++ test_result = grub_env_get ("test_result"); ++ grub_test_assert (test_result == NULL, ++ "unexpected device: %s", test_result); ++ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++ ++ close_disk (&data); ++} ++ + void + grub_unit_test_init (void) + { +@@ -628,6 +659,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_repair_test", repair_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); ++ grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); + } + + void +@@ -641,5 +673,6 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_repair_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); ++ grub_test_unregister ("gpt_search_disk_uuid_test"); + grub_fini_all (); + } diff --git a/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch b/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch new file mode 100644 index 00000000..9bb3bac0 --- /dev/null +++ b/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch @@ -0,0 +1,64 @@ +From 2009646dc28e1a20b71d176040e9222f5c62d231 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 25 Jul 2016 14:59:29 -0700 +Subject: [PATCH] gpt: do not use disk sizes GRUB will reject as invalid later + on + +GRUB assumes that no disk is ever larger than 1EiB and rejects +reads/writes to such locations. Unfortunately this is not conveyed in +the usual way with the special GRUB_DISK_SIZE_UNKNOWN value. 
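The 1EiB cutoff enforced below works out as follows: 1EiB is 2^60 bytes, and with 512-byte (2^9) sectors that is 2^51 sectors, which is where the (1ULL << 51) comparison in grub_gpt_disk_size_valid comes from. A throwaway check in plain C (not GRUB code) makes the arithmetic explicit:

/* Plain C sketch, not GRUB code: confirm that 1 EiB expressed in 512-byte
 * sectors equals the 1ULL << 51 cutoff used by grub_gpt_disk_size_valid. */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t eib_bytes = 1ULL << 60;   /* 1 EiB in bytes */
  uint64_t sectors = eib_bytes >> 9; /* divide by the 512-byte sector size */
  printf ("1 EiB = %llu sectors, 1ULL << 51 = %llu\n",
          (unsigned long long) sectors,
          (unsigned long long) (1ULL << 51));
  return 0;
}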
+--- + grub-core/lib/gpt.c | 26 ++++++++++++++++++++++++-- + 1 file changed, 24 insertions(+), 2 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index e162bafd3..3e17f2771 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -143,6 +143,28 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) + return sectors; + } + ++/* Copied from grub-core/kern/disk_common.c grub_disk_adjust_range so we can ++ * avoid attempting to use disk->total_sectors when GRUB won't let us. ++ * TODO: Why is disk->total_sectors not set to GRUB_DISK_SIZE_UNKNOWN? */ ++static int ++grub_gpt_disk_size_valid (grub_disk_t disk) ++{ ++ grub_disk_addr_t total_sectors; ++ ++ /* Transform total_sectors to number of 512B blocks. */ ++ total_sectors = disk->total_sectors << (disk->log_sector_size - GRUB_DISK_SECTOR_BITS); ++ ++ /* Some drivers have problems with disks above reasonable. ++ Treat unknown as 1EiB disk. While on it, clamp the size to 1EiB. ++ Just one condition is enough since GRUB_DISK_UNKNOWN_SIZE << ls is always ++ above 9EiB. ++ */ ++ if (total_sectors > (1ULL << 51)) ++ return 0; ++ ++ return 1; ++} ++ + static void + grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) + { +@@ -242,7 +264,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + grub_disk_addr_t addr; + + /* Assumes gpt->log_sector_size == disk->log_sector_size */ +- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ if (grub_gpt_disk_size_valid(disk)) + sector = disk->total_sectors - 1; + else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) + sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); +@@ -394,7 +416,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_BUG, "No valid GPT header"); + + /* Relocate backup to end if disk whenever possible. 
*/ +- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) ++ if (grub_gpt_disk_size_valid(disk)) + backup_header = disk->total_sectors - 1; + + backup_entries = backup_header - diff --git a/packages/grub/0019-gpt-add-verbose-debug-logging.patch b/packages/grub/0019-gpt-add-verbose-debug-logging.patch new file mode 100644 index 00000000..e1a4039c --- /dev/null +++ b/packages/grub/0019-gpt-add-verbose-debug-logging.patch @@ -0,0 +1,251 @@ +From 8fb347e03236e927e3a2a1134923854a39a03c4b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 10 Aug 2016 18:26:03 -0700 +Subject: [PATCH] gpt: add verbose debug logging + +--- + grub-core/lib/gpt.c | 117 +++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 109 insertions(+), 8 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3e17f2771..c2821b563 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -207,6 +207,18 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); + } + ++static grub_uint64_t ++grub_gpt_entries_sectors (struct grub_gpt_header *gpt, ++ unsigned int log_sector_size) ++{ ++ grub_uint64_t sector_bytes, entries_bytes; ++ ++ sector_bytes = 1ULL << log_sector_size; ++ entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * ++ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++ return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); ++} ++ + grub_err_t + grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -236,6 +248,64 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + return GRUB_ERR_NONE; + } + ++static grub_err_t ++grub_gpt_check_primary (grub_gpt_t gpt) ++{ ++ grub_uint64_t backup, primary, entries, entries_len, start, end; ++ ++ primary = grub_le_to_cpu64 (gpt->primary.header_lba); ++ backup = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ entries = grub_le_to_cpu64 (gpt->primary.partitions); ++ entries_len = grub_gpt_entries_sectors(&gpt->primary, gpt->log_sector_size); ++ start = grub_le_to_cpu64 (gpt->primary.start); ++ end = grub_le_to_cpu64 (gpt->primary.end); ++ ++ grub_dprintf ("gpt", "Primary GPT layout:\n" ++ "primary header = 0x%llx backup header = 0x%llx\n" ++ "entries location = 0x%llx length = 0x%llx\n" ++ "first usable = 0x%llx last usable = 0x%llx\n", ++ (unsigned long long) primary, ++ (unsigned long long) backup, ++ (unsigned long long) entries, ++ (unsigned long long) entries_len, ++ (unsigned long long) start, ++ (unsigned long long) end); ++ ++ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ ++static grub_err_t ++grub_gpt_check_backup (grub_gpt_t gpt) ++{ ++ grub_uint64_t backup, primary, entries, entries_len, start, end; ++ ++ backup = grub_le_to_cpu64 (gpt->backup.header_lba); ++ primary = grub_le_to_cpu64 (gpt->backup.alternate_lba); ++ entries = grub_le_to_cpu64 (gpt->backup.partitions); ++ entries_len = grub_gpt_entries_sectors(&gpt->backup, gpt->log_sector_size); ++ start = grub_le_to_cpu64 (gpt->backup.start); ++ end = grub_le_to_cpu64 (gpt->backup.end); ++ ++ grub_dprintf ("gpt", "Backup GPT layout:\n" ++ "primary header = 0x%llx backup header = 0x%llx\n" ++ "entries location = 0x%llx length = 0x%llx\n" ++ "first usable = 0x%llx last usable = 0x%llx\n", ++ (unsigned long long) primary, ++ (unsigned long long) backup, ++ (unsigned long long) entries, ++ (unsigned long long) entries_len, ++ (unsigned long long) start, 
++ (unsigned long long) end); ++ ++ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ return grub_errno; ++ ++ return GRUB_ERR_NONE; ++} ++ + static grub_err_t + grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + { +@@ -246,11 +316,13 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + * but eventually this code should match the existing behavior. */ + gpt->log_sector_size = disk->log_sector_size; + ++ grub_dprintf ("gpt", "reading primary GPT from sector 0x1\n"); ++ + addr = grub_gpt_sector_to_addr (gpt, 1); + if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) + return grub_errno; + +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ if (grub_gpt_check_primary (gpt)) + return grub_errno; + + gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; +@@ -272,11 +344,14 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "Unable to locate backup GPT"); + ++ grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", ++ (unsigned long long) sector); ++ + addr = grub_gpt_sector_to_addr (gpt, sector); + if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) + return grub_errno; + +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ if (grub_gpt_check_backup (gpt)) + return grub_errno; + + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; +@@ -289,6 +364,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + { + struct grub_gpt_partentry *entries = NULL; + grub_uint32_t count, size, crc; ++ grub_uint64_t sector; + grub_disk_addr_t addr; + grub_size_t entries_size; + +@@ -310,7 +386,12 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + if (!entries) + goto fail; + +- addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ sector = grub_le_to_cpu64 (header->partitions); ++ grub_dprintf ("gpt", "reading GPT %lu entries from sector 0x%llx\n", ++ (unsigned long) count, ++ (unsigned long long) sector); ++ ++ addr = grub_gpt_sector_to_addr (gpt, sector); + if (grub_disk_read (disk, addr, 0, entries_size, entries)) + goto fail; + +@@ -336,6 +417,8 @@ grub_gpt_read (grub_disk_t disk) + { + grub_gpt_t gpt; + ++ grub_dprintf ("gpt", "reading GPT from %s\n", disk->name); ++ + gpt = grub_zalloc (sizeof (*gpt)); + if (!gpt) + goto fail; +@@ -369,12 +452,18 @@ grub_gpt_read (grub_disk_t disk) + /* Similarly, favor the value or error from the primary table. 
*/ + if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && + !grub_gpt_read_entries (disk, gpt, &gpt->backup)) +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ { ++ grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ } + + grub_errno = GRUB_ERR_NONE; + if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && + !grub_gpt_read_entries (disk, gpt, &gpt->primary)) +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ { ++ grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ } + + if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || + gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) +@@ -394,21 +483,25 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; + ++ grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); ++ + if (disk->log_sector_size != gpt->log_sector_size) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "GPT sector size must match disk sector size"); + + if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) ++ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) + return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); + + if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) + { ++ grub_dprintf ("gpt", "primary GPT header is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } + else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) + { ++ grub_dprintf ("gpt", "backup GPT header is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); + } +@@ -418,9 +511,13 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + /* Relocate backup to end if disk whenever possible. */ + if (grub_gpt_disk_size_valid(disk)) + backup_header = disk->total_sectors - 1; ++ grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", ++ (unsigned long long) backup_header); + + backup_entries = backup_header - + grub_gpt_size_to_sectors (gpt, gpt->entries_size); ++ grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", ++ (unsigned long long) backup_entries); + + /* Update/fixup header and partition table locations. */ + gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); +@@ -435,13 +532,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_errno; + + /* Sanity check. 
*/ +- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) ++ if (grub_gpt_check_primary (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); + +- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) ++ if (grub_gpt_check_backup (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + + gpt->status |= GRUB_GPT_BOTH_VALID; ++ grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); ++ + return GRUB_ERR_NONE; + } + +@@ -497,9 +596,11 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + if (!(gpt->status & GRUB_GPT_BOTH_VALID)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + ++ grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->primary)) + return grub_errno; + ++ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->backup)) + return grub_errno; + diff --git a/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch b/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch new file mode 100644 index 00000000..6d45c729 --- /dev/null +++ b/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch @@ -0,0 +1,101 @@ +From 3670a0019adb31fd9f849b5128a629e0e89a9bba Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 10 Aug 2016 18:26:03 -0700 +Subject: [PATCH] gpt: improve validation of GPT headers + +Adds basic validation of all the disk locations in the headers, reducing +the chance of corrupting weird locations on disk. +--- + grub-core/lib/gpt.c | 48 +++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 48 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index c2821b563..f83fe29ac 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -224,6 +224,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size) + { + grub_uint32_t crc = 0, size; ++ grub_uint64_t start, end; + + if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); +@@ -245,9 +246,35 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (size < 128 || size % 128) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); + ++ /* And of course there better be some space for partitions! */ ++ start = grub_le_to_cpu64 (gpt->start); ++ end = grub_le_to_cpu64 (gpt->end); ++ if (start > end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid usable sectors"); ++ + return GRUB_ERR_NONE; + } + ++static int ++grub_gpt_headers_equal (grub_gpt_t gpt) ++{ ++ /* Assume headers passed grub_gpt_header_check so skip magic and version. ++ * Individual fields must be checked instead of just using memcmp because ++ * crc32, header, alternate, and partitions will all normally differ. 
*/ ++ ++ if (gpt->primary.headersize != gpt->backup.headersize || ++ gpt->primary.header_lba != gpt->backup.alternate_lba || ++ gpt->primary.start != gpt->backup.start || ++ gpt->primary.end != gpt->backup.end || ++ gpt->primary.maxpart != gpt->backup.maxpart || ++ gpt->primary.partentry_size != gpt->backup.partentry_size || ++ gpt->primary.partentry_crc32 != gpt->backup.partentry_crc32) ++ return 0; ++ ++ return grub_memcmp(&gpt->primary.guid, &gpt->backup.guid, ++ sizeof(grub_gpt_guid_t)) == 0; ++} ++ + static grub_err_t + grub_gpt_check_primary (grub_gpt_t gpt) + { +@@ -273,6 +300,12 @@ grub_gpt_check_primary (grub_gpt_t gpt) + + if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) + return grub_errno; ++ if (primary != 1) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); ++ if (entries <= 1 || entries+entries_len > start) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); ++ if (backup <= end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + return GRUB_ERR_NONE; + } +@@ -302,6 +335,12 @@ grub_gpt_check_backup (grub_gpt_t gpt) + + if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) + return grub_errno; ++ if (primary != 1) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); ++ if (entries <= end || entries+entries_len > backup) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); ++ if (backup <= end) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + return GRUB_ERR_NONE; + } +@@ -354,6 +393,15 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_check_backup (gpt)) + return grub_errno; + ++ /* Ensure the backup header thinks it is located where we found it. */ ++ if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); ++ ++ /* If both primary and backup are valid but differ prefer the primary. 
*/ ++ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ !grub_gpt_headers_equal(gpt)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); ++ + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; + return GRUB_ERR_NONE; + } diff --git a/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch b/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch new file mode 100644 index 00000000..cd8869cc --- /dev/null +++ b/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch @@ -0,0 +1,31 @@ +From 2ebc9802edf1656597d8f6d7f9cc557dae08d4b2 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 11 Aug 2016 15:02:21 -0700 +Subject: [PATCH] gpt: refuse to write to sector 0 + +--- + grub-core/lib/gpt.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f83fe29ac..b7449911a 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -626,10 +626,17 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, + sizeof (*header)); + + addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); ++ if (addr == 0) ++ return grub_error (GRUB_ERR_BUG, ++ "Refusing to write GPT header to address 0x0"); + if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) + return grub_errno; + + addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); ++ if (addr < 2) ++ return grub_error (GRUB_ERR_BUG, ++ "Refusing to write GPT entries to address 0x%llx", ++ (unsigned long long) addr); + if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) + return grub_errno; + diff --git a/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch b/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch new file mode 100644 index 00000000..3b69a5e8 --- /dev/null +++ b/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch @@ -0,0 +1,39 @@ +From 6ca9e261944a004668f9416f0ce9dad19846155e Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Sat, 20 Aug 2016 17:42:12 -0700 +Subject: [PATCH] gpt: properly detect and repair invalid tables + +GPT_BOTH_VALID is 4 bits so simple a boolean check is not sufficient. +This broken condition allowed gptprio to trust bogus disk locations in +headers that were marked invalid causing arbitrary disk corruption. +--- + grub-core/commands/gptprio.c | 2 +- + grub-core/lib/gpt.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 24157477c..2021cb200 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index b7449911a..0daf3f8de 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -648,7 +648,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { + /* TODO: update/repair protective MBRs too. 
*/ + +- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); diff --git a/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch b/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch new file mode 100644 index 00000000..1d719da9 --- /dev/null +++ b/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch @@ -0,0 +1,22 @@ +From b2fabac1e1c8eb2beb83b5cafb45daed9a3eef64 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 22 Aug 2016 16:44:30 -0700 +Subject: [PATCH] gptrepair_test: fix typo in cleanup trap + +--- + tests/gptrepair_test.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in +index 80b2de633..805dc171a 100644 +--- a/tests/gptrepair_test.in ++++ b/tests/gptrepair_test.in +@@ -53,7 +53,7 @@ case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in + esac + img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 + img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 +-trap "rm -f '${img1}' '${ing2}'" EXIT ++trap "rm -f '${img1}' '${img2}'" EXIT + + create_disk_image () { + size=$1 diff --git a/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch b/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch new file mode 100644 index 00000000..cab276af --- /dev/null +++ b/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch @@ -0,0 +1,107 @@ +From 999e6bc69145e936a5172d4f3f64374c8087d43e Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Mon, 22 Aug 2016 16:45:10 -0700 +Subject: [PATCH] gptprio_test: check GPT is repaired when appropriate + +--- + tests/gptprio_test.in | 63 ++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 60 insertions(+), 3 deletions(-) + +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +index f4aea0dc9..c5cf0f3b7 100644 +--- a/tests/gptprio_test.in ++++ b/tests/gptprio_test.in +@@ -66,8 +66,9 @@ prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" + prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" + + create_disk_image () { ++ size=$1 + rm -f "${img1}" +- dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none ++ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none + ${sgdisk} \ + -n 1:0:+1 -c 1:ESP -t 1:ef00 \ + -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ +@@ -76,6 +77,35 @@ create_disk_image () { + "${img1}" >/dev/null + } + ++wipe_disk_area () { ++ sector=$1 ++ size=$2 ++ dd if=/dev/zero of="${img1}" bs=512 count=${size} seek=${sector} conv=notrunc status=none ++} ++ ++is_zero () { ++ sector=$1 ++ size=$2 ++ cmp -s -i $((sector * 512)) -n $((size * 512)) /dev/zero "${img1}" ++} ++ ++check_is_zero () { ++ sector=$1 ++ size=$2 ++ if ! 
is_zero "$@"; then ++ echo "$size sector(s) starting at $sector should be all zero" ++ exit 1 ++ fi ++} ++ ++check_not_zero () { ++ sector=$1 ++ size=$2 ++ if is_zero "$@"; then ++ echo "$size sector(s) starting at $sector should not be all zero" ++ exit 1 ++ fi ++} + + fmt_prio () { + priority=$(( ( $1 & 15 ) << 48 )) +@@ -93,10 +123,10 @@ set_prio () { + check_prio () { + part="$1" + expect=$(fmt_prio $2 $3 $4) +- result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ ++ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" 2>&1 \ + | awk '/^Attribute flags: / {print $3}') + if [[ "${expect}" != "${result}" ]]; then +- echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 ++ echo "Partition ${part} has attributes ${result:-??}, not ${expect}" + exit 1 + fi + } +@@ -133,6 +163,33 @@ create_disk_image 100 + set_prio 2 3 2 1 + check_prio 2 3 2 1 + ++# Check gptprio works without modifying the disk when no update is required. ++# Leaves any existing corruption as is, repairing in the OS is better. ++create_disk_image 100 ++set_prio 2 1 0 1 ++wipe_disk_area 99 1 ++check_next 2 1 0 1 ++check_is_zero 99 1 ++ ++create_disk_image 100 ++set_prio 2 1 0 1 ++wipe_disk_area 1 1 ++check_next 2 1 0 1 ++check_is_zero 1 1 ++ ++# When writes do need to be made go ahead and perform the repair. ++create_disk_image 100 ++set_prio 2 1 1 0 ++wipe_disk_area 99 1 ++check_next 2 1 0 0 ++check_not_zero 99 1 ++ ++create_disk_image 100 ++set_prio 2 1 1 0 ++wipe_disk_area 1 1 ++check_next 2 1 0 0 ++check_not_zero 1 1 ++ + # Try two partitions before falling before falling back to a third + create_disk_image 100 + set_prio 2 3 3 0 diff --git a/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch b/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch new file mode 100644 index 00000000..a2683a82 --- /dev/null +++ b/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch @@ -0,0 +1,323 @@ +From 266c58dedddb5b927528052db1e3b21a892cfc0d Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 24 Aug 2016 16:14:20 -0700 +Subject: [PATCH] gpt: fix partition table indexing and validation + +Portions of the code attempted to handle the fact that GPT entries on +disk may be larger than the currently defined struct while others +assumed the data could be indexed by the struct size directly. This +never came up because no utility uses a size larger than 128 bytes but +for the sake of safety we need to do this by the spec. 
+--- + grub-core/commands/gptprio.c | 6 +- + grub-core/lib/gpt.c | 51 +++++++++++++-- + include/grub/gpt_partition.h | 11 +++- + tests/gpt_unit_test.c | 120 +++++++++++++++++++++++++++++++++++ + 4 files changed, 176 insertions(+), 12 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 2021cb200..eebca7a09 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -78,7 +78,7 @@ grub_find_next (const char *disk_name, + const grub_gpt_part_guid_t *part_type, + char **part_name, char **part_guid) + { +- struct grub_gpt_partentry *part_found = NULL; ++ struct grub_gpt_partentry *part, *part_found = NULL; + grub_device_t dev = NULL; + grub_gpt_t gpt = NULL; + grub_uint32_t i, part_index; +@@ -95,10 +95,8 @@ grub_find_next (const char *disk_name, + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +- for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) ++ for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) + { +- struct grub_gpt_partentry *part = &gpt->entries[i]; +- + if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) + { + unsigned int priority, tries_left, successful, old_priority = 0; +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 0daf3f8de..205779192 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -207,6 +207,13 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); + } + ++static grub_uint64_t ++grub_gpt_entries_size (struct grub_gpt_header *gpt) ++{ ++ return (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * ++ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++} ++ + static grub_uint64_t + grub_gpt_entries_sectors (struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -214,11 +221,16 @@ grub_gpt_entries_sectors (struct grub_gpt_header *gpt, + grub_uint64_t sector_bytes, entries_bytes; + + sector_bytes = 1ULL << log_sector_size; +- entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * +- (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); ++ entries_bytes = grub_gpt_entries_size (gpt); + return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); + } + ++static int ++is_pow2 (grub_uint32_t n) ++{ ++ return (n & (n - 1)) == 0; ++} ++ + grub_err_t + grub_gpt_header_check (struct grub_gpt_header *gpt, + unsigned int log_sector_size) +@@ -236,16 +248,23 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, + if (gpt->crc32 != crc) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); + +- /* The header size must be between 92 and the sector size. */ ++ /* The header size "must be greater than or equal to 92 and must be less ++ * than or equal to the logical block size." */ + size = grub_le_to_cpu32 (gpt->headersize); + if (size < 92U || size > (1U << log_sector_size)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); + +- /* The partition entry size must be a multiple of 128. */ ++ /* The partition entry size must be "a value of 128*(2^n) where n is an ++ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ + size = grub_le_to_cpu32 (gpt->partentry_size); +- if (size < 128 || size % 128) ++ if (size < 128U || size % 128U || !is_pow2 (size / 128U)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); + ++ /* The minimum entries table size is specified in terms of bytes, ++ * regardless of how large the individual entry size is. 
*/ ++ if (grub_gpt_entries_size (gpt) < GRUB_GPT_DEFAULT_ENTRIES_SIZE) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry table size"); ++ + /* And of course there better be some space for partitions! */ + start = grub_le_to_cpu64 (gpt->start); + end = grub_le_to_cpu64 (gpt->end); +@@ -410,7 +429,7 @@ static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + struct grub_gpt_header *header) + { +- struct grub_gpt_partentry *entries = NULL; ++ void *entries = NULL; + grub_uint32_t count, size, crc; + grub_uint64_t sector; + grub_disk_addr_t addr; +@@ -526,6 +545,26 @@ fail: + return NULL; + } + ++struct grub_gpt_partentry * ++grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) ++{ ++ struct grub_gpt_header *header; ++ grub_size_t offset; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ header = &gpt->primary; ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ header = &gpt->backup; ++ else ++ return NULL; ++ ++ if (n >= grub_le_to_cpu32 (header->maxpart)) ++ return NULL; ++ ++ offset = (grub_size_t) grub_le_to_cpu32 (header->partentry_size) * n; ++ return (struct grub_gpt_partentry *) ((char *) gpt->entries + offset); ++} ++ + grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 2ca063ad9..b354fd5f4 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -186,8 +186,10 @@ struct grub_gpt + struct grub_gpt_header primary; + struct grub_gpt_header backup; + +- /* Only need one entries table, on disk both copies are identical. */ +- struct grub_gpt_partentry *entries; ++ /* Only need one entries table, on disk both copies are identical. ++ * The on disk entry size may be larger than our partentry struct so ++ * the table cannot be indexed directly. */ ++ void *entries; + grub_size_t entries_size; + + /* Logarithm of sector size, in case GPT and disk driver disagree. */ +@@ -205,6 +207,11 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) + /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ + grub_gpt_t grub_gpt_read (grub_disk_t disk); + ++/* Helper for indexing into the entries table. ++ * Returns NULL when the end of the table has been reached. */ ++struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, ++ grub_uint32_t n); ++ + /* Sync up primary and backup headers, recompute checksums. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 60f601729..9cf3414c2 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -40,6 +40,13 @@ + /* from gnulib */ + #include + ++/* Confirm that the GPT structures conform to the sizes in the spec: ++ * The header size "must be greater than or equal to 92 and must be less ++ * than or equal to the logical block size." ++ * The partition entry size must be "a value of 128*(2^n) where n is an ++ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ ++verify (sizeof (struct grub_gpt_header) == 92); ++verify (sizeof (struct grub_gpt_partentry) == 128); + + /* GPT section sizes. 
*/ + #define HEADER_SIZE (sizeof (struct grub_gpt_header)) +@@ -537,6 +544,113 @@ repair_test (void) + close_disk (&data); + } + ++static void ++iterate_partitions_test (void) ++{ ++ struct test_data data; ++ struct grub_gpt_partentry *p; ++ grub_gpt_t gpt; ++ grub_uint32_t n; ++ ++ open_disk (&data); ++ gpt = read_disk (&data); ++ ++ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) ++ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, ++ "unexpected partition %d data", n); ++ ++ grub_test_assert (n == TABLE_ENTRIES, "unexpected partition limit: %d", n); ++ ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ ++static void ++large_partitions_test (void) ++{ ++ struct test_data data; ++ struct grub_gpt_partentry *p; ++ grub_gpt_t gpt; ++ grub_uint32_t n; ++ ++ open_disk (&data); ++ ++ /* Double the entry size, cut the number of entries in half. */ ++ data.raw->primary_header.maxpart = ++ data.raw->backup_header.maxpart = ++ grub_cpu_to_le32_compile_time (TABLE_ENTRIES/2); ++ data.raw->primary_header.partentry_size = ++ data.raw->backup_header.partentry_size = ++ grub_cpu_to_le32_compile_time (ENTRY_SIZE*2); ++ data.raw->primary_header.partentry_crc32 = ++ data.raw->backup_header.partentry_crc32 = ++ grub_cpu_to_le32_compile_time (0xf2c45af8); ++ data.raw->primary_header.crc32 = grub_cpu_to_le32_compile_time (0xde00cc8f); ++ data.raw->backup_header.crc32 = grub_cpu_to_le32_compile_time (0x6d72e284); ++ ++ memset (&data.raw->primary_entries, 0, ++ sizeof (data.raw->primary_entries)); ++ for (n = 0; n < TABLE_ENTRIES/2; n++) ++ memcpy (&data.raw->primary_entries[n*2], &example_entries[n], ++ sizeof (data.raw->primary_entries[0])); ++ memcpy (&data.raw->backup_entries, &data.raw->primary_entries, ++ sizeof (data.raw->backup_entries)); ++ ++ sync_disk(&data); ++ gpt = read_disk (&data); ++ ++ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) ++ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, ++ "unexpected partition %d data", n); ++ ++ grub_test_assert (n == TABLE_ENTRIES/2, "unexpected partition limit: %d", n); ++ ++ grub_gpt_free (gpt); ++ ++ /* Editing memory beyond the entry structure should still change the crc. */ ++ data.raw->primary_entries[1].attrib = 0xff; ++ ++ sync_disk(&data); ++ gpt = read_disk (&data); ++ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | ++ GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID), ++ "unexpected status: 0x%02x", gpt->status); ++ grub_gpt_free (gpt); ++ ++ close_disk (&data); ++} ++ ++static void ++invalid_partsize_test (void) ++{ ++ struct grub_gpt_header header = { ++ .magic = GRUB_GPT_HEADER_MAGIC, ++ .version = GRUB_GPT_HEADER_VERSION, ++ .headersize = sizeof (struct grub_gpt_header), ++ .crc32 = grub_cpu_to_le32_compile_time (0x1ff2a054), ++ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), ++ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), ++ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), ++ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), ++ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, ++ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), ++ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), ++ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), ++ /* Triple the entry size, which is not valid. 
*/ ++ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE*3), ++ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), ++ }; ++ ++ grub_gpt_header_check(&header, GRUB_DISK_SECTOR_BITS); ++ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, ++ "unexpected error: %s", grub_errmsg); ++ grub_test_assert (strcmp(grub_errmsg, "invalid GPT entry size") == 0, ++ "unexpected error: %s", grub_errmsg); ++ grub_errno = GRUB_ERR_NONE; ++} ++ + static void + search_part_label_test (void) + { +@@ -657,6 +771,9 @@ grub_unit_test_init (void) + grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); + grub_test_register ("gpt_read_fallback_test", read_fallback_test); + grub_test_register ("gpt_repair_test", repair_test); ++ grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); ++ grub_test_register ("gpt_large_partitions_test", large_partitions_test); ++ grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); +@@ -671,6 +788,9 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_read_invalid_test"); + grub_test_unregister ("gpt_read_fallback_test"); + grub_test_unregister ("gpt_repair_test"); ++ grub_test_unregister ("gpt_iterate_partitions_test"); ++ grub_test_unregister ("gpt_large_partitions_test"); ++ grub_test_unregister ("gpt_invalid_partsize_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); + grub_test_unregister ("gpt_search_disk_uuid_test"); diff --git a/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch b/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch new file mode 100644 index 00000000..2eb64f29 --- /dev/null +++ b/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch @@ -0,0 +1,114 @@ +From 81126be2c4ee13b747586e15797a9c5505c0ce0b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 23 Aug 2016 13:09:14 -0700 +Subject: [PATCH] gpt: prefer disk size from header over firmware + +The firmware and the OS may disagree on the disk configuration and size. +Although such a setup should be avoided users are unlikely to know about +the problem, assuming everything behaves like the OS. Tolerate this as +best we can and trust the reported on-disk location over the firmware +when looking for the backup GPT. If the location is inaccessible report +the error as best we can and move on. 
+--- + grub-core/lib/gpt.c | 18 +++++++++++++----- + tests/gpt_unit_test.c | 42 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 55 insertions(+), 5 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 205779192..f0c71bde1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -394,13 +394,21 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + grub_disk_addr_t addr; + + /* Assumes gpt->log_sector_size == disk->log_sector_size */ +- if (grub_gpt_disk_size_valid(disk)) ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ { ++ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ if (grub_gpt_disk_size_valid (disk) && sector >= disk->total_sectors) ++ return grub_error (GRUB_ERR_OUT_OF_RANGE, ++ "backup GPT located at 0x%llx, " ++ "beyond last disk sector at 0x%llx", ++ (unsigned long long) sector, ++ (unsigned long long) disk->total_sectors - 1); ++ } ++ else if (grub_gpt_disk_size_valid (disk)) + sector = disk->total_sectors - 1; +- else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); + else +- return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, +- "Unable to locate backup GPT"); ++ return grub_error (GRUB_ERR_OUT_OF_RANGE, ++ "size of disk unknown, cannot locate backup GPT"); + + grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", + (unsigned long long) sector); +diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c +index 9cf3414c2..218b18697 100644 +--- a/tests/gpt_unit_test.c ++++ b/tests/gpt_unit_test.c +@@ -544,6 +544,46 @@ repair_test (void) + close_disk (&data); + } + ++/* Finding/reading/writing the backup GPT may be difficult if the OS and ++ * BIOS report different sizes for the same disk. We need to gracefully ++ * recognize this and avoid causing trouble for the OS. */ ++static void ++weird_disk_size_test (void) ++{ ++ struct test_data data; ++ grub_gpt_t gpt; ++ ++ open_disk (&data); ++ ++ /* Chop off 65536 bytes (128 512B sectors) which may happen when the ++ * BIOS thinks you are using a software RAID system that reserves that ++ * area for metadata when in fact you are not and using the bare disk. */ ++ grub_test_assert(data.dev->disk->total_sectors == DISK_SECTORS, ++ "unexpected disk size: 0x%llx", ++ (unsigned long long) data.dev->disk->total_sectors); ++ data.dev->disk->total_sectors -= 128; ++ ++ gpt = read_disk (&data); ++ assert_error_stack_empty (); ++ /* Reading the alternate_lba should have been blocked and reading ++ * the (new) end of disk should have found no useful data. */ ++ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, ++ "unreported missing backup header"); ++ ++ /* We should be able to reconstruct the backup header and the location ++ * of the backup should remain unchanged, trusting the GPT data over ++ * what the BIOS is telling us. Further changes are left to the OS. 
*/ ++ grub_gpt_repair (data.dev->disk, gpt); ++ grub_test_assert (grub_errno == GRUB_ERR_NONE, ++ "repair failed: %s", grub_errmsg); ++ grub_test_assert (memcmp (&gpt->primary, &example_primary, ++ sizeof (gpt->primary)) == 0, ++ "repair corrupted primary header"); ++ ++ grub_gpt_free (gpt); ++ close_disk (&data); ++} ++ + static void + iterate_partitions_test (void) + { +@@ -774,6 +814,7 @@ grub_unit_test_init (void) + grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); + grub_test_register ("gpt_large_partitions_test", large_partitions_test); + grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); ++ grub_test_register ("gpt_weird_disk_size_test", weird_disk_size_test); + grub_test_register ("gpt_search_part_label_test", search_part_label_test); + grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); + grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); +@@ -791,6 +832,7 @@ grub_unit_test_fini (void) + grub_test_unregister ("gpt_iterate_partitions_test"); + grub_test_unregister ("gpt_large_partitions_test"); + grub_test_unregister ("gpt_invalid_partsize_test"); ++ grub_test_unregister ("gpt_weird_disk_size_test"); + grub_test_unregister ("gpt_search_part_label_test"); + grub_test_unregister ("gpt_search_part_uuid_test"); + grub_test_unregister ("gpt_search_disk_uuid_test"); diff --git a/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch b/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch new file mode 100644 index 00000000..899a0405 --- /dev/null +++ b/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch @@ -0,0 +1,68 @@ +From e1db8ebe4d10b34817f1e9837f2eb8d6e29a0dd6 Mon Sep 17 00:00:00 2001 +From: Vito Caputo +Date: Thu, 25 Aug 2016 17:21:18 -0700 +Subject: [PATCH] gpt: add helper for picking a valid header + +Eliminate some repetition in primary vs. backup header acquisition. 
+--- + grub-core/lib/gpt.c | 32 ++++++++++++++++++++------------ + 1 file changed, 20 insertions(+), 12 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f0c71bde1..2550ed87c 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -108,21 +108,32 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) + return GRUB_ERR_NONE; + } + ++static struct grub_gpt_header * ++grub_gpt_get_header (grub_gpt_t gpt) ++{ ++ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ return &gpt->primary; ++ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ return &gpt->backup; ++ ++ grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ return NULL; ++} ++ + grub_err_t + grub_gpt_disk_uuid (grub_device_t device, char **uuid) + { ++ struct grub_gpt_header *header; ++ + grub_gpt_t gpt = grub_gpt_read (device->disk); + if (!gpt) + goto done; + +- grub_errno = GRUB_ERR_NONE; ++ header = grub_gpt_get_header (gpt); ++ if (!header) ++ goto done; + +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); +- else +- grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ *uuid = grub_gpt_guid_to_str (&header->guid); + + done: + grub_gpt_free (gpt); +@@ -559,11 +570,8 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) + struct grub_gpt_header *header; + grub_size_t offset; + +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) +- header = &gpt->primary; +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- header = &gpt->backup; +- else ++ header = grub_gpt_get_header (gpt); ++ if (!header) + return NULL; + + if (n >= grub_le_to_cpu32 (header->maxpart)) diff --git a/packages/grub/0028-gptrepair-fix-status-checking.patch b/packages/grub/0028-gptrepair-fix-status-checking.patch new file mode 100644 index 00000000..60b6a6e6 --- /dev/null +++ b/packages/grub/0028-gptrepair-fix-status-checking.patch @@ -0,0 +1,64 @@ +From 1edd283385b1c07a0fd88e7db46d89490de0648d Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 13:06:05 -0700 +Subject: [PATCH] gptrepair: fix status checking + +None of these status bit checks were correct. Fix and simplify. 
+--- + grub-core/commands/gptrepair.c | 28 +++++++++++----------------- + 1 file changed, 11 insertions(+), 17 deletions(-) + +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +index 38392fd8f..66ac3f7c7 100644 +--- a/grub-core/commands/gptrepair.c ++++ b/grub-core/commands/gptrepair.c +@@ -46,8 +46,6 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + grub_device_t dev = NULL; + grub_gpt_t gpt = NULL; + char *dev_name; +- grub_uint32_t primary_crc, backup_crc; +- enum grub_gpt_status old_status; + + if (argc != 1 || !grub_strlen(args[0])) + return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); +@@ -67,29 +65,25 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + if (!gpt) + goto done; + +- primary_crc = gpt->primary.crc32; +- backup_crc = gpt->backup.crc32; +- old_status = gpt->status; +- +- if (grub_gpt_repair (dev->disk, gpt)) +- goto done; +- +- if (primary_crc == gpt->primary.crc32 && +- backup_crc == gpt->backup.crc32 && +- old_status && gpt->status) ++ if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) + { + grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); + goto done; + } + +- if (grub_gpt_write (dev->disk, gpt)) ++ if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) ++ grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); ++ ++ if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) ++ grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); ++ ++ if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +- if (!(old_status & GRUB_GPT_PRIMARY_VALID)) +- grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); ++ if (grub_gpt_write (dev->disk, gpt)) ++ goto done; + +- if (!(old_status & GRUB_GPT_BACKUP_VALID)) +- grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); ++ grub_printf_ (N_("Repaired GPT on %s\n"), dev_name); + + done: + if (gpt) diff --git a/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch b/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch new file mode 100644 index 00000000..bd12660f --- /dev/null +++ b/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch @@ -0,0 +1,133 @@ +From babffa21b389bdb7cca50df69ac94d623890a70e Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 12:43:01 -0700 +Subject: [PATCH] gpt: use inline functions for checking status bits + +This should prevent bugs like 6078f836 and 4268f3da. 
+--- + grub-core/commands/gptprio.c | 2 +- + grub-core/commands/gptrepair.c | 6 +++--- + grub-core/lib/gpt.c | 9 +++++++-- + include/grub/gpt_partition.h | 35 +++++++++++++++++++++++++++------- + 4 files changed, 39 insertions(+), 13 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index eebca7a09..59bc4fd09 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) ++ if (!grub_gpt_both_valid(gpt)) + if (grub_gpt_repair (dev->disk, gpt)) + goto done; + +diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c +index 66ac3f7c7..c17c7346c 100644 +--- a/grub-core/commands/gptrepair.c ++++ b/grub-core/commands/gptrepair.c +@@ -65,16 +65,16 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), + if (!gpt) + goto done; + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) ++ if (grub_gpt_both_valid (gpt)) + { + grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); + goto done; + } + +- if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) ++ if (!grub_gpt_primary_valid (gpt)) + grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); + +- if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) ++ if (!grub_gpt_backup_valid (gpt)) + grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); + + if (grub_gpt_repair (dev->disk, gpt)) +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 2550ed87c..3e077c497 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -638,10 +638,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_check_primary (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); + ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID); ++ + if (grub_gpt_check_backup (gpt)) + return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); + +- gpt->status |= GRUB_GPT_BOTH_VALID; ++ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); + + return GRUB_ERR_NONE; +@@ -703,7 +708,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { + /* TODO: update/repair protective MBRs too. */ + +- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) ++ if (!grub_gpt_both_valid (gpt)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index b354fd5f4..226e09978 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -161,13 +161,6 @@ typedef enum grub_gpt_status + GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, + } grub_gpt_status_t; + +-#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) +-#define GRUB_GPT_PRIMARY_VALID \ +- (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) +-#define GRUB_GPT_BACKUP_VALID \ +- (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) +-#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) +- + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. 
*/ + #define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 +@@ -197,6 +190,34 @@ struct grub_gpt + }; + typedef struct grub_gpt *grub_gpt_t; + ++/* Helpers for checking the gpt status field. */ ++static inline int ++grub_gpt_mbr_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_PROTECTIVE_MBR) || ++ (gpt->status & GRUB_GPT_HYBRID_MBR)); ++} ++ ++static inline int ++grub_gpt_primary_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID)); ++} ++ ++static inline int ++grub_gpt_backup_valid (grub_gpt_t gpt) ++{ ++ return ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) && ++ (gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)); ++} ++ ++static inline int ++grub_gpt_both_valid (grub_gpt_t gpt) ++{ ++ return grub_gpt_primary_valid (gpt) && grub_gpt_backup_valid (gpt); ++} ++ + /* Translate GPT sectors to GRUB's 512 byte block addresses. */ + static inline grub_disk_addr_t + grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) diff --git a/packages/grub/0030-gpt-allow-repair-function-to-noop.patch b/packages/grub/0030-gpt-allow-repair-function-to-noop.patch new file mode 100644 index 00000000..f02b79b6 --- /dev/null +++ b/packages/grub/0030-gpt-allow-repair-function-to-noop.patch @@ -0,0 +1,42 @@ +From 1c073b3844ef82f73df06659a533dbb276514420 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Tue, 20 Sep 2016 13:40:11 -0700 +Subject: [PATCH] gpt: allow repair function to noop + +Simplifies usage a little. +--- + grub-core/commands/gptprio.c | 5 ++--- + grub-core/lib/gpt.c | 4 ++++ + 2 files changed, 6 insertions(+), 3 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 59bc4fd09..b799faa37 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -91,9 +91,8 @@ grub_find_next (const char *disk_name, + if (!gpt) + goto done; + +- if (!grub_gpt_both_valid(gpt)) +- if (grub_gpt_repair (dev->disk, gpt)) +- goto done; ++ if (grub_gpt_repair (dev->disk, gpt)) ++ goto done; + + for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) + { +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3e077c497..9bb19678d 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -586,6 +586,10 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { + grub_uint64_t backup_header, backup_entries; + ++ /* Skip if there is nothing to do. */ ++ if (grub_gpt_both_valid (gpt)) ++ return GRUB_ERR_NONE; ++ + grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); + + if (disk->log_sector_size != gpt->log_sector_size) diff --git a/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch b/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch new file mode 100644 index 00000000..9a108605 --- /dev/null +++ b/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch @@ -0,0 +1,46 @@ +From 603feacb39091f9f4e01fe4a4160f0f320b03fe5 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:22:06 -0700 +Subject: [PATCH] gpt: do not use an enum for status bit values + +--- + include/grub/gpt_partition.h | 19 +++++++++---------- + 1 file changed, 9 insertions(+), 10 deletions(-) + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 226e09978..92b606cd9 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -151,15 +151,14 @@ grub_gpt_partition_map_iterate (grub_disk_t disk, + void *hook_data); + + /* Advanced GPT library. 
*/ +-typedef enum grub_gpt_status +- { +- GRUB_GPT_PROTECTIVE_MBR = 0x01, +- GRUB_GPT_HYBRID_MBR = 0x02, +- GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, +- GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, +- GRUB_GPT_BACKUP_HEADER_VALID = 0x10, +- GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, +- } grub_gpt_status_t; ++ ++/* Status bits for the grub_gpt.status field. */ ++#define GRUB_GPT_PROTECTIVE_MBR 0x01 ++#define GRUB_GPT_HYBRID_MBR 0x02 ++#define GRUB_GPT_PRIMARY_HEADER_VALID 0x04 ++#define GRUB_GPT_PRIMARY_ENTRIES_VALID 0x08 ++#define GRUB_GPT_BACKUP_HEADER_VALID 0x10 ++#define GRUB_GPT_BACKUP_ENTRIES_VALID 0x20 + + /* UEFI requires the entries table to be at least 16384 bytes for a + * total of 128 entries given the standard 128 byte entry size. */ +@@ -170,7 +169,7 @@ typedef enum grub_gpt_status + struct grub_gpt + { + /* Bit field indicating which structures on disk are valid. */ +- grub_gpt_status_t status; ++ unsigned status; + + /* Protective or hybrid MBR. */ + struct grub_msdos_partition_mbr mbr; diff --git a/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch b/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch new file mode 100644 index 00000000..693b7c46 --- /dev/null +++ b/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch @@ -0,0 +1,46 @@ +From 3ffa128b0a04cc7c111cdfa2332e07e0977e3073 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:44:11 -0700 +Subject: [PATCH] gpt: check header and entries status bits together + +Use the new status function which checks *_HEADER_VALID and +*_ENTRIES_VALID bits together. It doesn't make sense for the header and +entries bits to mismatch so don't allow for it. +--- + grub-core/lib/gpt.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 9bb19678d..3c6ff3540 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -596,24 +596,20 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, + "GPT sector size must match disk sector size"); + +- if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) +- return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); +- +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) ++ if (grub_gpt_primary_valid (gpt)) + { +- grub_dprintf ("gpt", "primary GPT header is valid\n"); ++ grub_dprintf ("gpt", "primary GPT is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } +- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) ++ else if (grub_gpt_backup_valid (gpt)) + { +- grub_dprintf ("gpt", "backup GPT header is valid\n"); ++ grub_dprintf ("gpt", "backup GPT is valid\n"); + backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); + } + else +- return grub_error (GRUB_ERR_BUG, "No valid GPT header"); ++ return grub_error (GRUB_ERR_BUG, "No valid GPT"); + + /* Relocate backup to end if disk whenever possible. 
*/ + if (grub_gpt_disk_size_valid(disk)) diff --git a/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch b/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch new file mode 100644 index 00000000..35f9e93d --- /dev/null +++ b/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch @@ -0,0 +1,51 @@ +From 527a976a1f18bf536d460b6c7e6b3704ad52ecac Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 13:52:52 -0700 +Subject: [PATCH] gpt: be more careful about relocating backup header + +The header was being relocated without checking the new location is +actually safe. If the BIOS thinks the disk is smaller than the OS then +repair may relocate the header into allocated space, failing the final +validation check. So only move it if the disk has grown. + +Additionally, if the backup is valid then we can assume its current +location is good enough and leave it as-is. +--- + grub-core/lib/gpt.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3c6ff3540..35e65d8d9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -599,7 +599,17 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + if (grub_gpt_primary_valid (gpt)) + { + grub_dprintf ("gpt", "primary GPT is valid\n"); ++ ++ /* Relocate backup to end if disk if the disk has grown. */ + backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); ++ if (grub_gpt_disk_size_valid (disk) && ++ disk->total_sectors - 1 > backup_header) ++ { ++ backup_header = disk->total_sectors - 1; ++ grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", ++ (unsigned long long) backup_header); ++ } ++ + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); + } + else if (grub_gpt_backup_valid (gpt)) +@@ -611,12 +621,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- /* Relocate backup to end if disk whenever possible. */ +- if (grub_gpt_disk_size_valid(disk)) +- backup_header = disk->total_sectors - 1; +- grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", +- (unsigned long long) backup_header); +- + backup_entries = backup_header - + grub_gpt_size_to_sectors (gpt, gpt->entries_size); + grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", diff --git a/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch b/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch new file mode 100644 index 00000000..1c2f5c08 --- /dev/null +++ b/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch @@ -0,0 +1,75 @@ +From 2fd4959a6ca0575e4f69042c2034705ec460702a Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 14:33:48 -0700 +Subject: [PATCH] gpt: selectively update fields during repair + +Just a little cleanup/refactor to skip touching data we don't need to. +--- + grub-core/lib/gpt.c | 28 ++++++++++++---------------- + 1 file changed, 12 insertions(+), 16 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 35e65d8d9..03e807b25 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -584,8 +584,6 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) + grub_err_t + grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + { +- grub_uint64_t backup_header, backup_entries; +- + /* Skip if there is nothing to do. 
*/ + if (grub_gpt_both_valid (gpt)) + return GRUB_ERR_NONE; +@@ -598,6 +596,8 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + + if (grub_gpt_primary_valid (gpt)) + { ++ grub_uint64_t backup_header; ++ + grub_dprintf ("gpt", "primary GPT is valid\n"); + + /* Relocate backup to end if disk if the disk has grown. */ +@@ -608,32 +608,28 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + backup_header = disk->total_sectors - 1; + grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", + (unsigned long long) backup_header); ++ ++ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); + } + + grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); ++ gpt->backup.header_lba = gpt->primary.alternate_lba; ++ gpt->backup.alternate_lba = gpt->primary.header_lba; ++ gpt->backup.partitions = grub_cpu_to_le64 (backup_header - ++ grub_gpt_size_to_sectors (gpt, gpt->entries_size)); + } + else if (grub_gpt_backup_valid (gpt)) + { + grub_dprintf ("gpt", "backup GPT is valid\n"); +- backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ + grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); ++ gpt->primary.header_lba = gpt->backup.alternate_lba; ++ gpt->primary.alternate_lba = gpt->backup.header_lba; ++ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); + } + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- backup_entries = backup_header - +- grub_gpt_size_to_sectors (gpt, gpt->entries_size); +- grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", +- (unsigned long long) backup_entries); +- +- /* Update/fixup header and partition table locations. */ +- gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); +- gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); +- gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); +- gpt->backup.header_lba = gpt->primary.alternate_lba; +- gpt->backup.alternate_lba = gpt->primary.header_lba; +- gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); +- + /* Recompute checksums. */ + if (grub_gpt_update_checksums (gpt)) + return grub_errno; diff --git a/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch b/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch new file mode 100644 index 00000000..8bdf2f36 --- /dev/null +++ b/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch @@ -0,0 +1,72 @@ +From 04deacdc8d85855b3bb85bcd7c02f71b05ee2bec Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 14:55:19 -0700 +Subject: [PATCH] gpt: always revalidate when recomputing checksums + +This ensures all code modifying GPT data include the same sanity check +that repair does. If revalidation fails the status flags are left in the +appropriate state. +--- + grub-core/lib/gpt.c | 32 ++++++++++++++++++-------------- + 1 file changed, 18 insertions(+), 14 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 03e807b25..3ac2987c6 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -630,23 +630,9 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- /* Recompute checksums. */ + if (grub_gpt_update_checksums (gpt)) + return grub_errno; + +- /* Sanity check. 
*/ +- if (grub_gpt_check_primary (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); +- +- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | +- GRUB_GPT_PRIMARY_ENTRIES_VALID); +- +- if (grub_gpt_check_backup (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); +- +- gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | +- GRUB_GPT_BACKUP_ENTRIES_VALID); +- + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); + + return GRUB_ERR_NONE; +@@ -657,6 +643,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + { + grub_uint32_t crc; + ++ /* Clear status bits, require revalidation of everything. */ ++ gpt->status &= ~(GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID | ++ GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + /* Writing headers larger than our header structure are unsupported. */ + gpt->primary.headersize = + grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); +@@ -670,6 +662,18 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); + grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + ++ if (grub_gpt_check_primary (gpt)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ ++ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | ++ GRUB_GPT_PRIMARY_ENTRIES_VALID); ++ ++ if (grub_gpt_check_backup (gpt)) ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ ++ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | ++ GRUB_GPT_BACKUP_ENTRIES_VALID); ++ + return GRUB_ERR_NONE; + } + diff --git a/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch b/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch new file mode 100644 index 00000000..abb2b67b --- /dev/null +++ b/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch @@ -0,0 +1,37 @@ +From a17e4e799fbcb64ea2379a8886a97de74a521bf6 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 15:01:09 -0700 +Subject: [PATCH] gpt: include backup-in-sync check in revalidation + +--- + grub-core/lib/gpt.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 3ac2987c6..c27bcc510 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -372,6 +372,11 @@ grub_gpt_check_backup (grub_gpt_t gpt) + if (backup <= end) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + ++ /* If both primary and backup are valid but differ prefer the primary. */ ++ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && ++ !grub_gpt_headers_equal (gpt)) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); ++ + return GRUB_ERR_NONE; + } + +@@ -435,11 +440,6 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + +- /* If both primary and backup are valid but differ prefer the primary. 
*/ +- if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && +- !grub_gpt_headers_equal(gpt)) +- return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); +- + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; + return GRUB_ERR_NONE; + } diff --git a/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch b/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch new file mode 100644 index 00000000..da5a4973 --- /dev/null +++ b/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch @@ -0,0 +1,131 @@ +From d3661baf7287febdf4afef788f71bc654653de2b Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 15:29:55 -0700 +Subject: [PATCH] gpt: read entries table at the same time as the header + +I personally think this reads easier. Also has the side effect of +directly comparing the primary and backup tables instead of presuming +they are equal if the crc32 matches. +--- + grub-core/lib/gpt.c | 69 +++++++++++++++++++++++++++------------------ + 1 file changed, 41 insertions(+), 28 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index c27bcc510..b93cedea1 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -32,6 +32,11 @@ GRUB_MOD_LICENSE ("GPLv3+"); + + static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; + ++static grub_err_t ++grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, ++ struct grub_gpt_header *header, ++ void **ret_entries, ++ grub_size_t *ret_entries_size); + + char * + grub_gpt_guid_to_str (grub_gpt_guid_t *guid) +@@ -400,12 +405,21 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) + return grub_errno; + + gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; ++ ++ if (grub_gpt_read_entries (disk, gpt, &gpt->primary, ++ &gpt->entries, &gpt->entries_size)) ++ return grub_errno; ++ ++ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; ++ + return GRUB_ERR_NONE; + } + + static grub_err_t + grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + { ++ void *entries = NULL; ++ grub_size_t entries_size; + grub_uint64_t sector; + grub_disk_addr_t addr; + +@@ -441,12 +455,35 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); + + gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; ++ ++ if (grub_gpt_read_entries (disk, gpt, &gpt->backup, ++ &entries, &entries_size)) ++ return grub_errno; ++ ++ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) ++ { ++ if (entries_size != gpt->entries_size || ++ grub_memcmp (entries, gpt->entries, entries_size) != 0) ++ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); ++ ++ grub_free (entries); ++ } ++ else ++ { ++ gpt->entries = entries; ++ gpt->entries_size = entries_size; ++ } ++ ++ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; ++ + return GRUB_ERR_NONE; + } + + static grub_err_t + grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, +- struct grub_gpt_header *header) ++ struct grub_gpt_header *header, ++ void **ret_entries, ++ grub_size_t *ret_entries_size) + { + void *entries = NULL; + grub_uint32_t count, size, crc; +@@ -488,9 +525,8 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, + goto fail; + } + +- grub_free (gpt->entries); +- gpt->entries = entries; +- gpt->entries_size = entries_size; ++ *ret_entries = entries; ++ *ret_entries_size = entries_size; + return GRUB_ERR_NONE; + + fail: +@@ -529,30 +565,7 @@ grub_gpt_read (grub_disk_t disk) + grub_gpt_read_backup (disk, gpt); + + /* If either succeeded 
clear any possible error from the other. */ +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || +- gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) +- grub_errno = GRUB_ERR_NONE; +- else +- goto fail; +- +- /* Similarly, favor the value or error from the primary table. */ +- if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && +- !grub_gpt_read_entries (disk, gpt, &gpt->backup)) +- { +- grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); +- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; +- } +- +- grub_errno = GRUB_ERR_NONE; +- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && +- !grub_gpt_read_entries (disk, gpt, &gpt->primary)) +- { +- grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); +- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; +- } +- +- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || +- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) ++ if (grub_gpt_primary_valid (gpt) || grub_gpt_backup_valid (gpt)) + grub_errno = GRUB_ERR_NONE; + else + goto fail; diff --git a/packages/grub/0038-gpt-report-all-revalidation-errors.patch b/packages/grub/0038-gpt-report-all-revalidation-errors.patch new file mode 100644 index 00000000..d2c0115f --- /dev/null +++ b/packages/grub/0038-gpt-report-all-revalidation-errors.patch @@ -0,0 +1,37 @@ +From 6c9c0a2011b748d239e6f8f8426426323920f50a Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Wed, 21 Sep 2016 16:02:53 -0700 +Subject: [PATCH] gpt: report all revalidation errors + +Before returning an error that the primary or backup GPT is invalid push +the existing error onto the stack so the user will be told what is bad. +--- + grub-core/lib/gpt.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index b93cedea1..f6f853309 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -676,13 +676,19 @@ grub_gpt_update_checksums (grub_gpt_t gpt) + grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); + + if (grub_gpt_check_primary (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ { ++ grub_error_push (); ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); ++ } + + gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | + GRUB_GPT_PRIMARY_ENTRIES_VALID); + + if (grub_gpt_check_backup (gpt)) +- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ { ++ grub_error_push (); ++ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); ++ } + + gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | + GRUB_GPT_BACKUP_ENTRIES_VALID); diff --git a/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch b/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch new file mode 100644 index 00000000..eabe4394 --- /dev/null +++ b/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch @@ -0,0 +1,68 @@ +From 6bd9c7881cc30773f7d89506bfe131764ffd1cb1 Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 22 Sep 2016 10:00:27 -0700 +Subject: [PATCH] gpt: rename and update documentation for grub_gpt_update + +The function now does more than just recompute checksums so give it a +more general name to reflect that. 
+--- + grub-core/commands/gptprio.c | 2 +- + grub-core/lib/gpt.c | 4 ++-- + include/grub/gpt_partition.h | 7 ++++--- + 3 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index b799faa37..8908d8bed 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -127,7 +127,7 @@ grub_find_next (const char *disk_name, + + grub_gptprio_set_tries_left (part_found, tries_left - 1); + +- if (grub_gpt_update_checksums (gpt)) ++ if (grub_gpt_update (gpt)) + goto done; + + if (grub_gpt_write (dev->disk, gpt)) +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index f6f853309..430404848 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -643,7 +643,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + else + return grub_error (GRUB_ERR_BUG, "No valid GPT"); + +- if (grub_gpt_update_checksums (gpt)) ++ if (grub_gpt_update (gpt)) + return grub_errno; + + grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); +@@ -652,7 +652,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) + } + + grub_err_t +-grub_gpt_update_checksums (grub_gpt_t gpt) ++grub_gpt_update (grub_gpt_t gpt) + { + grub_uint32_t crc; + +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 92b606cd9..726b98c00 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -232,11 +232,12 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); + struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, + grub_uint32_t n); + +-/* Sync up primary and backup headers, recompute checksums. */ ++/* Sync and update primary and backup headers if either are invalid. */ + grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); + +-/* Recompute checksums, must be called after modifying GPT data. */ +-grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); ++/* Recompute checksums and revalidate everything, must be called after ++ * modifying any GPT data. */ ++grub_err_t grub_gpt_update (grub_gpt_t gpt); + + /* Write headers and entry tables back to disk. */ + grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); diff --git a/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch b/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch new file mode 100644 index 00000000..29be913e --- /dev/null +++ b/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch @@ -0,0 +1,69 @@ +From a789bc5ac9133b0c25dfbfa6d39c7c35a1115dfd Mon Sep 17 00:00:00 2001 +From: Michael Marineau +Date: Thu, 22 Sep 2016 11:18:42 -0700 +Subject: [PATCH] gpt: write backup GPT first, skip if inaccessible. + +Writing the primary GPT before the backup may lead to a confusing +situation: booting a freshly updated system could consistently fail and +next boot will fall back to the old system if writing the primary works +but writing the backup fails. If the backup is written first and fails +the primary is left in the old state so the next boot will re-try and +possibly fail in the exact same way. Making that repeatable should make +it easier for users to identify the error. + +Additionally if the firmware and OS disagree on the disk size, making +the backup inaccessible to GRUB, then just skip writing the backup. +When this happens the automatic call to `coreos-setgoodroot` after boot +will take care of repairing the backup. 
+--- + grub-core/lib/gpt.c | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 deletions(-) + +diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c +index 430404848..c3e3a25f9 100644 +--- a/grub-core/lib/gpt.c ++++ b/grub-core/lib/gpt.c +@@ -729,19 +729,39 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, + grub_err_t + grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) + { ++ grub_uint64_t backup_header; ++ + /* TODO: update/repair protective MBRs too. */ + + if (!grub_gpt_both_valid (gpt)) + return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); + ++ /* Write the backup GPT first so if writing fails the update is aborted ++ * and the primary is left intact. However if the backup location is ++ * inaccessible we have to just skip and hope for the best, the backup ++ * will need to be repaired in the OS. */ ++ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); ++ if (grub_gpt_disk_size_valid (disk) && ++ backup_header >= disk->total_sectors) ++ { ++ grub_printf ("warning: backup GPT located at 0x%llx, " ++ "beyond last disk sector at 0x%llx\n", ++ (unsigned long long) backup_header, ++ (unsigned long long) disk->total_sectors - 1); ++ grub_printf ("warning: only writing primary GPT, " ++ "the backup GPT must be repaired from the OS\n"); ++ } ++ else ++ { ++ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); ++ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) ++ return grub_errno; ++ } ++ + grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); + if (grub_gpt_write_table (disk, gpt, &gpt->primary)) + return grub_errno; + +- grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); +- if (grub_gpt_write_table (disk, gpt, &gpt->backup)) +- return grub_errno; +- + return GRUB_ERR_NONE; + } + diff --git a/packages/grub/0041-gptprio-Use-Thar-boot-partition-type-GUID.patch b/packages/grub/0041-gptprio-Use-Thar-boot-partition-type-GUID.patch new file mode 100644 index 00000000..2bfbfec4 --- /dev/null +++ b/packages/grub/0041-gptprio-Use-Thar-boot-partition-type-GUID.patch @@ -0,0 +1,55 @@ +From 9e925f4ccaf5d7137f58152598d75381b6bbefe8 Mon Sep 17 00:00:00 2001 +From: iliana destroyer of worlds +Date: Thu, 28 Mar 2019 16:28:41 -0700 +Subject: [PATCH] gptprio: Use Thar boot partition type GUID + +Signed-off-by: iliana destroyer of worlds +--- + grub-core/commands/gptprio.c | 2 +- + include/grub/gpt_partition.h | 6 +++--- + tests/gptprio_test.in | 2 +- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c +index 8908d8bed..3678d1018 100644 +--- a/grub-core/commands/gptprio.c ++++ b/grub-core/commands/gptprio.c +@@ -162,7 +162,7 @@ grub_cmd_next (grub_extcmd_context_t ctxt, int argc, char **args) + char *p, *root = NULL, *part_name = NULL, *part_guid = NULL; + + /* TODO: Add a uuid parser and a command line flag for providing type. 
*/ +- grub_gpt_part_guid_t part_type = GRUB_GPT_PARTITION_TYPE_USR_X86_64; ++ grub_gpt_part_guid_t part_type = GRUB_GPT_PARTITION_TYPE_THAR_BOOT; + + if (!state[NEXT_SET_DEVICE].set || !state[NEXT_SET_UUID].set) + { +diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h +index 726b98c00..88ad02a44 100644 +--- a/include/grub/gpt_partition.h ++++ b/include/grub/gpt_partition.h +@@ -61,9 +61,9 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); + GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ + 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) + +-#define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ +- GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ +- 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) ++#define GRUB_GPT_PARTITION_TYPE_THAR_BOOT \ ++ GRUB_GPT_GUID_INIT (0x6b636168, 0x7420, 0x6568, \ ++ 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x74, 0x21) + + #define GRUB_GPT_HEADER_MAGIC \ + { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } +diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in +index c5cf0f3b7..9df4dd350 100644 +--- a/tests/gptprio_test.in ++++ b/tests/gptprio_test.in +@@ -59,7 +59,7 @@ esac + img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 + trap "rm -f '${img1}'" EXIT + +-prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" ++prio_type="6b636168-7420-6568-2070-6c616e657421" + declare -a prio_uuid + prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" + prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" diff --git a/packages/grub/gpt.patch b/packages/grub/gpt.patch deleted file mode 100644 index b2d506e9..00000000 --- a/packages/grub/gpt.patch +++ /dev/null @@ -1,5725 +0,0 @@ -From 9300d0044c1d9e1b8df2784d50a3c26250639ca3 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sun, 28 Sep 2014 21:26:21 -0700 -Subject: [PATCH] gpt: start new GPT module - -This module is a new implementation for reading GUID Partition Tables -which is much stricter than the existing part_gpt module and exports GPT -data directly instead of the generic grub_partition structure. It will -be the basis for modules that need to read/write/update GPT data. - -The current code does nothing more than read and verify the table. 
---- - Makefile.util.def | 16 ++ - grub-core/Makefile.core.def | 5 + - grub-core/lib/gpt.c | 288 ++++++++++++++++++++++++++ - include/grub/gpt_partition.h | 60 ++++++ - tests/gpt_unit_test.c | 467 +++++++++++++++++++++++++++++++++++++++++++ - 5 files changed, 836 insertions(+) - create mode 100644 grub-core/lib/gpt.c - create mode 100644 tests/gpt_unit_test.c - -diff --git a/Makefile.util.def b/Makefile.util.def -index f9caccb97..48448c28d 100644 ---- a/Makefile.util.def -+++ b/Makefile.util.def -@@ -1254,6 +1254,22 @@ program = { - ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; - }; - -+program = { -+ testcase; -+ name = gpt_unit_test; -+ common = tests/gpt_unit_test.c; -+ common = tests/lib/unit_test.c; -+ common = grub-core/disk/host.c; -+ common = grub-core/kern/emu/hostfs.c; -+ common = grub-core/lib/gpt.c; -+ common = grub-core/tests/lib/test.c; -+ ldadd = libgrubmods.a; -+ ldadd = libgrubgcry.a; -+ ldadd = libgrubkern.a; -+ ldadd = grub-core/gnulib/libgnu.a; -+ ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; -+}; -+ - program = { - name = grub-menulst2cfg; - mansection = 1; -diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 2dfa22a92..d3bcdbe2f 100644 ---- a/grub-core/Makefile.core.def -+++ b/grub-core/Makefile.core.def -@@ -821,6 +821,11 @@ module = { - common = commands/gptsync.c; - }; - -+module = { -+ name = gpt; -+ common = lib/gpt.c; -+}; -+ - module = { - name = halt; - nopc = commands/halt.c; -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -new file mode 100644 -index 000000000..a308e8537 ---- /dev/null -+++ b/grub-core/lib/gpt.c -@@ -0,0 +1,288 @@ -+/* gpt.c - Read/Verify/Write GUID Partition Tables (GPT). */ -+/* -+ * GRUB -- GRand Unified Bootloader -+ * Copyright (C) 2002,2005,2006,2007,2008 Free Software Foundation, Inc. -+ * Copyright (C) 2014 CoreOS, Inc. -+ * -+ * GRUB is free software: you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation, either version 3 of the License, or -+ * (at your option) any later version. -+ * -+ * GRUB is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with GRUB. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+GRUB_MOD_LICENSE ("GPLv3+"); -+ -+static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; -+ -+ -+static grub_err_t -+grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) -+{ -+ grub_uint8_t *crc32_context; -+ grub_uint32_t old; -+ -+ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); -+ if (!crc32_context) -+ return grub_errno; -+ -+ /* crc32 must be computed with the field cleared. */ -+ old = gpt->crc32; -+ gpt->crc32 = 0; -+ GRUB_MD_CRC32->init (crc32_context); -+ GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); -+ GRUB_MD_CRC32->final (crc32_context); -+ gpt->crc32 = old; -+ -+ /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ -+ *crc = grub_swap_bytes32 (*(grub_uint32_t *) -+ GRUB_MD_CRC32->read (crc32_context)); -+ -+ grub_free (crc32_context); -+ -+ return GRUB_ERR_NONE; -+} -+ -+/* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ -+grub_err_t -+grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) -+{ -+ unsigned int i; -+ -+ if (mbr->signature != -+ grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE)) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid MBR signature"); -+ -+ for (i = 0; i < sizeof (mbr->entries); i++) -+ if (mbr->entries[i].type == GRUB_PC_PARTITION_TYPE_GPT_DISK) -+ return GRUB_ERR_NONE; -+ -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); -+} -+ -+grub_err_t -+grub_gpt_header_check (struct grub_gpt_header *gpt, -+ unsigned int log_sector_size) -+{ -+ grub_uint32_t crc = 0, size; -+ -+ if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); -+ -+ if (gpt->version != GRUB_GPT_HEADER_VERSION) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); -+ -+ if (grub_gpt_header_crc32 (gpt, &crc)) -+ return grub_errno; -+ -+ if (gpt->crc32 != crc) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); -+ -+ /* The header size must be between 92 and the sector size. */ -+ size = grub_le_to_cpu32 (gpt->headersize); -+ if (size < 92U || size > (1U << log_sector_size)) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); -+ -+ /* The partition entry size must be a multiple of 128. */ -+ size = grub_le_to_cpu32 (gpt->partentry_size); -+ if (size < 128 || size % 128) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); -+ -+ return GRUB_ERR_NONE; -+} -+ -+static grub_err_t -+grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) -+{ -+ grub_disk_addr_t addr; -+ -+ /* TODO: The gpt partmap module searches for the primary header instead -+ * of relying on the disk's sector size. For now trust the disk driver -+ * but eventually this code should match the existing behavior. */ -+ gpt->log_sector_size = disk->log_sector_size; -+ -+ addr = grub_gpt_sector_to_addr (gpt, 1); -+ if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) -+ return grub_errno; -+ -+ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ return grub_errno; -+ -+ gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; -+ return GRUB_ERR_NONE; -+} -+ -+static grub_err_t -+grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) -+{ -+ grub_uint64_t sector; -+ grub_disk_addr_t addr; -+ -+ /* Assumes gpt->log_sector_size == disk->log_sector_size */ -+ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) -+ sector = disk->total_sectors - 1; -+ else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ sector = grub_le_to_cpu64 (gpt->primary.backup); -+ else -+ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, -+ "Unable to locate backup GPT"); -+ -+ addr = grub_gpt_sector_to_addr (gpt, sector); -+ if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) -+ return grub_errno; -+ -+ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ return grub_errno; -+ -+ gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; -+ return GRUB_ERR_NONE; -+} -+ -+static struct grub_gpt_partentry * -+grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, -+ struct grub_gpt_header *header) -+{ -+ struct grub_gpt_partentry *entries = NULL; -+ grub_uint8_t *crc32_context = NULL; -+ grub_uint32_t count, size, crc; -+ grub_disk_addr_t addr; -+ grub_size_t entries_size; -+ -+ /* Grub doesn't include calloc, hence the manual overflow check. 
*/ -+ count = grub_le_to_cpu32 (header->maxpart); -+ size = grub_le_to_cpu32 (header->partentry_size); -+ entries_size = count *size; -+ if (size && entries_size / size != count) -+ { -+ grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("out of memory")); -+ goto fail; -+ } -+ -+ entries = grub_malloc (entries_size); -+ if (!entries) -+ goto fail; -+ -+ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); -+ if (grub_disk_read (disk, addr, 0, entries_size, entries)) -+ goto fail; -+ -+ crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); -+ if (!crc32_context) -+ goto fail; -+ -+ GRUB_MD_CRC32->init (crc32_context); -+ GRUB_MD_CRC32->write (crc32_context, entries, entries_size); -+ GRUB_MD_CRC32->final (crc32_context); -+ -+ crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); -+ if (grub_swap_bytes32 (crc) != header->partentry_crc32) -+ { -+ grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); -+ goto fail; -+ } -+ -+ grub_free (crc32_context); -+ return entries; -+ -+fail: -+ grub_free (entries); -+ grub_free (crc32_context); -+ return NULL; -+} -+ -+grub_gpt_t -+grub_gpt_read (grub_disk_t disk) -+{ -+ grub_gpt_t gpt; -+ struct grub_gpt_partentry *backup_entries; -+ -+ gpt = grub_zalloc (sizeof (*gpt)); -+ if (!gpt) -+ goto fail; -+ -+ if (grub_disk_read (disk, 0, 0, sizeof (gpt->mbr), &gpt->mbr)) -+ goto fail; -+ -+ /* Check the MBR but errors aren't reported beyond the status bit. */ -+ if (grub_gpt_pmbr_check (&gpt->mbr)) -+ grub_errno = GRUB_ERR_NONE; -+ else -+ gpt->status |= GRUB_GPT_PROTECTIVE_MBR; -+ -+ /* If both the primary and backup fail report the primary's error. */ -+ if (grub_gpt_read_primary (disk, gpt)) -+ { -+ grub_error_push (); -+ grub_gpt_read_backup (disk, gpt); -+ grub_error_pop (); -+ } -+ else -+ grub_gpt_read_backup (disk, gpt); -+ -+ /* If either succeeded clear any possible error from the other. */ -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || -+ gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -+ grub_errno = GRUB_ERR_NONE; -+ else -+ goto fail; -+ -+ /* Same error handling scheme for the entry tables. 
*/ -+ gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); -+ if (!gpt->entries) -+ { -+ grub_error_push (); -+ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); -+ grub_error_pop (); -+ } -+ else -+ { -+ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -+ backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); -+ } -+ -+ if (backup_entries) -+ { -+ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) -+ grub_free (backup_entries); -+ else -+ gpt->entries = backup_entries; -+ } -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || -+ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) -+ { -+ grub_errno = GRUB_ERR_NONE; -+ return gpt; -+ } -+ -+fail: -+ grub_gpt_free (gpt); -+ return NULL; -+} -+ -+void -+grub_gpt_free (grub_gpt_t gpt) -+{ -+ if (!gpt) -+ return; -+ -+ grub_free (gpt->entries); -+ grub_free (gpt); -+} -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 1b32f6725..04ed2d7f1 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -21,6 +21,7 @@ - - #include - #include -+#include - - struct grub_gpt_part_type - { -@@ -50,6 +51,12 @@ typedef struct grub_gpt_part_type grub_gpt_part_type_t; - { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ - } - -+#define GRUB_GPT_HEADER_MAGIC \ -+ { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } -+ -+#define GRUB_GPT_HEADER_VERSION \ -+ grub_cpu_to_le32_compile_time (0x00010000U) -+ - struct grub_gpt_header - { - grub_uint8_t magic[8]; -@@ -78,10 +85,63 @@ struct grub_gpt_partentry - char name[72]; - } GRUB_PACKED; - -+/* Basic GPT partmap module. */ - grub_err_t - grub_gpt_partition_map_iterate (grub_disk_t disk, - grub_partition_iterate_hook_t hook, - void *hook_data); - -+/* Advanced GPT library. */ -+typedef enum grub_gpt_status -+ { -+ GRUB_GPT_PROTECTIVE_MBR = 0x01, -+ GRUB_GPT_HYBRID_MBR = 0x02, -+ GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, -+ GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, -+ GRUB_GPT_BACKUP_HEADER_VALID = 0x10, -+ GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, -+ } grub_gpt_status_t; -+ -+#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) -+ -+/* UEFI requires the entries table to be at least 16384 bytes for a -+ * total of 128 entries given the standard 128 byte entry size. */ -+#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 -+ -+struct grub_gpt -+{ -+ /* Bit field indicating which structures on disk are valid. */ -+ grub_gpt_status_t status; -+ -+ /* Protective or hybrid MBR. */ -+ struct grub_msdos_partition_mbr mbr; -+ -+ /* Each of the two GPT headers. */ -+ struct grub_gpt_header primary; -+ struct grub_gpt_header backup; -+ -+ /* Only need one entries table, on disk both copies are identical. */ -+ struct grub_gpt_partentry *entries; -+ -+ /* Logarithm of sector size, in case GPT and disk driver disagree. */ -+ unsigned int log_sector_size; -+}; -+typedef struct grub_gpt *grub_gpt_t; -+ -+/* Translate GPT sectors to GRUB's 512 byte block addresses. */ -+static inline grub_disk_addr_t -+grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) -+{ -+ return (sector << (gpt->log_sector_size - GRUB_DISK_SECTOR_BITS)); -+} -+ -+/* Allocates and fills new grub_gpt structure, free with grub_gpt_free. 
*/ -+grub_gpt_t grub_gpt_read (grub_disk_t disk); -+ -+void grub_gpt_free (grub_gpt_t gpt); -+ -+grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); -+grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, -+ unsigned int log_sector_size); - - #endif /* ! GRUB_GPT_PARTITION_HEADER */ -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -new file mode 100644 -index 000000000..a824cd967 ---- /dev/null -+++ b/tests/gpt_unit_test.c -@@ -0,0 +1,467 @@ -+/* -+ * GRUB -- GRand Unified Bootloader -+ * Copyright (C) 2014 CoreOS, Inc. -+ * -+ * GRUB is free software: you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation, either version 3 of the License, or -+ * (at your option) any later version. -+ * -+ * GRUB is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with GRUB. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* from gnulib */ -+#include -+ -+ -+/* GPT section sizes. */ -+#define HEADER_SIZE (sizeof (struct grub_gpt_header)) -+#define HEADER_PAD (GRUB_DISK_SECTOR_SIZE - HEADER_SIZE) -+#define ENTRY_SIZE (sizeof (struct grub_gpt_partentry)) -+#define TABLE_ENTRIES 0x80 -+#define TABLE_SIZE (TABLE_ENTRIES * ENTRY_SIZE) -+#define TABLE_SECTORS (TABLE_SIZE / GRUB_DISK_SECTOR_SIZE) -+ -+/* Double check that the table size calculation was valid. */ -+verify (TABLE_SECTORS * GRUB_DISK_SECTOR_SIZE == TABLE_SIZE); -+ -+/* GPT section locations for a 1MiB disk. */ -+#define DISK_SECTORS 0x800 -+#define DISK_SIZE (GRUB_DISK_SECTOR_SIZE * DISK_SECTORS) -+#define PRIMARY_HEADER_SECTOR 0x1 -+#define PRIMARY_TABLE_SECTOR 0x2 -+#define BACKUP_HEADER_SECTOR (DISK_SECTORS - 0x1) -+#define BACKUP_TABLE_SECTOR (BACKUP_HEADER_SECTOR - TABLE_SECTORS) -+ -+#define DATA_START_SECTOR (PRIMARY_TABLE_SECTOR + TABLE_SECTORS) -+#define DATA_END_SECTOR (BACKUP_TABLE_SECTOR - 0x1) -+#define DATA_SECTORS (BACKUP_TABLE_SECTOR - DATA_START_SECTOR) -+#define DATA_SIZE (GRUB_DISK_SECTOR_SIZE * DATA_SECTORS) -+ -+struct test_disk -+{ -+ struct grub_msdos_partition_mbr mbr; -+ -+ struct grub_gpt_header primary_header; -+ grub_uint8_t primary_header_pad[HEADER_PAD]; -+ struct grub_gpt_partentry primary_entries[TABLE_ENTRIES]; -+ -+ grub_uint8_t data[DATA_SIZE]; -+ -+ struct grub_gpt_partentry backup_entries[TABLE_ENTRIES]; -+ struct grub_gpt_header backup_header; -+ grub_uint8_t backup_header_pad[HEADER_PAD]; -+} GRUB_PACKED; -+ -+/* Sanity check that all the above ugly math was correct. */ -+verify (sizeof (struct test_disk) == DISK_SIZE); -+ -+struct test_data -+{ -+ int fd; -+ grub_device_t dev; -+ struct test_disk *raw; -+}; -+ -+ -+/* Sample primary GPT header for an empty 1MB disk. 
*/ -+static const struct grub_gpt_header example_primary = { -+ .magic = GRUB_GPT_HEADER_MAGIC, -+ .version = GRUB_GPT_HEADER_VERSION, -+ .headersize = sizeof (struct grub_gpt_header), -+ .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), -+ .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -+ .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -+ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -+ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), -+ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -+ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, -+ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), -+ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), -+ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -+ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), -+}; -+ -+/* And the backup header. */ -+static const struct grub_gpt_header example_backup = { -+ .magic = GRUB_GPT_HEADER_MAGIC, -+ .version = GRUB_GPT_HEADER_VERSION, -+ .headersize = sizeof (struct grub_gpt_header), -+ .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), -+ .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -+ .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -+ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -+ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), -+ .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -+ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, -+ .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), -+ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), -+ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -+ .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), -+}; -+ -+/* Sample protective MBR for the same 1MB disk. Note, this matches -+ * parted and fdisk behavior. The UEFI spec uses different values. */ -+static const struct grub_msdos_partition_mbr example_pmbr = { -+ .entries = {{.flag = 0x00, -+ .start_head = 0x00, -+ .start_sector = 0x01, -+ .start_cylinder = 0x00, -+ .type = 0xee, -+ .end_head = 0xfe, -+ .end_sector = 0xff, -+ .end_cylinder = 0xff, -+ .start = grub_cpu_to_le32_compile_time (0x1), -+ .length = grub_cpu_to_le32_compile_time (DISK_SECTORS - 0x1), -+ }}, -+ .signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE), -+}; -+ -+/* If errors are left in grub's error stack things can get confused. */ -+static void -+assert_error_stack_empty (void) -+{ -+ do -+ { -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "error on stack: %s", grub_errmsg); -+ } -+ while (grub_error_pop ()); -+} -+ -+static grub_err_t -+execute_command2 (const char *name, const char *arg1, const char *arg2) -+{ -+ grub_command_t cmd; -+ grub_err_t err; -+ char *argv[2]; -+ -+ cmd = grub_command_find (name); -+ if (!cmd) -+ grub_fatal ("can't find command %s", name); -+ -+ argv[0] = strdup (arg1); -+ argv[1] = strdup (arg2); -+ err = (cmd->func) (cmd, 2, argv); -+ free (argv[0]); -+ free (argv[1]); -+ -+ return err; -+} -+ -+static void -+sync_disk (struct test_data *data) -+{ -+ if (msync (data->raw, DISK_SIZE, MS_SYNC | MS_INVALIDATE) < 0) -+ grub_fatal ("Syncing disk failed: %s", strerror (errno)); -+ -+ grub_disk_cache_invalidate_all (); -+} -+ -+static void -+reset_disk (struct test_data *data) -+{ -+ memset (data->raw, 0, DISK_SIZE); -+ -+ /* Initialize image with valid example tables. 
*/ -+ memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); -+ memcpy (&data->raw->primary_header, &example_primary, -+ sizeof (data->raw->primary_header)); -+ memcpy (&data->raw->backup_header, &example_backup, -+ sizeof (data->raw->backup_header)); -+ -+ sync_disk (data); -+} -+ -+static void -+open_disk (struct test_data *data) -+{ -+ const char *loop = "loop0"; -+ char template[] = "/tmp/grub_gpt_test.XXXXXX"; -+ char host[sizeof ("(host)") + sizeof (template)]; -+ -+ data->fd = mkstemp (template); -+ if (data->fd < 0) -+ grub_fatal ("Creating %s failed: %s", template, strerror (errno)); -+ -+ if (ftruncate (data->fd, DISK_SIZE) < 0) -+ { -+ int err = errno; -+ unlink (template); -+ grub_fatal ("Resizing %s failed: %s", template, strerror (err)); -+ } -+ -+ data->raw = mmap (NULL, DISK_SIZE, PROT_READ | PROT_WRITE, -+ MAP_SHARED, data->fd, 0); -+ if (data->raw == MAP_FAILED) -+ { -+ int err = errno; -+ unlink (template); -+ grub_fatal ("Maping %s failed: %s", template, strerror (err)); -+ } -+ -+ snprintf (host, sizeof (host), "(host)%s", template); -+ if (execute_command2 ("loopback", loop, host) != GRUB_ERR_NONE) -+ { -+ unlink (template); -+ grub_fatal ("loopback %s %s failed: %s", loop, host, grub_errmsg); -+ } -+ -+ if (unlink (template) < 0) -+ grub_fatal ("Unlinking %s failed: %s", template, strerror (errno)); -+ -+ reset_disk (data); -+ -+ data->dev = grub_device_open (loop); -+ if (!data->dev) -+ grub_fatal ("Opening %s failed: %s", loop, grub_errmsg); -+} -+ -+static void -+close_disk (struct test_data *data) -+{ -+ char *loop; -+ -+ assert_error_stack_empty (); -+ -+ if (munmap (data->raw, DISK_SIZE) || close (data->fd)) -+ grub_fatal ("Closing disk image failed: %s", strerror (errno)); -+ -+ loop = strdup (data->dev->disk->name); -+ grub_test_assert (grub_device_close (data->dev) == GRUB_ERR_NONE, -+ "Closing disk device failed: %s", grub_errmsg); -+ -+ grub_test_assert (execute_command2 ("loopback", "-d", loop) == -+ GRUB_ERR_NONE, "loopback -d %s failed: %s", loop, -+ grub_errmsg); -+ -+ free (loop); -+} -+ -+static grub_gpt_t -+read_disk (struct test_data *data) -+{ -+ grub_gpt_t gpt; -+ -+ gpt = grub_gpt_read (data->dev->disk); -+ if (gpt == NULL) -+ { -+ grub_print_error (); -+ grub_fatal ("grub_gpt_read failed"); -+ } -+ -+ -+ return gpt; -+} -+ -+static void -+pmbr_test (void) -+{ -+ struct grub_msdos_partition_mbr mbr; -+ -+ memset (&mbr, 0, sizeof (mbr)); -+ -+ /* Empty is invalid. */ -+ grub_gpt_pmbr_check (&mbr); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ /* A table without a protective partition is invalid. */ -+ mbr.signature = grub_cpu_to_le16_compile_time (GRUB_PC_PARTITION_SIGNATURE); -+ grub_gpt_pmbr_check (&mbr); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ /* A table with a protective type is ok. */ -+ memcpy (&mbr, &example_pmbr, sizeof (mbr)); -+ grub_gpt_pmbr_check (&mbr); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+} -+ -+static void -+header_test (void) -+{ -+ struct grub_gpt_header primary, backup; -+ -+ /* Example headers should be valid. 
*/ -+ memcpy (&primary, &example_primary, sizeof (primary)); -+ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ memcpy (&backup, &example_backup, sizeof (backup)); -+ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ /* Twiddle the GUID to invalidate the CRC. */ -+ primary.guid[0] = 0; -+ grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ backup.guid[0] = 0; -+ grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+} -+ -+static void -+read_valid_test (void) -+{ -+ struct test_data data; -+ grub_gpt_t gpt; -+ -+ open_disk (&data); -+ gpt = read_disk (&data); -+ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | -+ GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_PRIMARY_ENTRIES_VALID | -+ GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID), -+ "unexpected status: 0x%02x", gpt->status); -+ grub_gpt_free (gpt); -+ close_disk (&data); -+} -+ -+static void -+read_invalid_entries_test (void) -+{ -+ struct test_data data; -+ grub_gpt_t gpt; -+ -+ open_disk (&data); -+ -+ /* Corrupt the first entry in both tables. */ -+ memset (&data.raw->primary_entries[0], 0x55, -+ sizeof (data.raw->primary_entries[0])); -+ memset (&data.raw->backup_entries[0], 0x55, -+ sizeof (data.raw->backup_entries[0])); -+ sync_disk (&data); -+ -+ gpt = grub_gpt_read (data.dev->disk); -+ grub_test_assert (gpt == NULL, "no error reported for corrupt entries"); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ close_disk (&data); -+} -+ -+static void -+read_fallback_test (void) -+{ -+ struct test_data data; -+ grub_gpt_t gpt; -+ -+ open_disk (&data); -+ -+ /* Corrupt the primary header. */ -+ memset (&data.raw->primary_header.guid, 0x55, -+ sizeof (data.raw->primary_header.guid)); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) == 0, -+ "unreported corrupt primary header"); -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ /* Corrupt the backup header. */ -+ memset (&data.raw->backup_header.guid, 0x55, -+ sizeof (data.raw->backup_header.guid)); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, -+ "unreported corrupt backup header"); -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ /* Corrupt the primary entry table. */ -+ memset (&data.raw->primary_entries[0], 0x55, -+ sizeof (data.raw->primary_entries[0])); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_test_assert ((gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) == 0, -+ "unreported corrupt primary entries table"); -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ /* Corrupt the backup entry table. 
*/ -+ memset (&data.raw->backup_entries[0], 0x55, -+ sizeof (data.raw->backup_entries[0])); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) == 0, -+ "unreported corrupt backup entries table"); -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ /* If primary is corrupt and disk size is unknown fallback fails. */ -+ memset (&data.raw->primary_header.guid, 0x55, -+ sizeof (data.raw->primary_header.guid)); -+ sync_disk (&data); -+ data.dev->disk->total_sectors = GRUB_DISK_SIZE_UNKNOWN; -+ gpt = grub_gpt_read (data.dev->disk); -+ grub_test_assert (gpt == NULL, "no error reported"); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ close_disk (&data); -+} -+ -+void -+grub_unit_test_init (void) -+{ -+ grub_init_all (); -+ grub_hostfs_init (); -+ grub_host_init (); -+ grub_test_register ("gpt_pmbr_test", pmbr_test); -+ grub_test_register ("gpt_header_test", header_test); -+ grub_test_register ("gpt_read_valid_test", read_valid_test); -+ grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); -+ grub_test_register ("gpt_read_fallback_test", read_fallback_test); -+} -+ -+void -+grub_unit_test_fini (void) -+{ -+ grub_test_unregister ("gpt_pmbr_test"); -+ grub_test_unregister ("gpt_header_test"); -+ grub_test_unregister ("gpt_read_valid_test"); -+ grub_test_unregister ("gpt_read_invalid_test"); -+ grub_test_unregister ("gpt_read_fallback_test"); -+ grub_fini_all (); -+} -From 91a8986e53926bc0a94f251a9b4fe8974af75020 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sat, 18 Oct 2014 15:39:13 -0700 -Subject: [PATCH] gpt: rename misnamed header location fields - -The header location fields refer to 'this header' and 'alternate header' -respectively, not 'primary header' and 'backup header'. The previous -field names are backwards for the backup header. 
---- - grub-core/lib/gpt.c | 2 +- - include/grub/gpt_partition.h | 4 ++-- - tests/gpt_unit_test.c | 8 ++++---- - 3 files changed, 7 insertions(+), 7 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index a308e8537..705bd77f9 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -137,7 +137,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) - sector = disk->total_sectors - 1; - else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -- sector = grub_le_to_cpu64 (gpt->primary.backup); -+ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); - else - return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, - "Unable to locate backup GPT"); -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 04ed2d7f1..a7ef61875 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -64,8 +64,8 @@ struct grub_gpt_header - grub_uint32_t headersize; - grub_uint32_t crc32; - grub_uint32_t unused1; -- grub_uint64_t primary; -- grub_uint64_t backup; -+ grub_uint64_t header_lba; -+ grub_uint64_t alternate_lba; - grub_uint64_t start; - grub_uint64_t end; - grub_uint8_t guid[16]; -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index a824cd967..4d70868af 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -94,8 +94,8 @@ static const struct grub_gpt_header example_primary = { - .version = GRUB_GPT_HEADER_VERSION, - .headersize = sizeof (struct grub_gpt_header), - .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), -- .primary = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -- .backup = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -+ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -+ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), - .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), - .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -@@ -112,8 +112,8 @@ static const struct grub_gpt_header example_backup = { - .version = GRUB_GPT_HEADER_VERSION, - .headersize = sizeof (struct grub_gpt_header), - .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), -- .primary = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -- .backup = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -+ .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -+ .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), - .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), - .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -From 23e0197ea4561fc3a9e59c1af9bf2357d21e1b52 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sat, 18 Oct 2014 16:46:17 -0700 -Subject: [PATCH] gpt: record size of of the entries table - -The size of the entries table will be needed later when writing it back -to disk. Restructure the entries reading code to flow a little better. 
---- - grub-core/lib/gpt.c | 53 +++++++++++++++++++------------------------- - include/grub/gpt_partition.h | 5 ++++- - 2 files changed, 27 insertions(+), 31 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 705bd77f9..01df7f3e8 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -153,7 +153,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - return GRUB_ERR_NONE; - } - --static struct grub_gpt_partentry * -+static grub_err_t - grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - struct grub_gpt_header *header) - { -@@ -173,6 +173,10 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - goto fail; - } - -+ /* Double check that the header was validated properly. */ -+ if (entries_size < GRUB_GPT_DEFAULT_ENTRIES_SIZE) -+ return grub_error (GRUB_ERR_BUG, "invalid GPT entries table size"); -+ - entries = grub_malloc (entries_size); - if (!entries) - goto fail; -@@ -197,19 +201,21 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - } - - grub_free (crc32_context); -- return entries; -+ grub_free (gpt->entries); -+ gpt->entries = entries; -+ gpt->entries_size = entries_size; -+ return GRUB_ERR_NONE; - - fail: - grub_free (entries); - grub_free (crc32_context); -- return NULL; -+ return grub_errno; - } - - grub_gpt_t - grub_gpt_read (grub_disk_t disk) - { - grub_gpt_t gpt; -- struct grub_gpt_partentry *backup_entries; - - gpt = grub_zalloc (sizeof (*gpt)); - if (!gpt) -@@ -241,36 +247,23 @@ grub_gpt_read (grub_disk_t disk) - else - goto fail; - -- /* Same error handling scheme for the entry tables. */ -- gpt->entries = grub_gpt_read_entries (disk, gpt, &gpt->primary); -- if (!gpt->entries) -- { -- grub_error_push (); -- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); -- grub_error_pop (); -- } -- else -- { -- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -- backup_entries = grub_gpt_read_entries (disk, gpt, &gpt->backup); -- } -+ /* Similarly, favor the value or error from the primary table. */ -+ if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && -+ !grub_gpt_read_entries (disk, gpt, &gpt->backup)) -+ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; - -- if (backup_entries) -- { -- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -- -- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) -- grub_free (backup_entries); -- else -- gpt->entries = backup_entries; -- } -+ grub_errno = GRUB_ERR_NONE; -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && -+ !grub_gpt_read_entries (disk, gpt, &gpt->primary)) -+ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; - - if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || - gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) -- { -- grub_errno = GRUB_ERR_NONE; -- return gpt; -- } -+ grub_errno = GRUB_ERR_NONE; -+ else -+ goto fail; -+ -+ return gpt; - - fail: - grub_gpt_free (gpt); -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index a7ef61875..7f41e22dd 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -106,7 +106,9 @@ typedef enum grub_gpt_status - - /* UEFI requires the entries table to be at least 16384 bytes for a - * total of 128 entries given the standard 128 byte entry size. */ --#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH 128 -+#define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 -+#define GRUB_GPT_DEFAULT_ENTRIES_LENGTH \ -+ (GRUB_GPT_DEFAULT_ENTRIES_SIZE / sizeof (struct grub_gpt_partentry)) - - struct grub_gpt - { -@@ -122,6 +124,7 @@ struct grub_gpt - - /* Only need one entries table, on disk both copies are identical. 
*/ - struct grub_gpt_partentry *entries; -+ grub_size_t entries_size; - - /* Logarithm of sector size, in case GPT and disk driver disagree. */ - unsigned int log_sector_size; -From 187c377743b26d8fcf44ea3e5ac1ae6edf92ab23 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sat, 18 Oct 2014 18:18:17 -0700 -Subject: [PATCH] gpt: consolidate crc32 computation code - -The gcrypt API is overly verbose, wrap it up in a helper function to -keep this rather common operation easy to use. ---- - grub-core/lib/gpt.c | 43 ++++++++++++++++++++++++------------------- - 1 file changed, 24 insertions(+), 19 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 01df7f3e8..43a150942 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -32,22 +32,17 @@ static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; - - - static grub_err_t --grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) -+grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) - { - grub_uint8_t *crc32_context; -- grub_uint32_t old; - - crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); - if (!crc32_context) - return grub_errno; - -- /* crc32 must be computed with the field cleared. */ -- old = gpt->crc32; -- gpt->crc32 = 0; - GRUB_MD_CRC32->init (crc32_context); -- GRUB_MD_CRC32->write (crc32_context, gpt, sizeof (*gpt)); -+ GRUB_MD_CRC32->write (crc32_context, data, len); - GRUB_MD_CRC32->final (crc32_context); -- gpt->crc32 = old; - - /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ - *crc = grub_swap_bytes32 (*(grub_uint32_t *) -@@ -58,6 +53,25 @@ grub_gpt_header_crc32 (struct grub_gpt_header *gpt, grub_uint32_t *crc) - return GRUB_ERR_NONE; - } - -+static grub_err_t -+grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) -+{ -+ grub_uint32_t old, new; -+ grub_err_t err; -+ -+ /* crc32 must be computed with the field cleared. */ -+ old = header->crc32; -+ header->crc32 = 0; -+ err = grub_gpt_lecrc32 (header, sizeof (*header), &new); -+ header->crc32 = old; -+ -+ if (err) -+ return err; -+ -+ *crc = new; -+ return GRUB_ERR_NONE; -+} -+ - /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ - grub_err_t - grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) -@@ -87,7 +101,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - if (gpt->version != GRUB_GPT_HEADER_VERSION) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); - -- if (grub_gpt_header_crc32 (gpt, &crc)) -+ if (grub_gpt_header_lecrc32 (gpt, &crc)) - return grub_errno; - - if (gpt->crc32 != crc) -@@ -158,7 +172,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - struct grub_gpt_header *header) - { - struct grub_gpt_partentry *entries = NULL; -- grub_uint8_t *crc32_context = NULL; - grub_uint32_t count, size, crc; - grub_disk_addr_t addr; - grub_size_t entries_size; -@@ -185,22 +198,15 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - if (grub_disk_read (disk, addr, 0, entries_size, entries)) - goto fail; - -- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); -- if (!crc32_context) -+ if (grub_gpt_lecrc32 (entries, entries_size, &crc)) - goto fail; - -- GRUB_MD_CRC32->init (crc32_context); -- GRUB_MD_CRC32->write (crc32_context, entries, entries_size); -- GRUB_MD_CRC32->final (crc32_context); -- -- crc = *(grub_uint32_t *) GRUB_MD_CRC32->read (crc32_context); -- if (grub_swap_bytes32 (crc) != header->partentry_crc32) -+ if (crc != header->partentry_crc32) - { - grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); - goto fail; - } - -- grub_free (crc32_context); - grub_free (gpt->entries); - gpt->entries = entries; - gpt->entries_size = entries_size; -@@ -208,7 +214,6 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - - fail: - grub_free (entries); -- grub_free (crc32_context); - return grub_errno; - } - -From f6e8fc02aa5f5ed02e529e2b30a94b0589d30e31 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sat, 18 Oct 2014 18:21:07 -0700 -Subject: [PATCH] gpt: add new repair function to sync up primary and backup - tables. 
- ---- - grub-core/lib/gpt.c | 90 ++++++++++++++++++++++++++++++++++++++++++++ - include/grub/gpt_partition.h | 3 ++ - tests/gpt_unit_test.c | 49 ++++++++++++++++++++++++ - 3 files changed, 142 insertions(+) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 43a150942..2d61df488 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -31,6 +31,20 @@ GRUB_MOD_LICENSE ("GPLv3+"); - static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; - - -+static grub_uint64_t -+grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) -+{ -+ unsigned int sector_size; -+ grub_uint64_t sectors; -+ -+ sector_size = 1U << gpt->log_sector_size; -+ sectors = size / sector_size; -+ if (size % sector_size) -+ sectors++; -+ -+ return sectors; -+} -+ - static grub_err_t - grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) - { -@@ -275,6 +289,82 @@ fail: - return NULL; - } - -+grub_err_t -+grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) -+{ -+ grub_uint64_t backup_header, backup_entries; -+ grub_uint32_t crc; -+ -+ if (disk->log_sector_size != gpt->log_sector_size) -+ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, -+ "GPT sector size must match disk sector size"); -+ -+ if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || -+ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) -+ return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ { -+ backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); -+ grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); -+ } -+ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -+ { -+ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); -+ grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); -+ } -+ else -+ return grub_error (GRUB_ERR_BUG, "No valid GPT header"); -+ -+ /* Relocate backup to end if disk whenever possible. */ -+ if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) -+ backup_header = disk->total_sectors - 1; -+ -+ backup_entries = backup_header - -+ grub_gpt_size_to_sectors (gpt, gpt->entries_size); -+ -+ /* Update/fixup header and partition table locations. */ -+ gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); -+ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); -+ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); -+ gpt->backup.header_lba = gpt->primary.alternate_lba; -+ gpt->backup.alternate_lba = gpt->primary.header_lba; -+ gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); -+ -+ /* Writing headers larger than our header structure are unsupported. */ -+ gpt->primary.headersize = -+ grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); -+ gpt->backup.headersize = -+ grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); -+ -+ /* Recompute checksums. */ -+ if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) -+ return grub_errno; -+ -+ gpt->primary.partentry_crc32 = crc; -+ gpt->backup.partentry_crc32 = crc; -+ -+ if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) -+ return grub_errno; -+ -+ if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) -+ return grub_errno; -+ -+ /* Sanity check. 
*/ -+ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -+ -+ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -+ -+ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_PRIMARY_ENTRIES_VALID | -+ GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID); -+ -+ return GRUB_ERR_NONE; -+} -+ - void - grub_gpt_free (grub_gpt_t gpt) - { -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 7f41e22dd..62d027e4e 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -141,6 +141,9 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) - /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ - grub_gpt_t grub_gpt_read (grub_disk_t disk); - -+/* Sync up primary and backup headers, recompute checksums. */ -+grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); -+ - void grub_gpt_free (grub_gpt_t gpt); - - grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 4d70868af..83198bebf 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -442,6 +443,52 @@ read_fallback_test (void) - close_disk (&data); - } - -+static void -+repair_test (void) -+{ -+ struct test_data data; -+ grub_gpt_t gpt; -+ -+ open_disk (&data); -+ -+ /* Erase/Repair primary. */ -+ memset (&data.raw->primary_header, 0, sizeof (data.raw->primary_header)); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_gpt_repair (data.dev->disk, gpt); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "repair failed: %s", grub_errmsg); -+ if (memcmp (&gpt->primary, &example_primary, sizeof (gpt->primary))) -+ { -+ printf ("Invalid restored primary header:\n"); -+ hexdump (16, (char*)&gpt->primary, sizeof (gpt->primary)); -+ printf ("Expected primary header:\n"); -+ hexdump (16, (char*)&example_primary, sizeof (example_primary)); -+ grub_test_assert (0, "repair did not restore primary header"); -+ } -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ /* Erase/Repair backup. 
*/ -+ memset (&data.raw->backup_header, 0, sizeof (data.raw->backup_header)); -+ sync_disk (&data); -+ gpt = read_disk (&data); -+ grub_gpt_repair (data.dev->disk, gpt); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "repair failed: %s", grub_errmsg); -+ if (memcmp (&gpt->backup, &example_backup, sizeof (gpt->backup))) -+ { -+ printf ("Invalid restored backup header:\n"); -+ hexdump (16, (char*)&gpt->backup, sizeof (gpt->backup)); -+ printf ("Expected backup header:\n"); -+ hexdump (16, (char*)&example_backup, sizeof (example_backup)); -+ grub_test_assert (0, "repair did not restore backup header"); -+ } -+ grub_gpt_free (gpt); -+ reset_disk (&data); -+ -+ close_disk (&data); -+} - void - grub_unit_test_init (void) - { -@@ -453,6 +500,7 @@ grub_unit_test_init (void) - grub_test_register ("gpt_read_valid_test", read_valid_test); - grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); - grub_test_register ("gpt_read_fallback_test", read_fallback_test); -+ grub_test_register ("gpt_repair_test", repair_test); - } - - void -@@ -463,5 +511,6 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_read_valid_test"); - grub_test_unregister ("gpt_read_invalid_test"); - grub_test_unregister ("gpt_read_fallback_test"); -+ grub_test_unregister ("gpt_repair_test"); - grub_fini_all (); - } -From c9041ec4e40315f2734f2b6a38a75ba17cbba0ca Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sun, 19 Oct 2014 14:21:29 -0700 -Subject: [PATCH] gpt: add write function and gptrepair command - -The first hint of something practical, a command that can restore any of -the GPT structures from the alternate location. New test case must run -under QEMU because the loopback device used by the other unit tests does -not support writing. ---- - Makefile.util.def | 6 +++ - grub-core/Makefile.core.def | 5 ++ - grub-core/commands/gptrepair.c | 116 +++++++++++++++++++++++++++++++++++++++++ - grub-core/lib/gpt.c | 44 ++++++++++++++-- - include/grub/gpt_partition.h | 8 +++ - tests/gptrepair_test.in | 102 ++++++++++++++++++++++++++++++++++++ - 6 files changed, 277 insertions(+), 4 deletions(-) - create mode 100644 grub-core/commands/gptrepair.c - create mode 100644 tests/gptrepair_test.in - -diff --git a/Makefile.util.def b/Makefile.util.def -index 48448c28d..8156fca5f 100644 ---- a/Makefile.util.def -+++ b/Makefile.util.def -@@ -1159,6 +1159,12 @@ script = { - common = tests/grub_cmd_tr.in; - }; - -+script = { -+ testcase; -+ name = gptrepair_test; -+ common = tests/gptrepair_test.in; -+}; -+ - script = { - testcase; - name = file_filter_test; -diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index d3bcdbe2f..23a047632 100644 ---- a/grub-core/Makefile.core.def -+++ b/grub-core/Makefile.core.def -@@ -821,6 +821,11 @@ module = { - common = commands/gptsync.c; - }; - -+module = { -+ name = gptrepair; -+ common = commands/gptrepair.c; -+}; -+ - module = { - name = gpt; - common = lib/gpt.c; -diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c -new file mode 100644 -index 000000000..38392fd8f ---- /dev/null -+++ b/grub-core/commands/gptrepair.c -@@ -0,0 +1,116 @@ -+/* gptrepair.c - verify and restore GPT info from alternate location. */ -+/* -+ * GRUB -- GRand Unified Bootloader -+ * Copyright (C) 2009 Free Software Foundation, Inc. -+ * Copyright (C) 2014 CoreOS, Inc. 
-+ * -+ * GRUB is free software: you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation, either version 3 of the License, or -+ * (at your option) any later version. -+ * -+ * GRUB is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with GRUB. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+GRUB_MOD_LICENSE ("GPLv3+"); -+ -+static char * -+trim_dev_name (char *name) -+{ -+ grub_size_t len = grub_strlen (name); -+ if (len && name[0] == '(' && name[len - 1] == ')') -+ { -+ name[len - 1] = '\0'; -+ name = name + 1; -+ } -+ return name; -+} -+ -+static grub_err_t -+grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), -+ int argc, char **args) -+{ -+ grub_device_t dev = NULL; -+ grub_gpt_t gpt = NULL; -+ char *dev_name; -+ grub_uint32_t primary_crc, backup_crc; -+ enum grub_gpt_status old_status; -+ -+ if (argc != 1 || !grub_strlen(args[0])) -+ return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); -+ -+ dev_name = trim_dev_name (args[0]); -+ dev = grub_device_open (dev_name); -+ if (!dev) -+ goto done; -+ -+ if (!dev->disk) -+ { -+ grub_error (GRUB_ERR_BAD_ARGUMENT, "not a disk"); -+ goto done; -+ } -+ -+ gpt = grub_gpt_read (dev->disk); -+ if (!gpt) -+ goto done; -+ -+ primary_crc = gpt->primary.crc32; -+ backup_crc = gpt->backup.crc32; -+ old_status = gpt->status; -+ -+ if (grub_gpt_repair (dev->disk, gpt)) -+ goto done; -+ -+ if (primary_crc == gpt->primary.crc32 && -+ backup_crc == gpt->backup.crc32 && -+ old_status && gpt->status) -+ { -+ grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); -+ goto done; -+ } -+ -+ if (grub_gpt_write (dev->disk, gpt)) -+ goto done; -+ -+ if (!(old_status & GRUB_GPT_PRIMARY_VALID)) -+ grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); -+ -+ if (!(old_status & GRUB_GPT_BACKUP_VALID)) -+ grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); -+ -+done: -+ if (gpt) -+ grub_gpt_free (gpt); -+ -+ if (dev) -+ grub_device_close (dev); -+ -+ return grub_errno; -+} -+ -+static grub_command_t cmd; -+ -+GRUB_MOD_INIT(gptrepair) -+{ -+ cmd = grub_register_command ("gptrepair", grub_cmd_gptrepair, -+ N_("DEVICE"), -+ N_("Verify and repair GPT on drive DEVICE.")); -+} -+ -+GRUB_MOD_FINI(gptrepair) -+{ -+ grub_unregister_command (cmd); -+} -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 2d61df488..67ffdf703 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -357,10 +357,46 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) - return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); - -- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | -- GRUB_GPT_PRIMARY_ENTRIES_VALID | -- GRUB_GPT_BACKUP_HEADER_VALID | -- GRUB_GPT_BACKUP_ENTRIES_VALID); -+ gpt->status |= GRUB_GPT_BOTH_VALID; -+ return GRUB_ERR_NONE; -+} -+ -+static grub_err_t -+grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, -+ struct grub_gpt_header *header) -+{ -+ grub_disk_addr_t addr; -+ -+ if (grub_le_to_cpu32 (header->headersize) != sizeof (*header)) -+ return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, -+ "Header size is %u, must be %u", -+ 
grub_le_to_cpu32 (header->headersize), -+ sizeof (*header)); -+ -+ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); -+ if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) -+ return grub_errno; -+ -+ addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); -+ if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) -+ return grub_errno; -+ -+ return GRUB_ERR_NONE; -+} -+ -+grub_err_t -+grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) -+{ -+ /* TODO: update/repair protective MBRs too. */ -+ -+ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); -+ -+ if (grub_gpt_write_table (disk, gpt, &gpt->primary)) -+ return grub_errno; -+ -+ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) -+ return grub_errno; - - return GRUB_ERR_NONE; - } -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 62d027e4e..3cac6df32 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -103,6 +103,11 @@ typedef enum grub_gpt_status - } grub_gpt_status_t; - - #define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) -+#define GRUB_GPT_PRIMARY_VALID \ -+ (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) -+#define GRUB_GPT_BACKUP_VALID \ -+ (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) -+#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) - - /* UEFI requires the entries table to be at least 16384 bytes for a - * total of 128 entries given the standard 128 byte entry size. */ -@@ -144,6 +149,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); - /* Sync up primary and backup headers, recompute checksums. */ - grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); - -+/* Write headers and entry tables back to disk. */ -+grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); -+ - void grub_gpt_free (grub_gpt_t gpt); - - grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); -diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in -new file mode 100644 -index 000000000..80b2de633 ---- /dev/null -+++ b/tests/gptrepair_test.in -@@ -0,0 +1,102 @@ -+#! /bin/sh -+set -e -+ -+# Copyright (C) 2010 Free Software Foundation, Inc. -+# Copyright (C) 2014 CoreOS, Inc. -+# -+# GRUB is free software: you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation, either version 3 of the License, or -+# (at your option) any later version. -+# -+# GRUB is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GRUB. If not, see . -+ -+parted=parted -+grubshell=@builddir@/grub-shell -+ -+. "@builddir@/grub-core/modinfo.sh" -+ -+case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in -+ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) -+ disk=ata0 -+ ;; -+ powerpc-ieee1275) -+ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. 
-+ exit 0 -+ ;; -+ sparc64-ieee1275) -+ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. -+ exit 0 -+ ;; -+ i386-ieee1275) -+ disk=ieee1275/d -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. -+ exit 0 -+ ;; -+ mips-arc) -+ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. -+ exit 0 ;; -+ mipsel-arc) -+ disk=arc/scsi0/disk0/rdisk0 -+ ;; -+ *) -+ disk=hd0 -+ ;; -+esac -+img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 -+img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 -+trap "rm -f '${img1}' '${ing2}'" EXIT -+ -+create_disk_image () { -+ size=$1 -+ rm -f "${img1}" -+ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none -+ ${parted} -a none -s "${img1}" mklabel gpt -+ cp "${img1}" "${img2}" -+} -+ -+wipe_disk_area () { -+ sector=$1 -+ size=$2 -+ dd if=/dev/zero of="${img2}" bs=512 count=${size} seek=${sector} conv=notrunc status=none -+} -+ -+do_repair () { -+ output="`echo "gptrepair ($disk)" | "${grubshell}" --disk="${img2}"`" -+ if echo "${output}" | grep ^error; then -+ return 1 -+ fi -+ if echo "${output}" | grep -v GPT; then -+ echo "Unexpected output ${output}" -+ return 1 -+ fi -+ echo "${output}" -+} -+ -+echo "Nothing to repair:" -+create_disk_image 100 -+do_repair -+cmp "${img1}" "${img2}" -+echo -+ -+echo "Repair primary (MBR left intact)" -+create_disk_image 100 -+wipe_disk_area 1 1 -+do_repair -+cmp "${img1}" "${img2}" -+echo -+ -+echo "Repair backup" -+create_disk_image 100 -+wipe_disk_area 99 1 -+do_repair -+cmp "${img1}" "${img2}" -+echo -From 24341bb2904a4f2b40d69dbd633789cf49e9616b Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sun, 19 Oct 2014 20:44:34 -0700 -Subject: [PATCH] tests: fix path to words file on Gentoo/CoreOS - -By default there isn't a linux.words file, but there is words. ---- - tests/util/grub-fs-tester.in | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/tests/util/grub-fs-tester.in b/tests/util/grub-fs-tester.in -index 2337771a1..d768d66d1 100644 ---- a/tests/util/grub-fs-tester.in -+++ b/tests/util/grub-fs-tester.in -@@ -241,8 +241,10 @@ for ((LOGSECSIZE=MINLOGSECSIZE;LOGSECSIZE<=MAXLOGSECSIZE;LOGSECSIZE=LOGSECSIZE + - CFILESN=1 - if test -f /usr/share/dict/american-english; then - CFILESSRC[0]="/usr/share/dict/american-english" -- else -+ elif test -f /usr/share/dict/linux.words; then - CFILESSRC[0]="/usr/share/dict/linux.words" -+ else -+ CFILESSRC[0]="/usr/share/dict/words" - fi - case x"$fs" in - # FS LIMITATION: 8.3 names -From 059ae5370a9d5f7fe19c928bf5000751ece28ccd Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 30 Oct 2014 20:55:21 -0700 -Subject: [PATCH] gpt: add a new generic GUID type - -In order to do anything with partition GUIDs they need to be stored in a -proper structure like the partition type GUIDs. Additionally add an -initializer macro to simplify defining both GUID types. 
---- - include/grub/gpt_partition.h | 36 +++++++++++++++++++----------------- - tests/gpt_unit_test.c | 12 ++++++------ - 2 files changed, 25 insertions(+), 23 deletions(-) - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 3cac6df32..df076ca64 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -23,33 +23,35 @@ - #include - #include - --struct grub_gpt_part_type -+struct grub_gpt_guid - { - grub_uint32_t data1; - grub_uint16_t data2; - grub_uint16_t data3; - grub_uint8_t data4[8]; - } __attribute__ ((aligned(8))); --typedef struct grub_gpt_part_type grub_gpt_part_type_t; -+typedef struct grub_gpt_guid grub_gpt_guid_t; -+typedef struct grub_gpt_guid grub_gpt_part_type_t; -+ -+#define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ -+ { \ -+ grub_cpu_to_le32_compile_time (a), \ -+ grub_cpu_to_le16_compile_time (b), \ -+ grub_cpu_to_le16_compile_time (c), \ -+ { d1, d2, d3, d4, d5, d6, d7, d8 } \ -+ } - - #define GRUB_GPT_PARTITION_TYPE_EMPTY \ -- { 0x0, 0x0, 0x0, \ -- { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 } \ -- } -+ GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ -+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) - - #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ -- { grub_cpu_to_le32_compile_time (0x21686148), \ -- grub_cpu_to_le16_compile_time (0x6449), \ -- grub_cpu_to_le16_compile_time (0x6e6f), \ -- { 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49 } \ -- } -+ GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ -+ 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) - - #define GRUB_GPT_PARTITION_TYPE_LDM \ -- { grub_cpu_to_le32_compile_time (0x5808C8AAU),\ -- grub_cpu_to_le16_compile_time (0x7E8F), \ -- grub_cpu_to_le16_compile_time (0x42E0), \ -- { 0x85, 0xD2, 0xE1, 0xE9, 0x04, 0x34, 0xCF, 0xB3 } \ -- } -+ GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ -+ 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) - - #define GRUB_GPT_HEADER_MAGIC \ - { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } -@@ -68,7 +70,7 @@ struct grub_gpt_header - grub_uint64_t alternate_lba; - grub_uint64_t start; - grub_uint64_t end; -- grub_uint8_t guid[16]; -+ grub_gpt_guid_t guid; - grub_uint64_t partitions; - grub_uint32_t maxpart; - grub_uint32_t partentry_size; -@@ -78,7 +80,7 @@ struct grub_gpt_header - struct grub_gpt_partentry - { - grub_gpt_part_type_t type; -- grub_uint8_t guid[16]; -+ grub_gpt_guid_t guid; - grub_uint64_t start; - grub_uint64_t end; - grub_uint64_t attrib; -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 83198bebf..86e4364a5 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -99,8 +99,8 @@ static const struct grub_gpt_header example_primary = { - .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), - .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), -- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, -+ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, -+ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), - .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), - .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), - .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -@@ -117,8 +117,8 @@ static const struct grub_gpt_header example_backup = { - .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), - .end = grub_cpu_to_le64_compile_time 
(DATA_END_SECTOR), -- .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, -- 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac}, -+ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, -+ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), - .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), - .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), - .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -@@ -326,13 +326,13 @@ header_test (void) - grub_errno = GRUB_ERR_NONE; - - /* Twiddle the GUID to invalidate the CRC. */ -- primary.guid[0] = 0; -+ primary.guid.data1 = 0; - grub_gpt_header_check (&primary, GRUB_DISK_SECTOR_BITS); - grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, - "unexpected error: %s", grub_errmsg); - grub_errno = GRUB_ERR_NONE; - -- backup.guid[0] = 0; -+ backup.guid.data1 = 0; - grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); - grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, - "unexpected error: %s", grub_errmsg); -From 6cf94a34ca4f605aa353d2717561d608bc13472d Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Mon, 3 Nov 2014 17:14:37 -0800 -Subject: [PATCH] gpt: new gptprio.next command for selecting priority based - partitions - -Basic usage would look something like this: - - gptprio.next -d usr_dev -u usr_uuid - linuxefi ($usr_dev)/boot/vmlinuz mount.usr=PARTUUID=$usr_uuid - -After booting the system should set the 'successful' bit on the -partition that was used. ---- - Makefile.util.def | 6 ++ - grub-core/Makefile.core.def | 5 + - grub-core/commands/gptprio.c | 238 +++++++++++++++++++++++++++++++++++++++++++ - include/grub/gpt_partition.h | 49 +++++++++ - tests/gptprio_test.in | 150 +++++++++++++++++++++++++++ - 5 files changed, 448 insertions(+) - create mode 100644 grub-core/commands/gptprio.c - create mode 100644 tests/gptprio_test.in - -diff --git a/Makefile.util.def b/Makefile.util.def -index 8156fca5f..9249f77be 100644 ---- a/Makefile.util.def -+++ b/Makefile.util.def -@@ -1165,6 +1165,12 @@ script = { - common = tests/gptrepair_test.in; - }; - -+script = { -+ testcase; -+ name = gptprio_test; -+ common = tests/gptprio_test.in; -+}; -+ - script = { - testcase; - name = file_filter_test; -diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 23a047632..4620138cb 100644 ---- a/grub-core/Makefile.core.def -+++ b/grub-core/Makefile.core.def -@@ -826,6 +826,11 @@ module = { - common = commands/gptrepair.c; - }; - -+module = { -+ name = gptprio; -+ common = commands/gptprio.c; -+}; -+ - module = { - name = gpt; - common = lib/gpt.c; -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -new file mode 100644 -index 000000000..29bd11d68 ---- /dev/null -+++ b/grub-core/commands/gptprio.c -@@ -0,0 +1,238 @@ -+/* gptprio.c - manage priority based partition selection. */ -+/* -+ * GRUB -- GRand Unified Bootloader -+ * Copyright (C) 2009 Free Software Foundation, Inc. -+ * Copyright (C) 2014 CoreOS, Inc. -+ * -+ * GRUB is free software: you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation, either version 3 of the License, or -+ * (at your option) any later version. -+ * -+ * GRUB is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with GRUB. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+GRUB_MOD_LICENSE ("GPLv3+"); -+ -+static const struct grub_arg_option options_next[] = { -+ {"set-device", 'd', 0, -+ N_("Set a variable to the name of selected partition."), -+ N_("VARNAME"), ARG_TYPE_STRING}, -+ {"set-uuid", 'u', 0, -+ N_("Set a variable to the GPT UUID of selected partition."), -+ N_("VARNAME"), ARG_TYPE_STRING}, -+ {0, 0, 0, 0, 0, 0} -+}; -+ -+enum options_next -+{ -+ NEXT_SET_DEVICE, -+ NEXT_SET_UUID, -+}; -+ -+static unsigned int -+grub_gptprio_priority (struct grub_gpt_partentry *entry) -+{ -+ return (unsigned int) grub_gpt_entry_attribute -+ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY, 4); -+} -+ -+static unsigned int -+grub_gptprio_tries_left (struct grub_gpt_partentry *entry) -+{ -+ return (unsigned int) grub_gpt_entry_attribute -+ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); -+} -+ -+static void -+grub_gptprio_set_tries_left (struct grub_gpt_partentry *entry, -+ unsigned int tries_left) -+{ -+ grub_gpt_entry_set_attribute -+ (entry, tries_left, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT, 4); -+} -+ -+static unsigned int -+grub_gptprio_successful (struct grub_gpt_partentry *entry) -+{ -+ return (unsigned int) grub_gpt_entry_attribute -+ (entry, GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL, 1); -+} -+ -+static grub_err_t -+grub_find_next (const char *disk_name, -+ const grub_gpt_part_type_t *part_type, -+ char **part_name, char **part_guid) -+{ -+ struct grub_gpt_partentry *part_found = NULL; -+ grub_device_t dev = NULL; -+ grub_gpt_t gpt = NULL; -+ grub_uint32_t i, part_index; -+ -+ dev = grub_device_open (disk_name); -+ if (!dev) -+ goto done; -+ -+ gpt = grub_gpt_read (dev->disk); -+ if (!gpt) -+ goto done; -+ -+ if (!(gpt->status & GRUB_GPT_BOTH_VALID)) -+ if (grub_gpt_repair (dev->disk, gpt)) -+ goto done; -+ -+ for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) -+ { -+ struct grub_gpt_partentry *part = &gpt->entries[i]; -+ -+ if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) -+ { -+ unsigned int priority, tries_left, successful, old_priority = 0; -+ -+ priority = grub_gptprio_priority (part); -+ tries_left = grub_gptprio_tries_left (part); -+ successful = grub_gptprio_successful (part); -+ -+ if (part_found) -+ old_priority = grub_gptprio_priority (part_found); -+ -+ if ((tries_left || successful) && priority > old_priority) -+ { -+ part_index = i; -+ part_found = part; -+ } -+ } -+ } -+ -+ if (!part_found) -+ { -+ grub_error (GRUB_ERR_UNKNOWN_DEVICE, N_("no such partition")); -+ goto done; -+ } -+ -+ if (grub_gptprio_tries_left (part_found)) -+ { -+ unsigned int tries_left = grub_gptprio_tries_left (part_found); -+ -+ grub_gptprio_set_tries_left (part_found, tries_left - 1); -+ -+ if (grub_gpt_update_checksums (gpt)) -+ goto done; -+ -+ if (grub_gpt_write (dev->disk, gpt)) -+ goto done; -+ } -+ -+ *part_name = grub_xasprintf ("%s,gpt%u", disk_name, part_index + 1); -+ if (!*part_name) -+ goto done; -+ -+ *part_guid = -+ grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", -+ grub_le_to_cpu32 (part_found->guid.data1), -+ grub_le_to_cpu16 (part_found->guid.data2), -+ grub_le_to_cpu16 (part_found->guid.data3), -+ part_found->guid.data4[0], -+ part_found->guid.data4[1], -+ part_found->guid.data4[2], -+ part_found->guid.data4[3], -+ part_found->guid.data4[4], -+ part_found->guid.data4[5], -+ 
part_found->guid.data4[6], -+ part_found->guid.data4[7]); -+ if (!*part_name) -+ goto done; -+ -+ grub_errno = GRUB_ERR_NONE; -+ -+done: -+ grub_gpt_free (gpt); -+ -+ if (dev) -+ grub_device_close (dev); -+ -+ return grub_errno; -+} -+ -+ -+ -+static grub_err_t -+grub_cmd_next (grub_extcmd_context_t ctxt, int argc, char **args) -+{ -+ struct grub_arg_list *state = ctxt->state; -+ char *p, *root = NULL, *part_name = NULL, *part_guid = NULL; -+ -+ /* TODO: Add a uuid parser and a command line flag for providing type. */ -+ grub_gpt_part_type_t part_type = GRUB_GPT_PARTITION_TYPE_USR_X86_64; -+ -+ if (!state[NEXT_SET_DEVICE].set || !state[NEXT_SET_UUID].set) -+ { -+ grub_error (GRUB_ERR_INVALID_COMMAND, N_("-d and -u are required")); -+ goto done; -+ } -+ -+ if (argc == 0) -+ root = grub_strdup (grub_env_get ("root")); -+ else if (argc == 1) -+ root = grub_strdup (args[0]); -+ else -+ { -+ grub_error (GRUB_ERR_BAD_ARGUMENT, N_("unexpected arguments")); -+ goto done; -+ } -+ -+ if (!root) -+ goto done; -+ -+ /* To make using $root practical strip off the partition name. */ -+ p = grub_strchr (root, ','); -+ if (p) -+ *p = '\0'; -+ -+ if (grub_find_next (root, &part_type, &part_name, &part_guid)) -+ goto done; -+ -+ if (grub_env_set (state[NEXT_SET_DEVICE].arg, part_name)) -+ goto done; -+ -+ if (grub_env_set (state[NEXT_SET_UUID].arg, part_guid)) -+ goto done; -+ -+ grub_errno = GRUB_ERR_NONE; -+ -+done: -+ grub_free (root); -+ grub_free (part_name); -+ grub_free (part_guid); -+ -+ return grub_errno; -+} -+ -+static grub_extcmd_t cmd_next; -+ -+GRUB_MOD_INIT(gptprio) -+{ -+ cmd_next = grub_register_extcmd ("gptprio.next", grub_cmd_next, 0, -+ N_("-d VARNAME -u VARNAME [DEVICE]"), -+ N_("Select next partition to boot."), -+ options_next); -+} -+ -+GRUB_MOD_FINI(gptprio) -+{ -+ grub_unregister_extcmd (cmd_next); -+} -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index df076ca64..e41c66539 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -53,6 +53,10 @@ typedef struct grub_gpt_guid grub_gpt_part_type_t; - GRUB_GPT_GUID_INIT (0x5808c8aa, 0x7e8f, 0x42e0, \ - 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) - -+#define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ -+ GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ -+ 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) -+ - #define GRUB_GPT_HEADER_MAGIC \ - { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } - -@@ -87,6 +91,51 @@ struct grub_gpt_partentry - char name[72]; - } GRUB_PACKED; - -+enum grub_gpt_part_attr_offset -+{ -+ /* Standard partition attribute bits defined by UEFI. */ -+ GRUB_GPT_PART_ATTR_OFFSET_REQUIRED = 0, -+ GRUB_GPT_PART_ATTR_OFFSET_NO_BLOCK_IO_PROTOCOL = 1, -+ GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE = 2, -+ -+ /* De facto standard attribute bits defined by Microsoft and reused by -+ * http://www.freedesktop.org/wiki/Specifications/DiscoverablePartitionsSpec */ -+ GRUB_GPT_PART_ATTR_OFFSET_READ_ONLY = 60, -+ GRUB_GPT_PART_ATTR_OFFSET_NO_AUTO = 63, -+ -+ /* Partition attributes for priority based selection, -+ * Currently only valid for PARTITION_TYPE_USR_X86_64. -+ * TRIES_LEFT and PRIORITY are 4 bit wide fields. */ -+ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_PRIORITY = 48, -+ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_TRIES_LEFT = 52, -+ GRUB_GPT_PART_ATTR_OFFSET_GPTPRIO_SUCCESSFUL = 56, -+}; -+ -+/* Helpers for reading/writing partition attributes. 
*/ -+static inline grub_uint64_t -+grub_gpt_entry_attribute (struct grub_gpt_partentry *entry, -+ enum grub_gpt_part_attr_offset offset, -+ unsigned int bits) -+{ -+ grub_uint64_t attrib = grub_le_to_cpu64 (entry->attrib); -+ -+ return (attrib >> offset) & ((1ULL << bits) - 1); -+} -+ -+static inline void -+grub_gpt_entry_set_attribute (struct grub_gpt_partentry *entry, -+ grub_uint64_t value, -+ enum grub_gpt_part_attr_offset offset, -+ unsigned int bits) -+{ -+ grub_uint64_t attrib, mask; -+ -+ mask = (((1ULL << bits) - 1) << offset); -+ attrib = grub_le_to_cpu64 (entry->attrib) & ~mask; -+ attrib |= ((value << offset) & mask); -+ entry->attrib = grub_cpu_to_le64 (attrib); -+} -+ - /* Basic GPT partmap module. */ - grub_err_t - grub_gpt_partition_map_iterate (grub_disk_t disk, -diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in -new file mode 100644 -index 000000000..f4aea0dc9 ---- /dev/null -+++ b/tests/gptprio_test.in -@@ -0,0 +1,150 @@ -+#! /bin/bash -+set -e -+ -+# Copyright (C) 2010 Free Software Foundation, Inc. -+# Copyright (C) 2014 CoreOS, Inc. -+# -+# GRUB is free software: you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation, either version 3 of the License, or -+# (at your option) any later version. -+# -+# GRUB is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GRUB. If not, see . -+ -+sgdisk=sgdisk -+grubshell=@builddir@/grub-shell -+ -+if ! which "${sgdisk}" >/dev/null 2>&1; then -+ echo "sgdisk not installed; cannot test gptprio." -+ exit 77 -+fi -+ -+. "@builddir@/grub-core/modinfo.sh" -+ -+case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in -+ mips-qemu_mips | mipsel-qemu_mips | i386-qemu | i386-multiboot | i386-coreboot | mipsel-loongson) -+ disk=ata0 -+ ;; -+ powerpc-ieee1275) -+ disk=ieee1275//pci@80000000/mac-io@4/ata-3@20000/disk@0 -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. -+ exit 0 -+ ;; -+ sparc64-ieee1275) -+ disk=ieee1275//pci@1fe\,0/pci-ata@5/ide0@500/disk@0 -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. -+ exit 0 -+ ;; -+ i386-ieee1275) -+ disk=ieee1275/d -+ # FIXME: QEMU firmware has bugs which prevent it from accessing hard disk w/o recognised label. -+ exit 0 -+ ;; -+ mips-arc) -+ # FIXME: ARC firmware has bugs which prevent it from accessing hard disk w/o dvh disklabel. 
-+ exit 0 ;; -+ mipsel-arc) -+ disk=arc/scsi0/disk0/rdisk0 -+ ;; -+ *) -+ disk=hd0 -+ ;; -+esac -+img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 -+trap "rm -f '${img1}'" EXIT -+ -+prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" -+declare -a prio_uuid -+prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" -+prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" -+prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" -+ -+create_disk_image () { -+ rm -f "${img1}" -+ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none -+ ${sgdisk} \ -+ -n 1:0:+1 -c 1:ESP -t 1:ef00 \ -+ -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ -+ -n 3:0:+1 -c 3:B -t 3:"${prio_type}" -u 3:"${prio_uuid[3]}" \ -+ -n 4:0:+1 -c 4:C -t 4:"${prio_type}" -u 4:"${prio_uuid[4]}" \ -+ "${img1}" >/dev/null -+} -+ -+ -+fmt_prio () { -+ priority=$(( ( $1 & 15 ) << 48 )) -+ tries=$(( ( $2 & 15 ) << 52 )) -+ success=$(( ( $3 & 1 ) << 56 )) -+ printf %016x $(( priority | tries | success )) -+} -+ -+set_prio () { -+ part="$1" -+ attr=$(fmt_prio $2 $3 $4) -+ ${sgdisk} -A "${part}:=:${attr}" "${img1}" >/dev/null -+} -+ -+check_prio () { -+ part="$1" -+ expect=$(fmt_prio $2 $3 $4) -+ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ -+ | awk '/^Attribute flags: / {print $3}') -+ if [[ "${expect}" != "${result}" ]]; then -+ echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 -+ exit 1 -+ fi -+} -+ -+run_next() { -+ "${grubshell}" --disk="${img1}" --modules=gptprio < -Date: Sat, 15 Nov 2014 13:27:13 -0800 -Subject: [PATCH] gpt: split out checksum recomputation - -For basic data modifications the full repair function is overkill. ---- - grub-core/lib/gpt.c | 30 ++++++++++++++++++++---------- - include/grub/gpt_partition.h | 3 +++ - 2 files changed, 23 insertions(+), 10 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 67ffdf703..198234071 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -293,7 +293,6 @@ grub_err_t - grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - { - grub_uint64_t backup_header, backup_entries; -- grub_uint32_t crc; - - if (disk->log_sector_size != gpt->log_sector_size) - return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, -@@ -331,13 +330,32 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - gpt->backup.alternate_lba = gpt->primary.header_lba; - gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); - -+ /* Recompute checksums. */ -+ if (grub_gpt_update_checksums (gpt)) -+ return grub_errno; -+ -+ /* Sanity check. */ -+ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -+ -+ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -+ -+ gpt->status |= GRUB_GPT_BOTH_VALID; -+ return GRUB_ERR_NONE; -+} -+ -+grub_err_t -+grub_gpt_update_checksums (grub_gpt_t gpt) -+{ -+ grub_uint32_t crc; -+ - /* Writing headers larger than our header structure are unsupported. */ - gpt->primary.headersize = - grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); - gpt->backup.headersize = - grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); - -- /* Recompute checksums. */ - if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) - return grub_errno; - -@@ -350,14 +368,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) - return grub_errno; - -- /* Sanity check. 
*/ -- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -- -- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -- -- gpt->status |= GRUB_GPT_BOTH_VALID; - return GRUB_ERR_NONE; - } - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index e41c66539..50592d6d0 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -200,6 +200,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); - /* Sync up primary and backup headers, recompute checksums. */ - grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); - -+/* Recompute checksums, must be called after modifying GPT data. */ -+grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); -+ - /* Write headers and entry tables back to disk. */ - grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); - -From 548fe74144c4745f25c6a488f99cf3a7c04aa20b Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 27 Nov 2014 12:55:53 -0800 -Subject: [PATCH] gpt: move gpt guid printing function to common library - ---- - grub-core/commands/gptprio.c | 16 ++-------------- - grub-core/lib/gpt.c | 13 +++++++++++++ - include/grub/gpt_partition.h | 4 ++++ - 3 files changed, 19 insertions(+), 14 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 29bd11d68..ce5840b4e 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -141,20 +141,8 @@ grub_find_next (const char *disk_name, - if (!*part_name) - goto done; - -- *part_guid = -- grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", -- grub_le_to_cpu32 (part_found->guid.data1), -- grub_le_to_cpu16 (part_found->guid.data2), -- grub_le_to_cpu16 (part_found->guid.data3), -- part_found->guid.data4[0], -- part_found->guid.data4[1], -- part_found->guid.data4[2], -- part_found->guid.data4[3], -- part_found->guid.data4[4], -- part_found->guid.data4[5], -- part_found->guid.data4[6], -- part_found->guid.data4[7]); -- if (!*part_name) -+ *part_guid = grub_gpt_guid_to_str (&part_found->guid); -+ if (!*part_guid) - goto done; - - grub_errno = GRUB_ERR_NONE; -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 198234071..9a1835b84 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -31,6 +31,19 @@ GRUB_MOD_LICENSE ("GPLv3+"); - static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; - - -+char * -+grub_gpt_guid_to_str (grub_gpt_guid_t *guid) -+{ -+ return grub_xasprintf ("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", -+ grub_le_to_cpu32 (guid->data1), -+ grub_le_to_cpu16 (guid->data2), -+ grub_le_to_cpu16 (guid->data3), -+ guid->data4[0], guid->data4[1], -+ guid->data4[2], guid->data4[3], -+ guid->data4[4], guid->data4[5], -+ guid->data4[6], guid->data4[7]); -+} -+ - static grub_uint64_t - grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) - { -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 50592d6d0..166fd4b55 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -33,6 +33,10 @@ struct grub_gpt_guid - typedef struct grub_gpt_guid grub_gpt_guid_t; - typedef struct grub_gpt_guid grub_gpt_part_type_t; - -+/* Format the raw little-endian GUID as a newly allocated string. 
*/ -+char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); -+ -+ - #define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ - { \ - grub_cpu_to_le32_compile_time (a), \ -From ef9950304568defc9cc6a674cbad58a3d7947200 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 27 Nov 2014 14:54:27 -0800 -Subject: [PATCH] gpt: switch partition names to a 16 bit type - -In UEFI/GPT strings are UTF-16 so use a uint16 to make dealing with the -string practical. ---- - include/grub/gpt_partition.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 166fd4b55..1142317e3 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -92,7 +92,7 @@ struct grub_gpt_partentry - grub_uint64_t start; - grub_uint64_t end; - grub_uint64_t attrib; -- char name[72]; -+ grub_uint16_t name[36]; - } GRUB_PACKED; - - enum grub_gpt_part_attr_offset -From 65c930d19113f45719a2f2696a7dac5447ec90ed Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 27 Nov 2014 15:49:57 -0800 -Subject: [PATCH] tests: add some partitions to the gpt unit test data - ---- - tests/gpt_unit_test.c | 65 +++++++++++++++++++++++++++++++++++++++++++-------- - 1 file changed, 55 insertions(+), 10 deletions(-) - -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 86e4364a5..5692a5a52 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -89,12 +89,12 @@ struct test_data - }; - - --/* Sample primary GPT header for an empty 1MB disk. */ -+/* Sample primary GPT header for a 1MB disk. */ - static const struct grub_gpt_header example_primary = { - .magic = GRUB_GPT_HEADER_MAGIC, - .version = GRUB_GPT_HEADER_VERSION, - .headersize = sizeof (struct grub_gpt_header), -- .crc32 = grub_cpu_to_le32_compile_time (0x7cd8642c), -+ .crc32 = grub_cpu_to_le32_compile_time (0xb985abe0), - .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), - .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -@@ -104,7 +104,52 @@ static const struct grub_gpt_header example_primary = { - .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), - .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), - .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), -+ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), -+}; -+ -+static const struct grub_gpt_partentry example_entries[TABLE_ENTRIES] = { -+ { -+ .type = GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM, -+ .guid = GRUB_GPT_GUID_INIT (0xa0f1792e, 0xb4ce, 0x4136, 0xbc, 0xf2, -+ 0x1a, 0xfc, 0x13, 0x3c, 0x28, 0x28), -+ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -+ .end = grub_cpu_to_le64_compile_time (0x3f), -+ .attrib = 0x0, -+ .name = { -+ grub_cpu_to_le16_compile_time ('E'), -+ grub_cpu_to_le16_compile_time ('F'), -+ grub_cpu_to_le16_compile_time ('I'), -+ grub_cpu_to_le16_compile_time (' '), -+ grub_cpu_to_le16_compile_time ('S'), -+ grub_cpu_to_le16_compile_time ('Y'), -+ grub_cpu_to_le16_compile_time ('S'), -+ grub_cpu_to_le16_compile_time ('T'), -+ grub_cpu_to_le16_compile_time ('E'), -+ grub_cpu_to_le16_compile_time ('M'), -+ 0x0, -+ } -+ }, -+ { -+ .type = GRUB_GPT_PARTITION_TYPE_BIOS_BOOT, -+ .guid = GRUB_GPT_GUID_INIT (0x876c898d, 0x1b40, 0x4727, 0xa1, 0x61, -+ 0xed, 0xf9, 0xb5, 0x48, 0x66, 0x74), -+ .start = grub_cpu_to_le64_compile_time (0x40), -+ .end = 
grub_cpu_to_le64_compile_time (0x7f), -+ .attrib = grub_cpu_to_le64_compile_time ( -+ 1ULL << GRUB_GPT_PART_ATTR_OFFSET_LEGACY_BIOS_BOOTABLE), -+ .name = { -+ grub_cpu_to_le16_compile_time ('B'), -+ grub_cpu_to_le16_compile_time ('I'), -+ grub_cpu_to_le16_compile_time ('O'), -+ grub_cpu_to_le16_compile_time ('S'), -+ grub_cpu_to_le16_compile_time (' '), -+ grub_cpu_to_le16_compile_time ('B'), -+ grub_cpu_to_le16_compile_time ('O'), -+ grub_cpu_to_le16_compile_time ('O'), -+ grub_cpu_to_le16_compile_time ('T'), -+ 0x0, -+ } -+ }, - }; - - /* And the backup header. */ -@@ -112,7 +157,7 @@ static const struct grub_gpt_header example_backup = { - .magic = GRUB_GPT_HEADER_MAGIC, - .version = GRUB_GPT_HEADER_VERSION, - .headersize = sizeof (struct grub_gpt_header), -- .crc32 = grub_cpu_to_le32_compile_time (0xcfaa4a27), -+ .crc32 = grub_cpu_to_le32_compile_time (0x0af785eb), - .header_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), - .alternate_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), - .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -@@ -122,7 +167,7 @@ static const struct grub_gpt_header example_backup = { - .partitions = grub_cpu_to_le64_compile_time (BACKUP_TABLE_SECTOR), - .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), - .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE), -- .partentry_crc32 = grub_cpu_to_le32_compile_time (0xab54d286), -+ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), - }; - - /* Sample protective MBR for the same 1MB disk. Note, this matches -@@ -192,6 +237,10 @@ reset_disk (struct test_data *data) - memcpy (&data->raw->mbr, &example_pmbr, sizeof (data->raw->mbr)); - memcpy (&data->raw->primary_header, &example_primary, - sizeof (data->raw->primary_header)); -+ memcpy (&data->raw->primary_entries, &example_entries, -+ sizeof (data->raw->primary_entries)); -+ memcpy (&data->raw->backup_entries, &example_entries, -+ sizeof (data->raw->backup_entries)); - memcpy (&data->raw->backup_header, &example_backup, - sizeof (data->raw->backup_header)); - -@@ -270,11 +319,7 @@ read_disk (struct test_data *data) - - gpt = grub_gpt_read (data->dev->disk); - if (gpt == NULL) -- { -- grub_print_error (); -- grub_fatal ("grub_gpt_read failed"); -- } -- -+ grub_fatal ("grub_gpt_read failed: %s", grub_errmsg); - - return gpt; - } -From ff730f68d8f9816001d4b4cff974f500f670a992 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 27 Nov 2014 16:34:21 -0800 -Subject: [PATCH] gpt: add search by partition label and uuid commands - -Builds on the existing filesystem search code. Only for GPT right now. 
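As a rough usage sketch (the partition label and GUIDs are borrowed from the unit test data added below; the variable names are placeholders), the new commands could be used along these lines:

    search.part_label "EFI SYSTEM" esp_dev
    search.part_uuid 876c898d-1b40-4727-a161-edf9b5486674 boot_dev
    # or through the search wrapper:
    search --part-label --set=esp_dev "EFI SYSTEM"

Each command sets the named variable to the first matching device, mirroring the behaviour of the existing search.fs_label and search.fs_uuid commands.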
---- - Makefile.util.def | 2 + - grub-core/Makefile.core.def | 10 ++++ - grub-core/commands/search.c | 49 ++++++++++++++++++++ - grub-core/commands/search_part_label.c | 5 ++ - grub-core/commands/search_part_uuid.c | 5 ++ - grub-core/commands/search_wrap.c | 12 +++++ - grub-core/lib/gpt.c | 64 ++++++++++++++++++++++++++ - include/grub/gpt_partition.h | 16 +++++++ - include/grub/search.h | 4 ++ - tests/gpt_unit_test.c | 84 ++++++++++++++++++++++++++++++++++ - 10 files changed, 251 insertions(+) - create mode 100644 grub-core/commands/search_part_label.c - create mode 100644 grub-core/commands/search_part_uuid.c - -diff --git a/Makefile.util.def b/Makefile.util.def -index 9249f77be..bc0f178ff 100644 ---- a/Makefile.util.def -+++ b/Makefile.util.def -@@ -1271,6 +1271,8 @@ program = { - name = gpt_unit_test; - common = tests/gpt_unit_test.c; - common = tests/lib/unit_test.c; -+ common = grub-core/commands/search_part_label.c; -+ common = grub-core/commands/search_part_uuid.c; - common = grub-core/disk/host.c; - common = grub-core/kern/emu/hostfs.c; - common = grub-core/lib/gpt.c; -diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 4620138cb..4cce18d6d 100644 ---- a/grub-core/Makefile.core.def -+++ b/grub-core/Makefile.core.def -@@ -1013,6 +1013,16 @@ module = { - common = commands/search_label.c; - }; - -+module = { -+ name = search_part_uuid; -+ common = commands/search_part_uuid.c; -+}; -+ -+module = { -+ name = search_part_label; -+ common = commands/search_part_label.c; -+}; -+ - module = { - name = setpci; - common = commands/setpci.c; -diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index 7dd32e445..09e165ed3 100644 ---- a/grub-core/commands/search.c -+++ b/grub-core/commands/search.c -@@ -30,6 +30,9 @@ - #include - #include - #include -+#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) -+#include -+#endif - - GRUB_MOD_LICENSE ("GPLv3+"); - -@@ -90,6 +93,44 @@ iterate_device (const char *name, void *data) - } - grub_free (buf); - } -+#elif defined(DO_SEARCH_PART_UUID) -+ { -+ grub_device_t dev; -+ char *quid; -+ -+ dev = grub_device_open (name); -+ if (dev) -+ { -+ if (grub_gpt_part_uuid (dev, &quid) == GRUB_ERR_NONE) -+ { -+ if (grub_strcasecmp (quid, ctx->key) == 0) -+ found = 1; -+ -+ grub_free (quid); -+ } -+ -+ grub_device_close (dev); -+ } -+ } -+#elif defined(DO_SEARCH_PART_LABEL) -+ { -+ grub_device_t dev; -+ char *quid; -+ -+ dev = grub_device_open (name); -+ if (dev) -+ { -+ if (grub_gpt_part_label (dev, &quid) == GRUB_ERR_NONE) -+ { -+ if (grub_strcmp (quid, ctx->key) == 0) -+ found = 1; -+ -+ grub_free (quid); -+ } -+ -+ grub_device_close (dev); -+ } -+ } - #else - { - /* SEARCH_FS_UUID or SEARCH_LABEL */ -@@ -313,6 +354,10 @@ static grub_command_t cmd; - - #ifdef DO_SEARCH_FILE - GRUB_MOD_INIT(search_fs_file) -+#elif defined(DO_SEARCH_PART_UUID) -+GRUB_MOD_INIT(search_part_uuid) -+#elif defined(DO_SEARCH_PART_LABEL) -+GRUB_MOD_INIT(search_part_label) - #elif defined (DO_SEARCH_FS_UUID) - GRUB_MOD_INIT(search_fs_uuid) - #else -@@ -327,6 +372,10 @@ GRUB_MOD_INIT(search_label) - - #ifdef DO_SEARCH_FILE - GRUB_MOD_FINI(search_fs_file) -+#elif defined(DO_SEARCH_PART_UUID) -+GRUB_MOD_FINI(search_part_uuid) -+#elif defined(DO_SEARCH_PART_LABEL) -+GRUB_MOD_FINI(search_part_label) - #elif defined (DO_SEARCH_FS_UUID) - GRUB_MOD_FINI(search_fs_uuid) - #else -diff --git a/grub-core/commands/search_part_label.c b/grub-core/commands/search_part_label.c -new file mode 100644 -index 000000000..ca906cbd9 ---- /dev/null -+++ 
b/grub-core/commands/search_part_label.c -@@ -0,0 +1,5 @@ -+#define DO_SEARCH_PART_LABEL 1 -+#define FUNC_NAME grub_search_part_label -+#define COMMAND_NAME "search.part_label" -+#define HELP_MESSAGE N_("Search devices by partition label. If VARIABLE is specified, the first device found is set to a variable.") -+#include "search.c" -diff --git a/grub-core/commands/search_part_uuid.c b/grub-core/commands/search_part_uuid.c -new file mode 100644 -index 000000000..2d1d3d0d7 ---- /dev/null -+++ b/grub-core/commands/search_part_uuid.c -@@ -0,0 +1,5 @@ -+#define DO_SEARCH_PART_UUID 1 -+#define FUNC_NAME grub_search_part_uuid -+#define COMMAND_NAME "search.part_uuid" -+#define HELP_MESSAGE N_("Search devices by partition UUID. If VARIABLE is specified, the first device found is set to a variable.") -+#include "search.c" -diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index d7fd26b94..e3ff756df 100644 ---- a/grub-core/commands/search_wrap.c -+++ b/grub-core/commands/search_wrap.c -@@ -36,6 +36,10 @@ static const struct grub_arg_option options[] = - 0, 0}, - {"fs-uuid", 'u', 0, N_("Search devices by a filesystem UUID."), - 0, 0}, -+ {"part-label", 'L', 0, N_("Search devices by a partition label."), -+ 0, 0}, -+ {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), -+ 0, 0}, - {"set", 's', GRUB_ARG_OPTION_OPTIONAL, - N_("Set a variable to the first device found."), N_("VARNAME"), - ARG_TYPE_STRING}, -@@ -71,6 +75,8 @@ enum options - SEARCH_FILE, - SEARCH_LABEL, - SEARCH_FS_UUID, -+ SEARCH_PART_LABEL, -+ SEARCH_PART_UUID, - SEARCH_SET, - SEARCH_NO_FLOPPY, - SEARCH_HINT, -@@ -186,6 +192,12 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) - else if (state[SEARCH_FS_UUID].set) - grub_search_fs_uuid (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); -+ else if (state[SEARCH_PART_LABEL].set) -+ grub_search_part_label (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); -+ else if (state[SEARCH_PART_UUID].set) -+ grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); - else if (state[SEARCH_FILE].set) - grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 9a1835b84..10a4b852d 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -18,7 +18,9 @@ - * along with GRUB. If not, see . 
- */ - -+#include - #include -+#include - #include - #include - #include -@@ -44,6 +46,68 @@ grub_gpt_guid_to_str (grub_gpt_guid_t *guid) - guid->data4[6], guid->data4[7]); - } - -+static grub_err_t -+grub_gpt_device_partentry (grub_device_t device, -+ struct grub_gpt_partentry *entry) -+{ -+ grub_disk_t disk = device->disk; -+ grub_partition_t p; -+ grub_err_t err; -+ -+ if (!disk || !disk->partition) -+ return grub_error (GRUB_ERR_BUG, "not a partition"); -+ -+ if (grub_strcmp (disk->partition->partmap->name, "gpt")) -+ return grub_error (GRUB_ERR_BAD_ARGUMENT, "not a GPT partition"); -+ -+ p = disk->partition; -+ disk->partition = p->parent; -+ err = grub_disk_read (disk, p->offset, p->index, sizeof (*entry), entry); -+ disk->partition = p; -+ -+ return err; -+} -+ -+grub_err_t -+grub_gpt_part_label (grub_device_t device, char **label) -+{ -+ struct grub_gpt_partentry entry; -+ const grub_size_t name_len = ARRAY_SIZE (entry.name); -+ const grub_size_t label_len = name_len * GRUB_MAX_UTF8_PER_UTF16 + 1; -+ grub_size_t i; -+ grub_uint8_t *end; -+ -+ if (grub_gpt_device_partentry (device, &entry)) -+ return grub_errno; -+ -+ *label = grub_malloc (label_len); -+ if (!*label) -+ return grub_errno; -+ -+ for (i = 0; i < name_len; i++) -+ entry.name[i] = grub_le_to_cpu16 (entry.name[i]); -+ -+ end = grub_utf16_to_utf8 ((grub_uint8_t *) *label, entry.name, name_len); -+ *end = '\0'; -+ -+ return GRUB_ERR_NONE; -+} -+ -+grub_err_t -+grub_gpt_part_uuid (grub_device_t device, char **uuid) -+{ -+ struct grub_gpt_partentry entry; -+ -+ if (grub_gpt_device_partentry (device, &entry)) -+ return grub_errno; -+ -+ *uuid = grub_gpt_guid_to_str (&entry.guid); -+ if (!*uuid) -+ return grub_errno; -+ -+ return GRUB_ERR_NONE; -+} -+ - static grub_uint64_t - grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) - { -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 1142317e3..8ff62d67f 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -49,6 +49,10 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); - GRUB_GPT_GUID_INIT (0x0, 0x0, 0x0, \ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0) - -+#define GRUB_GPT_PARTITION_TYPE_EFI_SYSTEM \ -+ GRUB_GPT_GUID_INIT (0xc12a7328, 0xf81f, 0x11d2, \ -+ 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b) -+ - #define GRUB_GPT_PARTITION_TYPE_BIOS_BOOT \ - GRUB_GPT_GUID_INIT (0x21686148, 0x6449, 0x6e6f, \ - 0x74, 0x4e, 0x65, 0x65, 0x64, 0x45, 0x46, 0x49) -@@ -216,4 +220,16 @@ grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); - grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, - unsigned int log_sector_size); - -+ -+/* Utilities for simple partition data lookups, usage is intended to -+ * be similar to fs->label and fs->uuid functions. */ -+ -+/* Return the partition label of the device DEVICE in LABEL. -+ * The label is in a new buffer and should be freed by the caller. */ -+grub_err_t grub_gpt_part_label (grub_device_t device, char **label); -+ -+/* Return the partition uuid of the device DEVICE in UUID. -+ * The label is in a new buffer and should be freed by the caller. */ -+grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); -+ - #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ -diff --git a/include/grub/search.h b/include/grub/search.h -index d80347df3..c2f40abe9 100644 ---- a/include/grub/search.h -+++ b/include/grub/search.h -@@ -25,5 +25,9 @@ void grub_search_fs_uuid (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); - void grub_search_label (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); -+void grub_search_part_uuid (const char *key, const char *var, int no_floppy, -+ char **hints, unsigned nhints); -+void grub_search_part_label (const char *key, const char *var, int no_floppy, -+ char **hints, unsigned nhints); - - #endif -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 5692a5a52..deb55a926 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -21,10 +21,12 @@ - #include - #include - #include -+#include - #include - #include - #include - #include -+#include - #include - - #include -@@ -534,6 +536,84 @@ repair_test (void) - - close_disk (&data); - } -+ -+static void -+search_label_test (void) -+{ -+ struct test_data data; -+ const char *test_result; -+ char *expected_result; -+ -+ open_disk (&data); -+ -+ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); -+ grub_env_unset ("test_result"); -+ grub_search_part_label ("EFI SYSTEM", "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, -+ "wrong device: %s (%s)", test_result, expected_result); -+ grub_free (expected_result); -+ -+ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); -+ grub_env_unset ("test_result"); -+ grub_search_part_label ("BIOS BOOT", "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, -+ "wrong device: %s (%s)", test_result, expected_result); -+ grub_free (expected_result); -+ -+ grub_env_unset ("test_result"); -+ grub_search_part_label ("bogus name", "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result == NULL, -+ "unexpected device: %s", test_result); -+ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ close_disk (&data); -+} -+ -+static void -+search_uuid_test (void) -+{ -+ struct test_data data; -+ const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; -+ const char gpt2_uuid[] = "876c898d-1b40-4727-a161-edf9b5486674"; -+ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; -+ const char *test_result; -+ char *expected_result; -+ -+ open_disk (&data); -+ -+ expected_result = grub_xasprintf ("%s,gpt1", data.dev->disk->name); -+ grub_env_unset ("test_result"); -+ grub_search_part_uuid (gpt1_uuid, "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, -+ "wrong device: %s (%s)", test_result, expected_result); -+ grub_free (expected_result); -+ -+ expected_result = grub_xasprintf ("%s,gpt2", data.dev->disk->name); -+ grub_env_unset ("test_result"); -+ grub_search_part_uuid (gpt2_uuid, "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, -+ "wrong device: %s (%s)", test_result, expected_result); -+ grub_free (expected_result); -+ -+ grub_env_unset ("test_result"); -+ 
grub_search_part_uuid (bogus_uuid, "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result == NULL, -+ "unexpected device: %s", test_result); -+ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ close_disk (&data); -+} -+ - void - grub_unit_test_init (void) - { -@@ -546,6 +626,8 @@ grub_unit_test_init (void) - grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); - grub_test_register ("gpt_read_fallback_test", read_fallback_test); - grub_test_register ("gpt_repair_test", repair_test); -+ grub_test_register ("gpt_search_label_test", search_label_test); -+ grub_test_register ("gpt_search_uuid_test", search_uuid_test); - } - - void -@@ -557,5 +639,7 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_read_invalid_test"); - grub_test_unregister ("gpt_read_fallback_test"); - grub_test_unregister ("gpt_repair_test"); -+ grub_test_unregister ("gpt_search_label_test"); -+ grub_test_unregister ("gpt_search_uuid_test"); - grub_fini_all (); - } -From 57d264518a2a01730c1ba14d058d2a5573d6bd15 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Fri, 31 Jul 2015 15:03:11 -0700 -Subject: [PATCH] gpt: clean up little-endian crc32 computation - - - Remove problematic cast from *uint8_t to *uint32_t (alignment issue). - - Remove dynamic allocation and associated error handling paths. - - Match parameter ordering to existing grub_crypto_hash function. ---- - grub-core/lib/gpt.c | 51 +++++++++++++-------------------------------------- - 1 file changed, 13 insertions(+), 38 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 10a4b852d..aedc4f7a1 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -122,45 +122,29 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) - return sectors; - } - --static grub_err_t --grub_gpt_lecrc32 (void *data, grub_size_t len, grub_uint32_t *crc) -+static void -+grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) - { -- grub_uint8_t *crc32_context; -- -- crc32_context = grub_zalloc (GRUB_MD_CRC32->contextsize); -- if (!crc32_context) -- return grub_errno; -+ grub_uint32_t crc32_val; - -- GRUB_MD_CRC32->init (crc32_context); -- GRUB_MD_CRC32->write (crc32_context, data, len); -- GRUB_MD_CRC32->final (crc32_context); -+ grub_crypto_hash (GRUB_MD_CRC32, &crc32_val, data, len); - - /* GRUB_MD_CRC32 always uses big endian, gpt is always little. */ -- *crc = grub_swap_bytes32 (*(grub_uint32_t *) -- GRUB_MD_CRC32->read (crc32_context)); -- -- grub_free (crc32_context); -- -- return GRUB_ERR_NONE; -+ *crc = grub_swap_bytes32 (crc32_val); - } - --static grub_err_t --grub_gpt_header_lecrc32 (struct grub_gpt_header *header, grub_uint32_t *crc) -+static void -+grub_gpt_header_lecrc32 (grub_uint32_t *crc, struct grub_gpt_header *header) - { - grub_uint32_t old, new; -- grub_err_t err; - - /* crc32 must be computed with the field cleared. */ - old = header->crc32; - header->crc32 = 0; -- err = grub_gpt_lecrc32 (header, sizeof (*header), &new); -+ grub_gpt_lecrc32 (&new, header, sizeof (*header)); - header->crc32 = old; - -- if (err) -- return err; -- - *crc = new; -- return GRUB_ERR_NONE; - } - - /* Make sure the MBR is a protective MBR and not a normal MBR. 
*/ -@@ -192,9 +176,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - if (gpt->version != GRUB_GPT_HEADER_VERSION) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "unknown GPT version"); - -- if (grub_gpt_header_lecrc32 (gpt, &crc)) -- return grub_errno; -- -+ grub_gpt_header_lecrc32 (&crc, gpt); - if (gpt->crc32 != crc) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); - -@@ -289,9 +271,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - if (grub_disk_read (disk, addr, 0, entries_size, entries)) - goto fail; - -- if (grub_gpt_lecrc32 (entries, entries_size, &crc)) -- goto fail; -- -+ grub_gpt_lecrc32 (&crc, entries, entries_size); - if (crc != header->partentry_crc32) - { - grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry crc32"); -@@ -433,17 +413,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) - gpt->backup.headersize = - grub_cpu_to_le32_compile_time (sizeof (gpt->backup)); - -- if (grub_gpt_lecrc32 (gpt->entries, gpt->entries_size, &crc)) -- return grub_errno; -- -+ grub_gpt_lecrc32 (&crc, gpt->entries, gpt->entries_size); - gpt->primary.partentry_crc32 = crc; - gpt->backup.partentry_crc32 = crc; - -- if (grub_gpt_header_lecrc32 (&gpt->primary, &gpt->primary.crc32)) -- return grub_errno; -- -- if (grub_gpt_header_lecrc32 (&gpt->backup, &gpt->backup.crc32)) -- return grub_errno; -+ grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); -+ grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); - - return GRUB_ERR_NONE; - } -From 8ec39207e9c81685459a9901ee2c057924eb8ec0 Mon Sep 17 00:00:00 2001 -From: Alex Crawford -Date: Mon, 31 Aug 2015 15:23:39 -0700 -Subject: [PATCH] gpt: minor cleanup - ---- - include/grub/gpt_partition.h | 2 +- - tests/gpt_unit_test.c | 12 ++++++------ - 2 files changed, 7 insertions(+), 7 deletions(-) - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 8ff62d67f..21359f08a 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -229,7 +229,7 @@ grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, - grub_err_t grub_gpt_part_label (grub_device_t device, char **label); - - /* Return the partition uuid of the device DEVICE in UUID. -- * The label is in a new buffer and should be freed by the caller. */ -+ * The uuid is in a new buffer and should be freed by the caller. */ - grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); - - #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index deb55a926..7a1af46e1 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -538,7 +538,7 @@ repair_test (void) - } - - static void --search_label_test (void) -+search_part_label_test (void) - { - struct test_data data; - const char *test_result; -@@ -575,7 +575,7 @@ search_label_test (void) - } - - static void --search_uuid_test (void) -+search_part_uuid_test (void) - { - struct test_data data; - const char gpt1_uuid[] = "A0F1792E-B4CE-4136-BCF2-1AFC133C2828"; -@@ -626,8 +626,8 @@ grub_unit_test_init (void) - grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); - grub_test_register ("gpt_read_fallback_test", read_fallback_test); - grub_test_register ("gpt_repair_test", repair_test); -- grub_test_register ("gpt_search_label_test", search_label_test); -- grub_test_register ("gpt_search_uuid_test", search_uuid_test); -+ grub_test_register ("gpt_search_part_label_test", search_part_label_test); -+ grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); - } - - void -@@ -639,7 +639,7 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_read_invalid_test"); - grub_test_unregister ("gpt_read_fallback_test"); - grub_test_unregister ("gpt_repair_test"); -- grub_test_unregister ("gpt_search_label_test"); -- grub_test_unregister ("gpt_search_uuid_test"); -+ grub_test_unregister ("gpt_search_part_label_test"); -+ grub_test_unregister ("gpt_search_part_uuid_test"); - grub_fini_all (); - } -From 786e2f7da69a2a238390cb56f394c102c3938f09 Mon Sep 17 00:00:00 2001 -From: Alex Crawford -Date: Mon, 31 Aug 2015 15:15:48 -0700 -Subject: [PATCH] gpt: add search by disk uuid command - ---- - Makefile.util.def | 1 + - grub-core/Makefile.core.def | 5 +++++ - grub-core/commands/search.c | 28 ++++++++++++++++++++++++++-- - grub-core/commands/search_disk_uuid.c | 5 +++++ - grub-core/commands/search_wrap.c | 6 ++++++ - grub-core/lib/gpt.c | 21 +++++++++++++++++++++ - include/grub/gpt_partition.h | 4 ++++ - include/grub/search.h | 2 ++ - tests/gpt_unit_test.c | 33 +++++++++++++++++++++++++++++++++ - 9 files changed, 103 insertions(+), 2 deletions(-) - create mode 100644 grub-core/commands/search_disk_uuid.c - -diff --git a/Makefile.util.def b/Makefile.util.def -index bc0f178ff..4b1b4c410 100644 ---- a/Makefile.util.def -+++ b/Makefile.util.def -@@ -1273,6 +1273,7 @@ program = { - common = tests/lib/unit_test.c; - common = grub-core/commands/search_part_label.c; - common = grub-core/commands/search_part_uuid.c; -+ common = grub-core/commands/search_disk_uuid.c; - common = grub-core/disk/host.c; - common = grub-core/kern/emu/hostfs.c; - common = grub-core/lib/gpt.c; -diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 4cce18d6d..ae68b75a8 100644 ---- a/grub-core/Makefile.core.def -+++ b/grub-core/Makefile.core.def -@@ -1023,6 +1023,11 @@ module = { - common = commands/search_part_label.c; - }; - -+module = { -+ name = search_disk_uuid; -+ common = commands/search_disk_uuid.c; -+}; -+ - module = { - name = setpci; - common = commands/setpci.c; -diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index 09e165ed3..83837b564 100644 ---- a/grub-core/commands/search.c -+++ b/grub-core/commands/search.c -@@ -30,7 +30,8 @@ - #include - #include - #include --#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) -+#if defined(DO_SEARCH_PART_UUID) || defined(DO_SEARCH_PART_LABEL) || \ -+ defined(DO_SEARCH_DISK_UUID) - #include - 
#endif - -@@ -69,7 +70,7 @@ iterate_device (const char *name, void *data) - name[0] == 'f' && name[1] == 'd' && name[2] >= '0' && name[2] <= '9') - return 1; - --#ifdef DO_SEARCH_FS_UUID -+#if defined(DO_SEARCH_FS_UUID) || defined(DO_SEARCH_DISK_UUID) - #define compare_fn grub_strcasecmp - #else - #define compare_fn grub_strcmp -@@ -128,6 +129,25 @@ iterate_device (const char *name, void *data) - grub_free (quid); - } - -+ grub_device_close (dev); -+ } -+ } -+#elif defined(DO_SEARCH_DISK_UUID) -+ { -+ grub_device_t dev; -+ char *quid; -+ -+ dev = grub_device_open (name); -+ if (dev) -+ { -+ if (grub_gpt_disk_uuid (dev, &quid) == GRUB_ERR_NONE) -+ { -+ if (grub_strcmp (quid, ctx->key) == 0) -+ found = 1; -+ -+ grub_free (quid); -+ } -+ - grub_device_close (dev); - } - } -@@ -360,6 +380,8 @@ GRUB_MOD_INIT(search_part_uuid) - GRUB_MOD_INIT(search_part_label) - #elif defined (DO_SEARCH_FS_UUID) - GRUB_MOD_INIT(search_fs_uuid) -+#elif defined (DO_SEARCH_DISK_UUID) -+GRUB_MOD_INIT(search_disk_uuid) - #else - GRUB_MOD_INIT(search_label) - #endif -@@ -378,6 +400,8 @@ GRUB_MOD_FINI(search_part_uuid) - GRUB_MOD_FINI(search_part_label) - #elif defined (DO_SEARCH_FS_UUID) - GRUB_MOD_FINI(search_fs_uuid) -+#elif defined (DO_SEARCH_DISK_UUID) -+GRUB_MOD_FINI(search_disk_uuid) - #else - GRUB_MOD_FINI(search_label) - #endif -diff --git a/grub-core/commands/search_disk_uuid.c b/grub-core/commands/search_disk_uuid.c -new file mode 100644 -index 000000000..fba96f6b8 ---- /dev/null -+++ b/grub-core/commands/search_disk_uuid.c -@@ -0,0 +1,5 @@ -+#define DO_SEARCH_DISK_UUID 1 -+#define FUNC_NAME grub_search_disk_uuid -+#define COMMAND_NAME "search.disk_uuid" -+#define HELP_MESSAGE N_("Search devices by disk UUID. If VARIABLE is specified, the first device found is set to a variable.") -+#include "search.c" -diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index e3ff756df..d931c56c5 100644 ---- a/grub-core/commands/search_wrap.c -+++ b/grub-core/commands/search_wrap.c -@@ -40,6 +40,8 @@ static const struct grub_arg_option options[] = - 0, 0}, - {"part-uuid", 'U', 0, N_("Search devices by a partition UUID."), - 0, 0}, -+ {"disk-uuid", 'U', 0, N_("Search devices by a disk UUID."), -+ 0, 0}, - {"set", 's', GRUB_ARG_OPTION_OPTIONAL, - N_("Set a variable to the first device found."), N_("VARNAME"), - ARG_TYPE_STRING}, -@@ -77,6 +79,7 @@ enum options - SEARCH_FS_UUID, - SEARCH_PART_LABEL, - SEARCH_PART_UUID, -+ SEARCH_DISK_UUID, - SEARCH_SET, - SEARCH_NO_FLOPPY, - SEARCH_HINT, -@@ -198,6 +201,9 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) - else if (state[SEARCH_PART_UUID].set) - grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); -+ else if (state[SEARCH_DISK_UUID].set) -+ grub_search_disk_uuid (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); - else if (state[SEARCH_FILE].set) - grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index aedc4f7a1..e162bafd3 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -108,6 +108,27 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) - return GRUB_ERR_NONE; - } - -+grub_err_t -+grub_gpt_disk_uuid (grub_device_t device, char **uuid) -+{ -+ grub_gpt_t gpt = grub_gpt_read (device->disk); -+ if (!gpt) -+ goto done; -+ -+ grub_errno = GRUB_ERR_NONE; -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); -+ else if (gpt->status & 
GRUB_GPT_BACKUP_HEADER_VALID) -+ *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); -+ else -+ grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); -+ -+done: -+ grub_gpt_free (gpt); -+ return grub_errno; -+} -+ - static grub_uint64_t - grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) - { -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 21359f08a..4a6ed25b3 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -232,4 +232,8 @@ grub_err_t grub_gpt_part_label (grub_device_t device, char **label); - * The uuid is in a new buffer and should be freed by the caller. */ - grub_err_t grub_gpt_part_uuid (grub_device_t device, char **uuid); - -+/* Return the disk uuid of the device DEVICE in UUID. -+ * The uuid is in a new buffer and should be freed by the caller. */ -+grub_err_t grub_gpt_disk_uuid (grub_device_t device, char **uuid); -+ - #endif /* ! GRUB_GPT_PARTITION_HEADER */ -diff --git a/include/grub/search.h b/include/grub/search.h -index c2f40abe9..7f69d25d1 100644 ---- a/include/grub/search.h -+++ b/include/grub/search.h -@@ -29,5 +29,7 @@ void grub_search_part_uuid (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); - void grub_search_part_label (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); -+void grub_search_disk_uuid (const char *key, const char *var, int no_floppy, -+ char **hints, unsigned nhints); - - #endif -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 7a1af46e1..60f601729 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -614,6 +614,37 @@ search_part_uuid_test (void) - close_disk (&data); - } - -+static void -+search_disk_uuid_test (void) -+{ -+ struct test_data data; -+ const char disk_uuid[] = "69c131ad-67d6-46c6-93c4-124c755256ac"; -+ const char bogus_uuid[] = "1534c928-c50e-4866-9daf-6a9fd7918a76"; -+ const char *test_result; -+ char *expected_result; -+ -+ open_disk (&data); -+ -+ expected_result = grub_xasprintf ("%s", data.dev->disk->name); -+ grub_env_unset ("test_result"); -+ grub_search_disk_uuid (disk_uuid, "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result && strcmp (test_result, expected_result) == 0, -+ "wrong device: %s (%s)", test_result, expected_result); -+ grub_free (expected_result); -+ -+ grub_env_unset ("test_result"); -+ grub_search_disk_uuid (bogus_uuid, "test_result", 0, NULL, 0); -+ test_result = grub_env_get ("test_result"); -+ grub_test_assert (test_result == NULL, -+ "unexpected device: %s", test_result); -+ grub_test_assert (grub_errno == GRUB_ERR_FILE_NOT_FOUND, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+ -+ close_disk (&data); -+} -+ - void - grub_unit_test_init (void) - { -@@ -628,6 +659,7 @@ grub_unit_test_init (void) - grub_test_register ("gpt_repair_test", repair_test); - grub_test_register ("gpt_search_part_label_test", search_part_label_test); - grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); -+ grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); - } - - void -@@ -641,5 +673,6 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_repair_test"); - grub_test_unregister ("gpt_search_part_label_test"); - grub_test_unregister ("gpt_search_part_uuid_test"); -+ grub_test_unregister ("gpt_search_disk_uuid_test"); - grub_fini_all (); - } -From b993a3cec81275809b5a9b55b10c170a8f862bfe Mon Sep 17 00:00:00 2001 -From: Michael Marineau 
-Date: Mon, 25 Jul 2016 14:59:29 -0700 -Subject: [PATCH] gpt: do not use disk sizes GRUB will reject as invalid later - on - -GRUB assumes that no disk is ever larger than 1EiB and rejects -reads/writes to such locations. Unfortunately this is not conveyed in -the usual way with the special GRUB_DISK_SIZE_UNKNOWN value. ---- - grub-core/lib/gpt.c | 26 ++++++++++++++++++++++++-- - 1 file changed, 24 insertions(+), 2 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index e162bafd3..3e17f2771 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -143,6 +143,28 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) - return sectors; - } - -+/* Copied from grub-core/kern/disk_common.c grub_disk_adjust_range so we can -+ * avoid attempting to use disk->total_sectors when GRUB won't let us. -+ * TODO: Why is disk->total_sectors not set to GRUB_DISK_SIZE_UNKNOWN? */ -+static int -+grub_gpt_disk_size_valid (grub_disk_t disk) -+{ -+ grub_disk_addr_t total_sectors; -+ -+ /* Transform total_sectors to number of 512B blocks. */ -+ total_sectors = disk->total_sectors << (disk->log_sector_size - GRUB_DISK_SECTOR_BITS); -+ -+ /* Some drivers have problems with disks above reasonable. -+ Treat unknown as 1EiB disk. While on it, clamp the size to 1EiB. -+ Just one condition is enough since GRUB_DISK_UNKNOWN_SIZE << ls is always -+ above 9EiB. -+ */ -+ if (total_sectors > (1ULL << 51)) -+ return 0; -+ -+ return 1; -+} -+ - static void - grub_gpt_lecrc32 (grub_uint32_t *crc, const void *data, grub_size_t len) - { -@@ -242,7 +264,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - grub_disk_addr_t addr; - - /* Assumes gpt->log_sector_size == disk->log_sector_size */ -- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) -+ if (grub_gpt_disk_size_valid(disk)) - sector = disk->total_sectors - 1; - else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) - sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); -@@ -394,7 +416,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - return grub_error (GRUB_ERR_BUG, "No valid GPT header"); - - /* Relocate backup to end if disk whenever possible. 
*/ -- if (disk->total_sectors != GRUB_DISK_SIZE_UNKNOWN) -+ if (grub_gpt_disk_size_valid(disk)) - backup_header = disk->total_sectors - 1; - - backup_entries = backup_header - -From ebc7bbaa181bcfc381b1ddad236e0ce68655de68 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 10 Aug 2016 18:26:03 -0700 -Subject: [PATCH] gpt: add verbose debug logging - ---- - grub-core/lib/gpt.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 109 insertions(+), 8 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3e17f2771..c2821b563 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -207,6 +207,18 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); - } - -+static grub_uint64_t -+grub_gpt_entries_sectors (struct grub_gpt_header *gpt, -+ unsigned int log_sector_size) -+{ -+ grub_uint64_t sector_bytes, entries_bytes; -+ -+ sector_bytes = 1ULL << log_sector_size; -+ entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * -+ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); -+ return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); -+} -+ - grub_err_t - grub_gpt_header_check (struct grub_gpt_header *gpt, - unsigned int log_sector_size) -@@ -236,6 +248,64 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - return GRUB_ERR_NONE; - } - -+static grub_err_t -+grub_gpt_check_primary (grub_gpt_t gpt) -+{ -+ grub_uint64_t backup, primary, entries, entries_len, start, end; -+ -+ primary = grub_le_to_cpu64 (gpt->primary.header_lba); -+ backup = grub_le_to_cpu64 (gpt->primary.alternate_lba); -+ entries = grub_le_to_cpu64 (gpt->primary.partitions); -+ entries_len = grub_gpt_entries_sectors(&gpt->primary, gpt->log_sector_size); -+ start = grub_le_to_cpu64 (gpt->primary.start); -+ end = grub_le_to_cpu64 (gpt->primary.end); -+ -+ grub_dprintf ("gpt", "Primary GPT layout:\n" -+ "primary header = 0x%llx backup header = 0x%llx\n" -+ "entries location = 0x%llx length = 0x%llx\n" -+ "first usable = 0x%llx last usable = 0x%llx\n", -+ (unsigned long long) primary, -+ (unsigned long long) backup, -+ (unsigned long long) entries, -+ (unsigned long long) entries_len, -+ (unsigned long long) start, -+ (unsigned long long) end); -+ -+ if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ return grub_errno; -+ -+ return GRUB_ERR_NONE; -+} -+ -+static grub_err_t -+grub_gpt_check_backup (grub_gpt_t gpt) -+{ -+ grub_uint64_t backup, primary, entries, entries_len, start, end; -+ -+ backup = grub_le_to_cpu64 (gpt->backup.header_lba); -+ primary = grub_le_to_cpu64 (gpt->backup.alternate_lba); -+ entries = grub_le_to_cpu64 (gpt->backup.partitions); -+ entries_len = grub_gpt_entries_sectors(&gpt->backup, gpt->log_sector_size); -+ start = grub_le_to_cpu64 (gpt->backup.start); -+ end = grub_le_to_cpu64 (gpt->backup.end); -+ -+ grub_dprintf ("gpt", "Backup GPT layout:\n" -+ "primary header = 0x%llx backup header = 0x%llx\n" -+ "entries location = 0x%llx length = 0x%llx\n" -+ "first usable = 0x%llx last usable = 0x%llx\n", -+ (unsigned long long) primary, -+ (unsigned long long) backup, -+ (unsigned long long) entries, -+ (unsigned long long) entries_len, -+ (unsigned long long) start, -+ (unsigned long long) end); -+ -+ if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ return grub_errno; -+ -+ return GRUB_ERR_NONE; -+} -+ - static grub_err_t - grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) - { -@@ -246,11 
+316,13 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) - * but eventually this code should match the existing behavior. */ - gpt->log_sector_size = disk->log_sector_size; - -+ grub_dprintf ("gpt", "reading primary GPT from sector 0x1\n"); -+ - addr = grub_gpt_sector_to_addr (gpt, 1); - if (grub_disk_read (disk, addr, 0, sizeof (gpt->primary), &gpt->primary)) - return grub_errno; - -- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ if (grub_gpt_check_primary (gpt)) - return grub_errno; - - gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; -@@ -272,11 +344,14 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, - "Unable to locate backup GPT"); - -+ grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", -+ (unsigned long long) sector); -+ - addr = grub_gpt_sector_to_addr (gpt, sector); - if (grub_disk_read (disk, addr, 0, sizeof (gpt->backup), &gpt->backup)) - return grub_errno; - -- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ if (grub_gpt_check_backup (gpt)) - return grub_errno; - - gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; -@@ -289,6 +364,7 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - { - struct grub_gpt_partentry *entries = NULL; - grub_uint32_t count, size, crc; -+ grub_uint64_t sector; - grub_disk_addr_t addr; - grub_size_t entries_size; - -@@ -310,7 +386,12 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - if (!entries) - goto fail; - -- addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); -+ sector = grub_le_to_cpu64 (header->partitions); -+ grub_dprintf ("gpt", "reading GPT %lu entries from sector 0x%llx\n", -+ (unsigned long) count, -+ (unsigned long long) sector); -+ -+ addr = grub_gpt_sector_to_addr (gpt, sector); - if (grub_disk_read (disk, addr, 0, entries_size, entries)) - goto fail; - -@@ -336,6 +417,8 @@ grub_gpt_read (grub_disk_t disk) - { - grub_gpt_t gpt; - -+ grub_dprintf ("gpt", "reading GPT from %s\n", disk->name); -+ - gpt = grub_zalloc (sizeof (*gpt)); - if (!gpt) - goto fail; -@@ -369,12 +452,18 @@ grub_gpt_read (grub_disk_t disk) - /* Similarly, favor the value or error from the primary table. 
*/ - if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && - !grub_gpt_read_entries (disk, gpt, &gpt->backup)) -- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -+ { -+ grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); -+ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -+ } - - grub_errno = GRUB_ERR_NONE; - if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && - !grub_gpt_read_entries (disk, gpt, &gpt->primary)) -- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -+ { -+ grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); -+ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -+ } - - if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || - gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) -@@ -394,21 +483,25 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - { - grub_uint64_t backup_header, backup_entries; - -+ grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); -+ - if (disk->log_sector_size != gpt->log_sector_size) - return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, - "GPT sector size must match disk sector size"); - - if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || -- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) -+ gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) - return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); - - if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) - { -+ grub_dprintf ("gpt", "primary GPT header is valid\n"); - backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); - grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); - } - else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) - { -+ grub_dprintf ("gpt", "backup GPT header is valid\n"); - backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); - grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); - } -@@ -418,9 +511,13 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - /* Relocate backup to end if disk whenever possible. */ - if (grub_gpt_disk_size_valid(disk)) - backup_header = disk->total_sectors - 1; -+ grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", -+ (unsigned long long) backup_header); - - backup_entries = backup_header - - grub_gpt_size_to_sectors (gpt, gpt->entries_size); -+ grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", -+ (unsigned long long) backup_entries); - - /* Update/fixup header and partition table locations. */ - gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); -@@ -435,13 +532,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - return grub_errno; - - /* Sanity check. 
*/ -- if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) -+ if (grub_gpt_check_primary (gpt)) - return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); - -- if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) -+ if (grub_gpt_check_backup (gpt)) - return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); - - gpt->status |= GRUB_GPT_BOTH_VALID; -+ grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); -+ - return GRUB_ERR_NONE; - } - -@@ -497,9 +596,11 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) - if (!(gpt->status & GRUB_GPT_BOTH_VALID)) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); - -+ grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); - if (grub_gpt_write_table (disk, gpt, &gpt->primary)) - return grub_errno; - -+ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); - if (grub_gpt_write_table (disk, gpt, &gpt->backup)) - return grub_errno; - -From e7ae87c15b57c8bac757df45a83351c5c7a9cd10 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 10 Aug 2016 18:26:03 -0700 -Subject: [PATCH] gpt: improve validation of GPT headers - -Adds basic validation of all the disk locations in the headers, reducing -the chance of corrupting weird locations on disk. ---- - grub-core/lib/gpt.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 48 insertions(+) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index c2821b563..f83fe29ac 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -224,6 +224,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - unsigned int log_sector_size) - { - grub_uint32_t crc = 0, size; -+ grub_uint64_t start, end; - - if (grub_memcmp (gpt->magic, grub_gpt_magic, sizeof (grub_gpt_magic)) != 0) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT signature"); -@@ -245,9 +246,35 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - if (size < 128 || size % 128) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); - -+ /* And of course there better be some space for partitions! */ -+ start = grub_le_to_cpu64 (gpt->start); -+ end = grub_le_to_cpu64 (gpt->end); -+ if (start > end) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid usable sectors"); -+ - return GRUB_ERR_NONE; - } - -+static int -+grub_gpt_headers_equal (grub_gpt_t gpt) -+{ -+ /* Assume headers passed grub_gpt_header_check so skip magic and version. -+ * Individual fields must be checked instead of just using memcmp because -+ * crc32, header, alternate, and partitions will all normally differ. 
*/ -+ -+ if (gpt->primary.headersize != gpt->backup.headersize || -+ gpt->primary.header_lba != gpt->backup.alternate_lba || -+ gpt->primary.start != gpt->backup.start || -+ gpt->primary.end != gpt->backup.end || -+ gpt->primary.maxpart != gpt->backup.maxpart || -+ gpt->primary.partentry_size != gpt->backup.partentry_size || -+ gpt->primary.partentry_crc32 != gpt->backup.partentry_crc32) -+ return 0; -+ -+ return grub_memcmp(&gpt->primary.guid, &gpt->backup.guid, -+ sizeof(grub_gpt_guid_t)) == 0; -+} -+ - static grub_err_t - grub_gpt_check_primary (grub_gpt_t gpt) - { -@@ -273,6 +300,12 @@ grub_gpt_check_primary (grub_gpt_t gpt) - - if (grub_gpt_header_check (&gpt->primary, gpt->log_sector_size)) - return grub_errno; -+ if (primary != 1) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); -+ if (entries <= 1 || entries+entries_len > start) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); -+ if (backup <= end) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); - - return GRUB_ERR_NONE; - } -@@ -302,6 +335,12 @@ grub_gpt_check_backup (grub_gpt_t gpt) - - if (grub_gpt_header_check (&gpt->backup, gpt->log_sector_size)) - return grub_errno; -+ if (primary != 1) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid primary GPT LBA"); -+ if (entries <= end || entries+entries_len > backup) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid entries location"); -+ if (backup <= end) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); - - return GRUB_ERR_NONE; - } -@@ -354,6 +393,15 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - if (grub_gpt_check_backup (gpt)) - return grub_errno; - -+ /* Ensure the backup header thinks it is located where we found it. */ -+ if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); -+ -+ /* If both primary and backup are valid but differ prefer the primary. 
*/ -+ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && -+ !grub_gpt_headers_equal(gpt)) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); -+ - gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; - return GRUB_ERR_NONE; - } -From dfa966dcac4ff53e83f68be4643d33db6c9d98ef Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 11 Aug 2016 15:02:21 -0700 -Subject: [PATCH] gpt: refuse to write to sector 0 - ---- - grub-core/lib/gpt.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f83fe29ac..b7449911a 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -626,10 +626,17 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, - sizeof (*header)); - - addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->header_lba)); -+ if (addr == 0) -+ return grub_error (GRUB_ERR_BUG, -+ "Refusing to write GPT header to address 0x0"); - if (grub_disk_write (disk, addr, 0, sizeof (*header), header)) - return grub_errno; - - addr = grub_gpt_sector_to_addr (gpt, grub_le_to_cpu64 (header->partitions)); -+ if (addr < 2) -+ return grub_error (GRUB_ERR_BUG, -+ "Refusing to write GPT entries to address 0x%llx", -+ (unsigned long long) addr); - if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) - return grub_errno; - -From ac69188a7031e0255012900519f406baea7d9278 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Sat, 20 Aug 2016 17:42:12 -0700 -Subject: [PATCH] gpt: properly detect and repair invalid tables - -GPT_BOTH_VALID is 4 bits so simple a boolean check is not sufficient. -This broken condition allowed gptprio to trust bogus disk locations in -headers that were marked invalid causing arbitrary disk corruption. ---- - grub-core/commands/gptprio.c | 2 +- - grub-core/lib/gpt.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index ce5840b4e..6b61bb56d 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, - if (!gpt) - goto done; - -- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) -+ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) - if (grub_gpt_repair (dev->disk, gpt)) - goto done; - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index b7449911a..0daf3f8de 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -648,7 +648,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) - { - /* TODO: update/repair protective MBRs too. 
*/ - -- if (!(gpt->status & GRUB_GPT_BOTH_VALID)) -+ if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); - - grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); -From 2ec96d73aec713dc3f8b45f27fae4008fb9bb516 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Mon, 22 Aug 2016 16:44:30 -0700 -Subject: [PATCH] gptrepair_test: fix typo in cleanup trap - ---- - tests/gptrepair_test.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in -index 80b2de633..805dc171a 100644 ---- a/tests/gptrepair_test.in -+++ b/tests/gptrepair_test.in -@@ -53,7 +53,7 @@ case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in - esac - img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 - img2="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 --trap "rm -f '${img1}' '${ing2}'" EXIT -+trap "rm -f '${img1}' '${img2}'" EXIT - - create_disk_image () { - size=$1 -From 384c0976b3bb8fca413bd3b0af22604104824511 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Mon, 22 Aug 2016 16:45:10 -0700 -Subject: [PATCH] gptprio_test: check GPT is repaired when appropriate - ---- - tests/gptprio_test.in | 63 ++++++++++++++++++++++++++++++++++++++++++++++++--- - 1 file changed, 60 insertions(+), 3 deletions(-) - -diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in -index f4aea0dc9..c5cf0f3b7 100644 ---- a/tests/gptprio_test.in -+++ b/tests/gptprio_test.in -@@ -66,8 +66,9 @@ prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" - prio_uuid[4]="8aa0240d-98af-42b0-b32a-ccbe0572d62b" - - create_disk_image () { -+ size=$1 - rm -f "${img1}" -- dd if=/dev/zero of="${img1}" bs=512 count=1 seek=100 status=none -+ dd if=/dev/zero of="${img1}" bs=512 count=1 seek=$((size - 1)) status=none - ${sgdisk} \ - -n 1:0:+1 -c 1:ESP -t 1:ef00 \ - -n 2:0:+1 -c 2:A -t 2:"${prio_type}" -u 2:"${prio_uuid[2]}" \ -@@ -76,6 +77,35 @@ create_disk_image () { - "${img1}" >/dev/null - } - -+wipe_disk_area () { -+ sector=$1 -+ size=$2 -+ dd if=/dev/zero of="${img1}" bs=512 count=${size} seek=${sector} conv=notrunc status=none -+} -+ -+is_zero () { -+ sector=$1 -+ size=$2 -+ cmp -s -i $((sector * 512)) -n $((size * 512)) /dev/zero "${img1}" -+} -+ -+check_is_zero () { -+ sector=$1 -+ size=$2 -+ if ! is_zero "$@"; then -+ echo "$size sector(s) starting at $sector should be all zero" -+ exit 1 -+ fi -+} -+ -+check_not_zero () { -+ sector=$1 -+ size=$2 -+ if is_zero "$@"; then -+ echo "$size sector(s) starting at $sector should not be all zero" -+ exit 1 -+ fi -+} - - fmt_prio () { - priority=$(( ( $1 & 15 ) << 48 )) -@@ -93,10 +123,10 @@ set_prio () { - check_prio () { - part="$1" - expect=$(fmt_prio $2 $3 $4) -- result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" \ -+ result=$(LANG=C ${sgdisk} -i "${part}" "${img1}" 2>&1 \ - | awk '/^Attribute flags: / {print $3}') - if [[ "${expect}" != "${result}" ]]; then -- echo "Partition ${part} has attributes ${result}, not ${expect}" >&2 -+ echo "Partition ${part} has attributes ${result:-??}, not ${expect}" - exit 1 - fi - } -@@ -133,6 +163,33 @@ create_disk_image 100 - set_prio 2 3 2 1 - check_prio 2 3 2 1 - -+# Check gptprio works without modifying the disk when no update is required. -+# Leaves any existing corruption as is, repairing in the OS is better. 
-+create_disk_image 100 -+set_prio 2 1 0 1 -+wipe_disk_area 99 1 -+check_next 2 1 0 1 -+check_is_zero 99 1 -+ -+create_disk_image 100 -+set_prio 2 1 0 1 -+wipe_disk_area 1 1 -+check_next 2 1 0 1 -+check_is_zero 1 1 -+ -+# When writes do need to be made go ahead and perform the repair. -+create_disk_image 100 -+set_prio 2 1 1 0 -+wipe_disk_area 99 1 -+check_next 2 1 0 0 -+check_not_zero 99 1 -+ -+create_disk_image 100 -+set_prio 2 1 1 0 -+wipe_disk_area 1 1 -+check_next 2 1 0 0 -+check_not_zero 1 1 -+ - # Try two partitions before falling before falling back to a third - create_disk_image 100 - set_prio 2 3 3 0 -From 96d36c4ecdb8b3dfe0ecd81dff3c1cd6665e73c3 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 24 Aug 2016 16:14:20 -0700 -Subject: [PATCH] gpt: fix partition table indexing and validation - -Portions of the code attempted to handle the fact that GPT entries on -disk may be larger than the currently defined struct while others -assumed the data could be indexed by the struct size directly. This -never came up because no utility uses a size larger than 128 bytes but -for the sake of safety we need to do this by the spec. ---- - grub-core/commands/gptprio.c | 6 +-- - grub-core/lib/gpt.c | 51 +++++++++++++++--- - include/grub/gpt_partition.h | 11 +++- - tests/gpt_unit_test.c | 120 +++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 176 insertions(+), 12 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 6b61bb56d..548925a08 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -78,7 +78,7 @@ grub_find_next (const char *disk_name, - const grub_gpt_part_type_t *part_type, - char **part_name, char **part_guid) - { -- struct grub_gpt_partentry *part_found = NULL; -+ struct grub_gpt_partentry *part, *part_found = NULL; - grub_device_t dev = NULL; - grub_gpt_t gpt = NULL; - grub_uint32_t i, part_index; -@@ -95,10 +95,8 @@ grub_find_next (const char *disk_name, - if (grub_gpt_repair (dev->disk, gpt)) - goto done; - -- for (i = 0; i < grub_le_to_cpu32 (gpt->primary.maxpart); i++) -+ for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) - { -- struct grub_gpt_partentry *part = &gpt->entries[i]; -- - if (grub_memcmp (part_type, &part->type, sizeof (*part_type)) == 0) - { - unsigned int priority, tries_left, successful, old_priority = 0; -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 0daf3f8de..205779192 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -207,6 +207,13 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid protective MBR"); - } - -+static grub_uint64_t -+grub_gpt_entries_size (struct grub_gpt_header *gpt) -+{ -+ return (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * -+ (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); -+} -+ - static grub_uint64_t - grub_gpt_entries_sectors (struct grub_gpt_header *gpt, - unsigned int log_sector_size) -@@ -214,11 +221,16 @@ grub_gpt_entries_sectors (struct grub_gpt_header *gpt, - grub_uint64_t sector_bytes, entries_bytes; - - sector_bytes = 1ULL << log_sector_size; -- entries_bytes = (grub_uint64_t) grub_le_to_cpu32 (gpt->maxpart) * -- (grub_uint64_t) grub_le_to_cpu32 (gpt->partentry_size); -+ entries_bytes = grub_gpt_entries_size (gpt); - return grub_divmod64(entries_bytes + sector_bytes - 1, sector_bytes, NULL); - } - -+static int -+is_pow2 (grub_uint32_t n) -+{ -+ return (n & (n - 1)) == 0; -+} -+ - grub_err_t - grub_gpt_header_check 
(struct grub_gpt_header *gpt, - unsigned int log_sector_size) -@@ -236,16 +248,23 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, - if (gpt->crc32 != crc) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header crc32"); - -- /* The header size must be between 92 and the sector size. */ -+ /* The header size "must be greater than or equal to 92 and must be less -+ * than or equal to the logical block size." */ - size = grub_le_to_cpu32 (gpt->headersize); - if (size < 92U || size > (1U << log_sector_size)) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT header size"); - -- /* The partition entry size must be a multiple of 128. */ -+ /* The partition entry size must be "a value of 128*(2^n) where n is an -+ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ - size = grub_le_to_cpu32 (gpt->partentry_size); -- if (size < 128 || size % 128) -+ if (size < 128U || size % 128U || !is_pow2 (size / 128U)) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry size"); - -+ /* The minimum entries table size is specified in terms of bytes, -+ * regardless of how large the individual entry size is. */ -+ if (grub_gpt_entries_size (gpt) < GRUB_GPT_DEFAULT_ENTRIES_SIZE) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid GPT entry table size"); -+ - /* And of course there better be some space for partitions! */ - start = grub_le_to_cpu64 (gpt->start); - end = grub_le_to_cpu64 (gpt->end); -@@ -410,7 +429,7 @@ static grub_err_t - grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - struct grub_gpt_header *header) - { -- struct grub_gpt_partentry *entries = NULL; -+ void *entries = NULL; - grub_uint32_t count, size, crc; - grub_uint64_t sector; - grub_disk_addr_t addr; -@@ -526,6 +545,26 @@ fail: - return NULL; - } - -+struct grub_gpt_partentry * -+grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) -+{ -+ struct grub_gpt_header *header; -+ grub_size_t offset; -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ header = &gpt->primary; -+ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -+ header = &gpt->backup; -+ else -+ return NULL; -+ -+ if (n >= grub_le_to_cpu32 (header->maxpart)) -+ return NULL; -+ -+ offset = (grub_size_t) grub_le_to_cpu32 (header->partentry_size) * n; -+ return (struct grub_gpt_partentry *) ((char *) gpt->entries + offset); -+} -+ - grub_err_t - grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - { -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 4a6ed25b3..cc3a201a5 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -186,8 +186,10 @@ struct grub_gpt - struct grub_gpt_header primary; - struct grub_gpt_header backup; - -- /* Only need one entries table, on disk both copies are identical. */ -- struct grub_gpt_partentry *entries; -+ /* Only need one entries table, on disk both copies are identical. -+ * The on disk entry size may be larger than our partentry struct so -+ * the table cannot be indexed directly. */ -+ void *entries; - grub_size_t entries_size; - - /* Logarithm of sector size, in case GPT and disk driver disagree. */ -@@ -205,6 +207,11 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) - /* Allocates and fills new grub_gpt structure, free with grub_gpt_free. */ - grub_gpt_t grub_gpt_read (grub_disk_t disk); - -+/* Helper for indexing into the entries table. -+ * Returns NULL when the end of the table has been reached. 
*/ -+struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, -+ grub_uint32_t n); -+ - /* Sync up primary and backup headers, recompute checksums. */ - grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); - -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 60f601729..9cf3414c2 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -40,6 +40,13 @@ - /* from gnulib */ - #include - -+/* Confirm that the GPT structures conform to the sizes in the spec: -+ * The header size "must be greater than or equal to 92 and must be less -+ * than or equal to the logical block size." -+ * The partition entry size must be "a value of 128*(2^n) where n is an -+ * integer greater than or equal to zero (e.g., 128, 256, 512, etc.)." */ -+verify (sizeof (struct grub_gpt_header) == 92); -+verify (sizeof (struct grub_gpt_partentry) == 128); - - /* GPT section sizes. */ - #define HEADER_SIZE (sizeof (struct grub_gpt_header)) -@@ -537,6 +544,113 @@ repair_test (void) - close_disk (&data); - } - -+static void -+iterate_partitions_test (void) -+{ -+ struct test_data data; -+ struct grub_gpt_partentry *p; -+ grub_gpt_t gpt; -+ grub_uint32_t n; -+ -+ open_disk (&data); -+ gpt = read_disk (&data); -+ -+ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) -+ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, -+ "unexpected partition %d data", n); -+ -+ grub_test_assert (n == TABLE_ENTRIES, "unexpected partition limit: %d", n); -+ -+ grub_gpt_free (gpt); -+ close_disk (&data); -+} -+ -+static void -+large_partitions_test (void) -+{ -+ struct test_data data; -+ struct grub_gpt_partentry *p; -+ grub_gpt_t gpt; -+ grub_uint32_t n; -+ -+ open_disk (&data); -+ -+ /* Double the entry size, cut the number of entries in half. */ -+ data.raw->primary_header.maxpart = -+ data.raw->backup_header.maxpart = -+ grub_cpu_to_le32_compile_time (TABLE_ENTRIES/2); -+ data.raw->primary_header.partentry_size = -+ data.raw->backup_header.partentry_size = -+ grub_cpu_to_le32_compile_time (ENTRY_SIZE*2); -+ data.raw->primary_header.partentry_crc32 = -+ data.raw->backup_header.partentry_crc32 = -+ grub_cpu_to_le32_compile_time (0xf2c45af8); -+ data.raw->primary_header.crc32 = grub_cpu_to_le32_compile_time (0xde00cc8f); -+ data.raw->backup_header.crc32 = grub_cpu_to_le32_compile_time (0x6d72e284); -+ -+ memset (&data.raw->primary_entries, 0, -+ sizeof (data.raw->primary_entries)); -+ for (n = 0; n < TABLE_ENTRIES/2; n++) -+ memcpy (&data.raw->primary_entries[n*2], &example_entries[n], -+ sizeof (data.raw->primary_entries[0])); -+ memcpy (&data.raw->backup_entries, &data.raw->primary_entries, -+ sizeof (data.raw->backup_entries)); -+ -+ sync_disk(&data); -+ gpt = read_disk (&data); -+ -+ for (n = 0; (p = grub_gpt_get_partentry (gpt, n)) != NULL; n++) -+ grub_test_assert (memcmp (p, &example_entries[n], sizeof (*p)) == 0, -+ "unexpected partition %d data", n); -+ -+ grub_test_assert (n == TABLE_ENTRIES/2, "unexpected partition limit: %d", n); -+ -+ grub_gpt_free (gpt); -+ -+ /* Editing memory beyond the entry structure should still change the crc. 
*/ -+ data.raw->primary_entries[1].attrib = 0xff; -+ -+ sync_disk(&data); -+ gpt = read_disk (&data); -+ grub_test_assert (gpt->status == (GRUB_GPT_PROTECTIVE_MBR | -+ GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID), -+ "unexpected status: 0x%02x", gpt->status); -+ grub_gpt_free (gpt); -+ -+ close_disk (&data); -+} -+ -+static void -+invalid_partsize_test (void) -+{ -+ struct grub_gpt_header header = { -+ .magic = GRUB_GPT_HEADER_MAGIC, -+ .version = GRUB_GPT_HEADER_VERSION, -+ .headersize = sizeof (struct grub_gpt_header), -+ .crc32 = grub_cpu_to_le32_compile_time (0x1ff2a054), -+ .header_lba = grub_cpu_to_le64_compile_time (PRIMARY_HEADER_SECTOR), -+ .alternate_lba = grub_cpu_to_le64_compile_time (BACKUP_HEADER_SECTOR), -+ .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), -+ .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), -+ .guid = GRUB_GPT_GUID_INIT(0x69c131ad, 0x67d6, 0x46c6, -+ 0x93, 0xc4, 0x12, 0x4c, 0x75, 0x52, 0x56, 0xac), -+ .partitions = grub_cpu_to_le64_compile_time (PRIMARY_TABLE_SECTOR), -+ .maxpart = grub_cpu_to_le32_compile_time (TABLE_ENTRIES), -+ /* Triple the entry size, which is not valid. */ -+ .partentry_size = grub_cpu_to_le32_compile_time (ENTRY_SIZE*3), -+ .partentry_crc32 = grub_cpu_to_le32_compile_time (0x074e052c), -+ }; -+ -+ grub_gpt_header_check(&header, GRUB_DISK_SECTOR_BITS); -+ grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, -+ "unexpected error: %s", grub_errmsg); -+ grub_test_assert (strcmp(grub_errmsg, "invalid GPT entry size") == 0, -+ "unexpected error: %s", grub_errmsg); -+ grub_errno = GRUB_ERR_NONE; -+} -+ - static void - search_part_label_test (void) - { -@@ -657,6 +771,9 @@ grub_unit_test_init (void) - grub_test_register ("gpt_read_invalid_test", read_invalid_entries_test); - grub_test_register ("gpt_read_fallback_test", read_fallback_test); - grub_test_register ("gpt_repair_test", repair_test); -+ grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); -+ grub_test_register ("gpt_large_partitions_test", large_partitions_test); -+ grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); - grub_test_register ("gpt_search_part_label_test", search_part_label_test); - grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); - grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); -@@ -671,6 +788,9 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_read_invalid_test"); - grub_test_unregister ("gpt_read_fallback_test"); - grub_test_unregister ("gpt_repair_test"); -+ grub_test_unregister ("gpt_iterate_partitions_test"); -+ grub_test_unregister ("gpt_large_partitions_test"); -+ grub_test_unregister ("gpt_invalid_partsize_test"); - grub_test_unregister ("gpt_search_part_label_test"); - grub_test_unregister ("gpt_search_part_uuid_test"); - grub_test_unregister ("gpt_search_disk_uuid_test"); -From 3ef7f041f419f8c62f6d7b5c60dc1da45862c074 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Tue, 23 Aug 2016 13:09:14 -0700 -Subject: [PATCH] gpt: prefer disk size from header over firmware - -The firmware and the OS may disagree on the disk configuration and size. -Although such a setup should be avoided users are unlikely to know about -the problem, assuming everything behaves like the OS. Tolerate this as -best we can and trust the reported on-disk location over the firmware -when looking for the backup GPT. If the location is inaccessible report -the error as best we can and move on. 
---- - grub-core/lib/gpt.c | 18 +++++++++++++----- - tests/gpt_unit_test.c | 42 ++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 55 insertions(+), 5 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 205779192..f0c71bde1 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -394,13 +394,21 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - grub_disk_addr_t addr; - - /* Assumes gpt->log_sector_size == disk->log_sector_size */ -- if (grub_gpt_disk_size_valid(disk)) -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ { -+ sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); -+ if (grub_gpt_disk_size_valid (disk) && sector >= disk->total_sectors) -+ return grub_error (GRUB_ERR_OUT_OF_RANGE, -+ "backup GPT located at 0x%llx, " -+ "beyond last disk sector at 0x%llx", -+ (unsigned long long) sector, -+ (unsigned long long) disk->total_sectors - 1); -+ } -+ else if (grub_gpt_disk_size_valid (disk)) - sector = disk->total_sectors - 1; -- else if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -- sector = grub_le_to_cpu64 (gpt->primary.alternate_lba); - else -- return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, -- "Unable to locate backup GPT"); -+ return grub_error (GRUB_ERR_OUT_OF_RANGE, -+ "size of disk unknown, cannot locate backup GPT"); - - grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", - (unsigned long long) sector); -diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 9cf3414c2..218b18697 100644 ---- a/tests/gpt_unit_test.c -+++ b/tests/gpt_unit_test.c -@@ -544,6 +544,46 @@ repair_test (void) - close_disk (&data); - } - -+/* Finding/reading/writing the backup GPT may be difficult if the OS and -+ * BIOS report different sizes for the same disk. We need to gracefully -+ * recognize this and avoid causing trouble for the OS. */ -+static void -+weird_disk_size_test (void) -+{ -+ struct test_data data; -+ grub_gpt_t gpt; -+ -+ open_disk (&data); -+ -+ /* Chop off 65536 bytes (128 512B sectors) which may happen when the -+ * BIOS thinks you are using a software RAID system that reserves that -+ * area for metadata when in fact you are not and using the bare disk. */ -+ grub_test_assert(data.dev->disk->total_sectors == DISK_SECTORS, -+ "unexpected disk size: 0x%llx", -+ (unsigned long long) data.dev->disk->total_sectors); -+ data.dev->disk->total_sectors -= 128; -+ -+ gpt = read_disk (&data); -+ assert_error_stack_empty (); -+ /* Reading the alternate_lba should have been blocked and reading -+ * the (new) end of disk should have found no useful data. */ -+ grub_test_assert ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) == 0, -+ "unreported missing backup header"); -+ -+ /* We should be able to reconstruct the backup header and the location -+ * of the backup should remain unchanged, trusting the GPT data over -+ * what the BIOS is telling us. Further changes are left to the OS. 
*/ -+ grub_gpt_repair (data.dev->disk, gpt); -+ grub_test_assert (grub_errno == GRUB_ERR_NONE, -+ "repair failed: %s", grub_errmsg); -+ grub_test_assert (memcmp (&gpt->primary, &example_primary, -+ sizeof (gpt->primary)) == 0, -+ "repair corrupted primary header"); -+ -+ grub_gpt_free (gpt); -+ close_disk (&data); -+} -+ - static void - iterate_partitions_test (void) - { -@@ -774,6 +814,7 @@ grub_unit_test_init (void) - grub_test_register ("gpt_iterate_partitions_test", iterate_partitions_test); - grub_test_register ("gpt_large_partitions_test", large_partitions_test); - grub_test_register ("gpt_invalid_partsize_test", invalid_partsize_test); -+ grub_test_register ("gpt_weird_disk_size_test", weird_disk_size_test); - grub_test_register ("gpt_search_part_label_test", search_part_label_test); - grub_test_register ("gpt_search_uuid_test", search_part_uuid_test); - grub_test_register ("gpt_search_disk_uuid_test", search_disk_uuid_test); -@@ -791,6 +832,7 @@ grub_unit_test_fini (void) - grub_test_unregister ("gpt_iterate_partitions_test"); - grub_test_unregister ("gpt_large_partitions_test"); - grub_test_unregister ("gpt_invalid_partsize_test"); -+ grub_test_unregister ("gpt_weird_disk_size_test"); - grub_test_unregister ("gpt_search_part_label_test"); - grub_test_unregister ("gpt_search_part_uuid_test"); - grub_test_unregister ("gpt_search_disk_uuid_test"); -From d1a329f0d8b5f272b925dd1e54c7e1e93ec555ca Mon Sep 17 00:00:00 2001 -From: Vito Caputo -Date: Thu, 25 Aug 2016 17:21:18 -0700 -Subject: [PATCH] gpt: add helper for picking a valid header - -Eliminate some repetition in primary vs. backup header acquisition. ---- - grub-core/lib/gpt.c | 32 ++++++++++++++++++++------------ - 1 file changed, 20 insertions(+), 12 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f0c71bde1..2550ed87c 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -108,21 +108,32 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) - return GRUB_ERR_NONE; - } - -+static struct grub_gpt_header * -+grub_gpt_get_header (grub_gpt_t gpt) -+{ -+ if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ return &gpt->primary; -+ else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -+ return &gpt->backup; -+ -+ grub_error (GRUB_ERR_BUG, "No valid GPT header"); -+ return NULL; -+} -+ - grub_err_t - grub_gpt_disk_uuid (grub_device_t device, char **uuid) - { -+ struct grub_gpt_header *header; -+ - grub_gpt_t gpt = grub_gpt_read (device->disk); - if (!gpt) - goto done; - -- grub_errno = GRUB_ERR_NONE; -+ header = grub_gpt_get_header (gpt); -+ if (!header) -+ goto done; - -- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -- *uuid = grub_gpt_guid_to_str (&gpt->primary.guid); -- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -- *uuid = grub_gpt_guid_to_str (&gpt->backup.guid); -- else -- grub_errno = grub_error (GRUB_ERR_BUG, "No valid GPT header"); -+ *uuid = grub_gpt_guid_to_str (&header->guid); - - done: - grub_gpt_free (gpt); -@@ -559,11 +570,8 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) - struct grub_gpt_header *header; - grub_size_t offset; - -- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -- header = &gpt->primary; -- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -- header = &gpt->backup; -- else -+ header = grub_gpt_get_header (gpt); -+ if (!header) - return NULL; - - if (n >= grub_le_to_cpu32 (header->maxpart)) -From e2074ff46920d5332cbc3209160b7987da76080b Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Tue, 20 Sep 2016 13:06:05 -0700 -Subject: 
[PATCH] gptrepair: fix status checking - -None of these status bit checks were correct. Fix and simplify. ---- - grub-core/commands/gptrepair.c | 28 +++++++++++----------------- - 1 file changed, 11 insertions(+), 17 deletions(-) - -diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c -index 38392fd8f..66ac3f7c7 100644 ---- a/grub-core/commands/gptrepair.c -+++ b/grub-core/commands/gptrepair.c -@@ -46,8 +46,6 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), - grub_device_t dev = NULL; - grub_gpt_t gpt = NULL; - char *dev_name; -- grub_uint32_t primary_crc, backup_crc; -- enum grub_gpt_status old_status; - - if (argc != 1 || !grub_strlen(args[0])) - return grub_error (GRUB_ERR_BAD_ARGUMENT, "device name required"); -@@ -67,29 +65,25 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), - if (!gpt) - goto done; - -- primary_crc = gpt->primary.crc32; -- backup_crc = gpt->backup.crc32; -- old_status = gpt->status; -- -- if (grub_gpt_repair (dev->disk, gpt)) -- goto done; -- -- if (primary_crc == gpt->primary.crc32 && -- backup_crc == gpt->backup.crc32 && -- old_status && gpt->status) -+ if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) - { - grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); - goto done; - } - -- if (grub_gpt_write (dev->disk, gpt)) -+ if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) -+ grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); -+ -+ if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) -+ grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); -+ -+ if (grub_gpt_repair (dev->disk, gpt)) - goto done; - -- if (!(old_status & GRUB_GPT_PRIMARY_VALID)) -- grub_printf_ (N_("Primary GPT for %s repaired.\n"), dev_name); -+ if (grub_gpt_write (dev->disk, gpt)) -+ goto done; - -- if (!(old_status & GRUB_GPT_BACKUP_VALID)) -- grub_printf_ (N_("Backup GPT for %s repaired.\n"), dev_name); -+ grub_printf_ (N_("Repaired GPT on %s\n"), dev_name); - - done: - if (gpt) -From f51e80579fd2b69d64dd98c0a8be44eb65556363 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Tue, 20 Sep 2016 12:43:01 -0700 -Subject: [PATCH] gpt: use inline functions for checking status bits - -This should prevent bugs like 6078f836 and 4268f3da. 
---- - grub-core/commands/gptprio.c | 2 +- - grub-core/commands/gptrepair.c | 6 +++--- - grub-core/lib/gpt.c | 9 +++++++-- - include/grub/gpt_partition.h | 35 ++++++++++++++++++++++++++++------- - 4 files changed, 39 insertions(+), 13 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 548925a08..25f867a81 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, - if (!gpt) - goto done; - -- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) -+ if (!grub_gpt_both_valid(gpt)) - if (grub_gpt_repair (dev->disk, gpt)) - goto done; - -diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c -index 66ac3f7c7..c17c7346c 100644 ---- a/grub-core/commands/gptrepair.c -+++ b/grub-core/commands/gptrepair.c -@@ -65,16 +65,16 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), - if (!gpt) - goto done; - -- if ((gpt->status & GRUB_GPT_BOTH_VALID) == GRUB_GPT_BOTH_VALID) -+ if (grub_gpt_both_valid (gpt)) - { - grub_printf_ (N_("GPT already valid, %s unmodified.\n"), dev_name); - goto done; - } - -- if ((gpt->status & GRUB_GPT_PRIMARY_VALID) != GRUB_GPT_PRIMARY_VALID) -+ if (!grub_gpt_primary_valid (gpt)) - grub_printf_ (N_("Found invalid primary GPT on %s\n"), dev_name); - -- if ((gpt->status & GRUB_GPT_BACKUP_VALID) != GRUB_GPT_BACKUP_VALID) -+ if (!grub_gpt_backup_valid (gpt)) - grub_printf_ (N_("Found invalid backup GPT on %s\n"), dev_name); - - if (grub_gpt_repair (dev->disk, gpt)) -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 2550ed87c..3e077c497 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -638,10 +638,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - if (grub_gpt_check_primary (gpt)) - return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); - -+ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_PRIMARY_ENTRIES_VALID); -+ - if (grub_gpt_check_backup (gpt)) - return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); - -- gpt->status |= GRUB_GPT_BOTH_VALID; -+ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID); -+ - grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); - - return GRUB_ERR_NONE; -@@ -703,7 +708,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) - { - /* TODO: update/repair protective MBRs too. */ - -- if ((gpt->status & GRUB_GPT_BOTH_VALID) != GRUB_GPT_BOTH_VALID) -+ if (!grub_gpt_both_valid (gpt)) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); - - grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index cc3a201a5..39388ce6e 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -161,13 +161,6 @@ typedef enum grub_gpt_status - GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, - } grub_gpt_status_t; - --#define GRUB_GPT_MBR_VALID (GRUB_GPT_PROTECTIVE_MBR|GRUB_GPT_HYBRID_MBR) --#define GRUB_GPT_PRIMARY_VALID \ -- (GRUB_GPT_PRIMARY_HEADER_VALID|GRUB_GPT_PRIMARY_ENTRIES_VALID) --#define GRUB_GPT_BACKUP_VALID \ -- (GRUB_GPT_BACKUP_HEADER_VALID|GRUB_GPT_BACKUP_ENTRIES_VALID) --#define GRUB_GPT_BOTH_VALID (GRUB_GPT_PRIMARY_VALID|GRUB_GPT_BACKUP_VALID) -- - /* UEFI requires the entries table to be at least 16384 bytes for a - * total of 128 entries given the standard 128 byte entry size. 
*/ - #define GRUB_GPT_DEFAULT_ENTRIES_SIZE 16384 -@@ -197,6 +190,34 @@ struct grub_gpt - }; - typedef struct grub_gpt *grub_gpt_t; - -+/* Helpers for checking the gpt status field. */ -+static inline int -+grub_gpt_mbr_valid (grub_gpt_t gpt) -+{ -+ return ((gpt->status & GRUB_GPT_PROTECTIVE_MBR) || -+ (gpt->status & GRUB_GPT_HYBRID_MBR)); -+} -+ -+static inline int -+grub_gpt_primary_valid (grub_gpt_t gpt) -+{ -+ return ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && -+ (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID)); -+} -+ -+static inline int -+grub_gpt_backup_valid (grub_gpt_t gpt) -+{ -+ return ((gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) && -+ (gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)); -+} -+ -+static inline int -+grub_gpt_both_valid (grub_gpt_t gpt) -+{ -+ return grub_gpt_primary_valid (gpt) && grub_gpt_backup_valid (gpt); -+} -+ - /* Translate GPT sectors to GRUB's 512 byte block addresses. */ - static inline grub_disk_addr_t - grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) -From 025c41dafe285a36dae7ff1b4217520d7839bdb4 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Tue, 20 Sep 2016 13:40:11 -0700 -Subject: [PATCH] gpt: allow repair function to noop - -Simplifies usage a little. ---- - grub-core/commands/gptprio.c | 5 ++--- - grub-core/lib/gpt.c | 4 ++++ - 2 files changed, 6 insertions(+), 3 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 25f867a81..a439552e1 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -91,9 +91,8 @@ grub_find_next (const char *disk_name, - if (!gpt) - goto done; - -- if (!grub_gpt_both_valid(gpt)) -- if (grub_gpt_repair (dev->disk, gpt)) -- goto done; -+ if (grub_gpt_repair (dev->disk, gpt)) -+ goto done; - - for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) - { -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3e077c497..9bb19678d 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -586,6 +586,10 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - { - grub_uint64_t backup_header, backup_entries; - -+ /* Skip if there is nothing to do. */ -+ if (grub_gpt_both_valid (gpt)) -+ return GRUB_ERR_NONE; -+ - grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); - - if (disk->log_sector_size != gpt->log_sector_size) -From d52abba6dabec22edfa420eddf60c8f4d41b7f32 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 13:22:06 -0700 -Subject: [PATCH] gpt: do not use an enum for status bit values - ---- - include/grub/gpt_partition.h | 19 +++++++++---------- - 1 file changed, 9 insertions(+), 10 deletions(-) - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 39388ce6e..ee435d73b 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -151,15 +151,14 @@ grub_gpt_partition_map_iterate (grub_disk_t disk, - void *hook_data); - - /* Advanced GPT library. */ --typedef enum grub_gpt_status -- { -- GRUB_GPT_PROTECTIVE_MBR = 0x01, -- GRUB_GPT_HYBRID_MBR = 0x02, -- GRUB_GPT_PRIMARY_HEADER_VALID = 0x04, -- GRUB_GPT_PRIMARY_ENTRIES_VALID = 0x08, -- GRUB_GPT_BACKUP_HEADER_VALID = 0x10, -- GRUB_GPT_BACKUP_ENTRIES_VALID = 0x20, -- } grub_gpt_status_t; -+ -+/* Status bits for the grub_gpt.status field. 
*/ -+#define GRUB_GPT_PROTECTIVE_MBR 0x01 -+#define GRUB_GPT_HYBRID_MBR 0x02 -+#define GRUB_GPT_PRIMARY_HEADER_VALID 0x04 -+#define GRUB_GPT_PRIMARY_ENTRIES_VALID 0x08 -+#define GRUB_GPT_BACKUP_HEADER_VALID 0x10 -+#define GRUB_GPT_BACKUP_ENTRIES_VALID 0x20 - - /* UEFI requires the entries table to be at least 16384 bytes for a - * total of 128 entries given the standard 128 byte entry size. */ -@@ -170,7 +169,7 @@ typedef enum grub_gpt_status - struct grub_gpt - { - /* Bit field indicating which structures on disk are valid. */ -- grub_gpt_status_t status; -+ unsigned status; - - /* Protective or hybrid MBR. */ - struct grub_msdos_partition_mbr mbr; -From 294ebeeff46ab1a778ca070e64cab7eb3e8a6581 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 13:44:11 -0700 -Subject: [PATCH] gpt: check header and entries status bits together - -Use the new status function which checks *_HEADER_VALID and -*_ENTRIES_VALID bits together. It doesn't make sense for the header and -entries bits to mismatch so don't allow for it. ---- - grub-core/lib/gpt.c | 14 +++++--------- - 1 file changed, 5 insertions(+), 9 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 9bb19678d..3c6ff3540 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -596,24 +596,20 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, - "GPT sector size must match disk sector size"); - -- if (!(gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || -- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID)) -- return grub_error (GRUB_ERR_BUG, "No valid GPT entries"); -- -- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) -+ if (grub_gpt_primary_valid (gpt)) - { -- grub_dprintf ("gpt", "primary GPT header is valid\n"); -+ grub_dprintf ("gpt", "primary GPT is valid\n"); - backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); - grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); - } -- else if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -+ else if (grub_gpt_backup_valid (gpt)) - { -- grub_dprintf ("gpt", "backup GPT header is valid\n"); -+ grub_dprintf ("gpt", "backup GPT is valid\n"); - backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); - grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); - } - else -- return grub_error (GRUB_ERR_BUG, "No valid GPT header"); -+ return grub_error (GRUB_ERR_BUG, "No valid GPT"); - - /* Relocate backup to end if disk whenever possible. */ - if (grub_gpt_disk_size_valid(disk)) -From 27fcd95383567f5f1c1b0760b8a1d7e528e2a802 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 13:52:52 -0700 -Subject: [PATCH] gpt: be more careful about relocating backup header - -The header was being relocated without checking the new location is -actually safe. If the BIOS thinks the disk is smaller than the OS then -repair may relocate the header into allocated space, failing the final -validation check. So only move it if the disk has grown. - -Additionally, if the backup is valid then we can assume its current -location is good enough and leave it as-is. 
---- - grub-core/lib/gpt.c | 16 ++++++++++------ - 1 file changed, 10 insertions(+), 6 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3c6ff3540..35e65d8d9 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -599,7 +599,17 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - if (grub_gpt_primary_valid (gpt)) - { - grub_dprintf ("gpt", "primary GPT is valid\n"); -+ -+ /* Relocate backup to end if disk if the disk has grown. */ - backup_header = grub_le_to_cpu64 (gpt->primary.alternate_lba); -+ if (grub_gpt_disk_size_valid (disk) && -+ disk->total_sectors - 1 > backup_header) -+ { -+ backup_header = disk->total_sectors - 1; -+ grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", -+ (unsigned long long) backup_header); -+ } -+ - grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); - } - else if (grub_gpt_backup_valid (gpt)) -@@ -611,12 +621,6 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - else - return grub_error (GRUB_ERR_BUG, "No valid GPT"); - -- /* Relocate backup to end if disk whenever possible. */ -- if (grub_gpt_disk_size_valid(disk)) -- backup_header = disk->total_sectors - 1; -- grub_dprintf ("gpt", "backup GPT header will be located at 0x%llx\n", -- (unsigned long long) backup_header); -- - backup_entries = backup_header - - grub_gpt_size_to_sectors (gpt, gpt->entries_size); - grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", -From eac47a495c3994aecbb66d12b90057dc2e51bde5 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 14:33:48 -0700 -Subject: [PATCH] gpt: selectively update fields during repair - -Just a little cleanup/refactor to skip touching data we don't need to. ---- - grub-core/lib/gpt.c | 28 ++++++++++++---------------- - 1 file changed, 12 insertions(+), 16 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 35e65d8d9..03e807b25 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -584,8 +584,6 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) - grub_err_t - grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - { -- grub_uint64_t backup_header, backup_entries; -- - /* Skip if there is nothing to do. */ - if (grub_gpt_both_valid (gpt)) - return GRUB_ERR_NONE; -@@ -598,6 +596,8 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - - if (grub_gpt_primary_valid (gpt)) - { -+ grub_uint64_t backup_header; -+ - grub_dprintf ("gpt", "primary GPT is valid\n"); - - /* Relocate backup to end if disk if the disk has grown. 
*/ -@@ -608,32 +608,28 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - backup_header = disk->total_sectors - 1; - grub_dprintf ("gpt", "backup GPT header relocated to 0x%llx\n", - (unsigned long long) backup_header); -+ -+ gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); - } - - grub_memcpy (&gpt->backup, &gpt->primary, sizeof (gpt->backup)); -+ gpt->backup.header_lba = gpt->primary.alternate_lba; -+ gpt->backup.alternate_lba = gpt->primary.header_lba; -+ gpt->backup.partitions = grub_cpu_to_le64 (backup_header - -+ grub_gpt_size_to_sectors (gpt, gpt->entries_size)); - } - else if (grub_gpt_backup_valid (gpt)) - { - grub_dprintf ("gpt", "backup GPT is valid\n"); -- backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); -+ - grub_memcpy (&gpt->primary, &gpt->backup, sizeof (gpt->primary)); -+ gpt->primary.header_lba = gpt->backup.alternate_lba; -+ gpt->primary.alternate_lba = gpt->backup.header_lba; -+ gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); - } - else - return grub_error (GRUB_ERR_BUG, "No valid GPT"); - -- backup_entries = backup_header - -- grub_gpt_size_to_sectors (gpt, gpt->entries_size); -- grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", -- (unsigned long long) backup_entries); -- -- /* Update/fixup header and partition table locations. */ -- gpt->primary.header_lba = grub_cpu_to_le64_compile_time (1); -- gpt->primary.alternate_lba = grub_cpu_to_le64 (backup_header); -- gpt->primary.partitions = grub_cpu_to_le64_compile_time (2); -- gpt->backup.header_lba = gpt->primary.alternate_lba; -- gpt->backup.alternate_lba = gpt->primary.header_lba; -- gpt->backup.partitions = grub_cpu_to_le64 (backup_entries); -- - /* Recompute checksums. */ - if (grub_gpt_update_checksums (gpt)) - return grub_errno; -From dab5d9e809c0cea5c6b5d0f5ba093068465fe1cb Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 14:55:19 -0700 -Subject: [PATCH] gpt: always revalidate when recomputing checksums - -This ensures all code modifying GPT data include the same sanity check -that repair does. If revalidation fails the status flags are left in the -appropriate state. ---- - grub-core/lib/gpt.c | 32 ++++++++++++++++++-------------- - 1 file changed, 18 insertions(+), 14 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 03e807b25..3ac2987c6 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -630,23 +630,9 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - else - return grub_error (GRUB_ERR_BUG, "No valid GPT"); - -- /* Recompute checksums. */ - if (grub_gpt_update_checksums (gpt)) - return grub_errno; - -- /* Sanity check. */ -- if (grub_gpt_check_primary (gpt)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -- -- gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | -- GRUB_GPT_PRIMARY_ENTRIES_VALID); -- -- if (grub_gpt_check_backup (gpt)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -- -- gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | -- GRUB_GPT_BACKUP_ENTRIES_VALID); -- - grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); - - return GRUB_ERR_NONE; -@@ -657,6 +643,12 @@ grub_gpt_update_checksums (grub_gpt_t gpt) - { - grub_uint32_t crc; - -+ /* Clear status bits, require revalidation of everything. 
*/ -+ gpt->status &= ~(GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_PRIMARY_ENTRIES_VALID | -+ GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID); -+ - /* Writing headers larger than our header structure are unsupported. */ - gpt->primary.headersize = - grub_cpu_to_le32_compile_time (sizeof (gpt->primary)); -@@ -670,6 +662,18 @@ grub_gpt_update_checksums (grub_gpt_t gpt) - grub_gpt_header_lecrc32 (&gpt->primary.crc32, &gpt->primary); - grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); - -+ if (grub_gpt_check_primary (gpt)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -+ -+ gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | -+ GRUB_GPT_PRIMARY_ENTRIES_VALID); -+ -+ if (grub_gpt_check_backup (gpt)) -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -+ -+ gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | -+ GRUB_GPT_BACKUP_ENTRIES_VALID); -+ - return GRUB_ERR_NONE; - } - -From de5adec64e3b72c79b08b398f24152e84ab89b5f Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 15:01:09 -0700 -Subject: [PATCH] gpt: include backup-in-sync check in revalidation - ---- - grub-core/lib/gpt.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3ac2987c6..c27bcc510 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -372,6 +372,11 @@ grub_gpt_check_backup (grub_gpt_t gpt) - if (backup <= end) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); - -+ /* If both primary and backup are valid but differ prefer the primary. */ -+ if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && -+ !grub_gpt_headers_equal (gpt)) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); -+ - return GRUB_ERR_NONE; - } - -@@ -435,11 +440,6 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - if (grub_le_to_cpu64 (gpt->backup.header_lba) != sector) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); - -- /* If both primary and backup are valid but differ prefer the primary. */ -- if ((gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID) && -- !grub_gpt_headers_equal(gpt)) -- return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT of of sync"); -- - gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; - return GRUB_ERR_NONE; - } -From 7b0ccb8fcc584be82e3f4778aad8afe512468f3b Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 15:29:55 -0700 -Subject: [PATCH] gpt: read entries table at the same time as the header - -I personally think this reads easier. Also has the side effect of -directly comparing the primary and backup tables instead of presuming -they are equal if the crc32 matches. 
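
In outline, the backup read path ends up doing roughly the following (a condensed sketch of the diff below, using the helpers from this series; error paths and most status-bit bookkeeping are elided):

    /* backup: read the header, then its entries table */
    if (grub_gpt_read_entries (disk, gpt, &gpt->backup, &entries, &entries_size))
      return grub_errno;

    if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID)
      {
        /* primary entries were already read: require the backup to match byte for byte */
        if (entries_size != gpt->entries_size
            || grub_memcmp (entries, gpt->entries, entries_size) != 0)
          return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync");
        grub_free (entries);
      }
    else
      {
        /* no valid primary: adopt the backup's entries table instead */
        gpt->entries = entries;
        gpt->entries_size = entries_size;
      }
    gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID;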
---- - grub-core/lib/gpt.c | 69 +++++++++++++++++++++++++++++++---------------------- - 1 file changed, 41 insertions(+), 28 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index c27bcc510..b93cedea1 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -32,6 +32,11 @@ GRUB_MOD_LICENSE ("GPLv3+"); - - static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; - -+static grub_err_t -+grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, -+ struct grub_gpt_header *header, -+ void **ret_entries, -+ grub_size_t *ret_entries_size); - - char * - grub_gpt_guid_to_str (grub_gpt_guid_t *guid) -@@ -400,12 +405,21 @@ grub_gpt_read_primary (grub_disk_t disk, grub_gpt_t gpt) - return grub_errno; - - gpt->status |= GRUB_GPT_PRIMARY_HEADER_VALID; -+ -+ if (grub_gpt_read_entries (disk, gpt, &gpt->primary, -+ &gpt->entries, &gpt->entries_size)) -+ return grub_errno; -+ -+ gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -+ - return GRUB_ERR_NONE; - } - - static grub_err_t - grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - { -+ void *entries = NULL; -+ grub_size_t entries_size; - grub_uint64_t sector; - grub_disk_addr_t addr; - -@@ -441,12 +455,35 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "invalid backup GPT LBA"); - - gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; -+ -+ if (grub_gpt_read_entries (disk, gpt, &gpt->backup, -+ &entries, &entries_size)) -+ return grub_errno; -+ -+ if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID) -+ { -+ if (entries_size != gpt->entries_size || -+ grub_memcmp (entries, gpt->entries, entries_size) != 0) -+ return grub_error (GRUB_ERR_BAD_PART_TABLE, "backup GPT out of sync"); -+ -+ grub_free (entries); -+ } -+ else -+ { -+ gpt->entries = entries; -+ gpt->entries_size = entries_size; -+ } -+ -+ gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -+ - return GRUB_ERR_NONE; - } - - static grub_err_t - grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, -- struct grub_gpt_header *header) -+ struct grub_gpt_header *header, -+ void **ret_entries, -+ grub_size_t *ret_entries_size) - { - void *entries = NULL; - grub_uint32_t count, size, crc; -@@ -488,9 +525,8 @@ grub_gpt_read_entries (grub_disk_t disk, grub_gpt_t gpt, - goto fail; - } - -- grub_free (gpt->entries); -- gpt->entries = entries; -- gpt->entries_size = entries_size; -+ *ret_entries = entries; -+ *ret_entries_size = entries_size; - return GRUB_ERR_NONE; - - fail: -@@ -529,30 +565,7 @@ grub_gpt_read (grub_disk_t disk) - grub_gpt_read_backup (disk, gpt); - - /* If either succeeded clear any possible error from the other. */ -- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID || -- gpt->status & GRUB_GPT_BACKUP_HEADER_VALID) -- grub_errno = GRUB_ERR_NONE; -- else -- goto fail; -- -- /* Similarly, favor the value or error from the primary table. 
*/ -- if (gpt->status & GRUB_GPT_BACKUP_HEADER_VALID && -- !grub_gpt_read_entries (disk, gpt, &gpt->backup)) -- { -- grub_dprintf ("gpt", "read valid backup GPT from %s\n", disk->name); -- gpt->status |= GRUB_GPT_BACKUP_ENTRIES_VALID; -- } -- -- grub_errno = GRUB_ERR_NONE; -- if (gpt->status & GRUB_GPT_PRIMARY_HEADER_VALID && -- !grub_gpt_read_entries (disk, gpt, &gpt->primary)) -- { -- grub_dprintf ("gpt", "read valid primary GPT from %s\n", disk->name); -- gpt->status |= GRUB_GPT_PRIMARY_ENTRIES_VALID; -- } -- -- if (gpt->status & GRUB_GPT_PRIMARY_ENTRIES_VALID || -- gpt->status & GRUB_GPT_BACKUP_ENTRIES_VALID) -+ if (grub_gpt_primary_valid (gpt) || grub_gpt_backup_valid (gpt)) - grub_errno = GRUB_ERR_NONE; - else - goto fail; -From ff51b717a6122c2811fe221dd65a29a03dcef347 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Wed, 21 Sep 2016 16:02:53 -0700 -Subject: [PATCH] gpt: report all revalidation errors - -Before returning an error that the primary or backup GPT is invalid push -the existing error onto the stack so the user will be told what is bad. ---- - grub-core/lib/gpt.c | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index b93cedea1..f6f853309 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -676,13 +676,19 @@ grub_gpt_update_checksums (grub_gpt_t gpt) - grub_gpt_header_lecrc32 (&gpt->backup.crc32, &gpt->backup); - - if (grub_gpt_check_primary (gpt)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -+ { -+ grub_error_push (); -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT primary header"); -+ } - - gpt->status |= (GRUB_GPT_PRIMARY_HEADER_VALID | - GRUB_GPT_PRIMARY_ENTRIES_VALID); - - if (grub_gpt_check_backup (gpt)) -- return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -+ { -+ grub_error_push (); -+ return grub_error (GRUB_ERR_BUG, "Generated invalid GPT backup header"); -+ } - - gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | - GRUB_GPT_BACKUP_ENTRIES_VALID); -From 4eb61a681cd1f4217dd19903b67c8fb141eafd82 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 22 Sep 2016 10:00:27 -0700 -Subject: [PATCH] gpt: rename and update documentation for grub_gpt_update - -The function now does more than just recompute checksums so give it a -more general name to reflect that. 
---- - grub-core/commands/gptprio.c | 2 +- - grub-core/lib/gpt.c | 4 ++-- - include/grub/gpt_partition.h | 7 ++++--- - 3 files changed, 7 insertions(+), 6 deletions(-) - -diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index a439552e1..4a24fa62d 100644 ---- a/grub-core/commands/gptprio.c -+++ b/grub-core/commands/gptprio.c -@@ -127,7 +127,7 @@ grub_find_next (const char *disk_name, - - grub_gptprio_set_tries_left (part_found, tries_left - 1); - -- if (grub_gpt_update_checksums (gpt)) -+ if (grub_gpt_update (gpt)) - goto done; - - if (grub_gpt_write (dev->disk, gpt)) -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f6f853309..430404848 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -643,7 +643,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - else - return grub_error (GRUB_ERR_BUG, "No valid GPT"); - -- if (grub_gpt_update_checksums (gpt)) -+ if (grub_gpt_update (gpt)) - return grub_errno; - - grub_dprintf ("gpt", "repairing GPT for %s successful\n", disk->name); -@@ -652,7 +652,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) - } - - grub_err_t --grub_gpt_update_checksums (grub_gpt_t gpt) -+grub_gpt_update (grub_gpt_t gpt) - { - grub_uint32_t crc; - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index ee435d73b..4730fe362 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -232,11 +232,12 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); - struct grub_gpt_partentry * grub_gpt_get_partentry (grub_gpt_t gpt, - grub_uint32_t n); - --/* Sync up primary and backup headers, recompute checksums. */ -+/* Sync and update primary and backup headers if either are invalid. */ - grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); - --/* Recompute checksums, must be called after modifying GPT data. */ --grub_err_t grub_gpt_update_checksums (grub_gpt_t gpt); -+/* Recompute checksums and revalidate everything, must be called after -+ * modifying any GPT data. */ -+grub_err_t grub_gpt_update (grub_gpt_t gpt); - - /* Write headers and entry tables back to disk. */ - grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); -From 313d88eab9e5c11a168c45978feccdd52b5ec6e5 Mon Sep 17 00:00:00 2001 -From: Michael Marineau -Date: Thu, 22 Sep 2016 11:18:42 -0700 -Subject: [PATCH] gpt: write backup GPT first, skip if inaccessible. - -Writing the primary GPT before the backup may lead to a confusing -situation: booting a freshly updated system could consistently fail and -next boot will fall back to the old system if writing the primary works -but writing the backup fails. If the backup is written first and fails -the primary is left in the old state so the next boot will re-try and -possibly fail in the exact same way. Making that repeatable should make -it easier for users to identify the error. - -Additionally if the firmware and OS disagree on the disk size, making -the backup inaccessible to GRUB, then just skip writing the backup. -When this happens the automatic call to `coreos-setgoodroot` after boot -will take care of repairing the backup. 
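
In outline, grub_gpt_write is reordered roughly as follows (a condensed sketch of the diff below; the printed warning text is elided here):

    backup_header = grub_le_to_cpu64 (gpt->backup.header_lba);

    if (grub_gpt_disk_size_valid (disk) && backup_header >= disk->total_sectors)
      {
        /* firmware reports a smaller disk than the GPT expects: skip the
           backup and let the OS repair it after boot */
      }
    else if (grub_gpt_write_table (disk, gpt, &gpt->backup))
      return grub_errno;   /* backup write failed first: primary (old GPT) left intact */

    if (grub_gpt_write_table (disk, gpt, &gpt->primary))
      return grub_errno;

    return GRUB_ERR_NONE;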
---- - grub-core/lib/gpt.c | 28 ++++++++++++++++++++++++---- - 1 file changed, 24 insertions(+), 4 deletions(-) - -diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 430404848..c3e3a25f9 100644 ---- a/grub-core/lib/gpt.c -+++ b/grub-core/lib/gpt.c -@@ -729,19 +729,39 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, - grub_err_t - grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) - { -+ grub_uint64_t backup_header; -+ - /* TODO: update/repair protective MBRs too. */ - - if (!grub_gpt_both_valid (gpt)) - return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); - -+ /* Write the backup GPT first so if writing fails the update is aborted -+ * and the primary is left intact. However if the backup location is -+ * inaccessible we have to just skip and hope for the best, the backup -+ * will need to be repaired in the OS. */ -+ backup_header = grub_le_to_cpu64 (gpt->backup.header_lba); -+ if (grub_gpt_disk_size_valid (disk) && -+ backup_header >= disk->total_sectors) -+ { -+ grub_printf ("warning: backup GPT located at 0x%llx, " -+ "beyond last disk sector at 0x%llx\n", -+ (unsigned long long) backup_header, -+ (unsigned long long) disk->total_sectors - 1); -+ grub_printf ("warning: only writing primary GPT, " -+ "the backup GPT must be repaired from the OS\n"); -+ } -+ else -+ { -+ grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); -+ if (grub_gpt_write_table (disk, gpt, &gpt->backup)) -+ return grub_errno; -+ } -+ - grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); - if (grub_gpt_write_table (disk, gpt, &gpt->primary)) - return grub_errno; - -- grub_dprintf ("gpt", "writing backup GPT to %s\n", disk->name); -- if (grub_gpt_write_table (disk, gpt, &gpt->backup)) -- return grub_errno; -- - return GRUB_ERR_NONE; - } - -From 290b82244de9e527ced46a9a04e2f7c6817f51e5 Mon Sep 17 00:00:00 2001 -From: iliana destroyer of worlds -Date: Thu, 28 Mar 2019 16:28:41 -0700 -Subject: [PATCH] Generate new gptprio partition type - -Signed-off-by: iliana destroyer of worlds ---- - include/grub/gpt_partition.h | 4 ++-- - tests/gptprio_test.in | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 4730fe362..438d983a6 100644 ---- a/include/grub/gpt_partition.h -+++ b/include/grub/gpt_partition.h -@@ -62,8 +62,8 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); - 0x85, 0xd2, 0xe1, 0xe9, 0x04, 0x34, 0xcf, 0xb3) - - #define GRUB_GPT_PARTITION_TYPE_USR_X86_64 \ -- GRUB_GPT_GUID_INIT (0x5dfbf5f4, 0x2848, 0x4bac, \ -- 0xaa, 0x5e, 0x0d, 0x9a, 0x20, 0xb7, 0x45, 0xa6) -+ GRUB_GPT_GUID_INIT (0x6b636168, 0x7420, 0x6568, \ -+ 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x74, 0x21) - - #define GRUB_GPT_HEADER_MAGIC \ - { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } -diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in -index c5cf0f3b7..325627546 100644 ---- a/tests/gptprio_test.in -+++ b/tests/gptprio_test.in -@@ -59,7 +59,7 @@ esac - img1="`mktemp "${TMPDIR:-/tmp}/tmp.XXXXXXXXXX"`" || exit 1 - trap "rm -f '${img1}'" EXIT - --prio_type="5dfbf5f4-2848-4bac-aa5e-0d9a20b745a6" -+prio_type="6b636168-7420-6568-2070-6c616e657421" - declare -a prio_uuid - prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" - prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 2cf57798..9d5dc628 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -1,21 +1,60 @@ %global debug_package %{nil} Name: %{_cross_os}grub 
-Version: 2.02 +Version: 2.04 Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPLv3+ URL: https://www.gnu.org/software/grub/ Source0: https://ftp.gnu.org/gnu/grub/grub-%{version}.tar.xz Source1: core.cfg -Patch1: 0001-x86-64-Treat-R_X86_64_PLT32-as-R_X86_64_PC32.patch -Patch2: gpt.patch -Patch3: 100-grub_setup_root.patch +Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch +Patch0002: 0002-gpt-start-new-GPT-module.patch +Patch0003: 0003-gpt-rename-misnamed-header-location-fields.patch +Patch0004: 0004-gpt-record-size-of-of-the-entries-table.patch +Patch0005: 0005-gpt-consolidate-crc32-computation-code.patch +Patch0006: 0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch +Patch0007: 0007-gpt-add-write-function-and-gptrepair-command.patch +Patch0008: 0008-gpt-add-a-new-generic-GUID-type.patch +Patch0009: 0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch +Patch0010: 0010-gpt-split-out-checksum-recomputation.patch +Patch0011: 0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch +Patch0012: 0012-gpt-switch-partition-names-to-a-16-bit-type.patch +Patch0013: 0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch +Patch0014: 0014-gpt-add-search-by-partition-label-and-uuid-commands.patch +Patch0015: 0015-gpt-clean-up-little-endian-crc32-computation.patch +Patch0016: 0016-gpt-minor-cleanup.patch +Patch0017: 0017-gpt-add-search-by-disk-uuid-command.patch +Patch0018: 0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch +Patch0019: 0019-gpt-add-verbose-debug-logging.patch +Patch0020: 0020-gpt-improve-validation-of-GPT-headers.patch +Patch0021: 0021-gpt-refuse-to-write-to-sector-0.patch +Patch0022: 0022-gpt-properly-detect-and-repair-invalid-tables.patch +Patch0023: 0023-gptrepair_test-fix-typo-in-cleanup-trap.patch +Patch0024: 0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch +Patch0025: 0025-gpt-fix-partition-table-indexing-and-validation.patch +Patch0026: 0026-gpt-prefer-disk-size-from-header-over-firmware.patch +Patch0027: 0027-gpt-add-helper-for-picking-a-valid-header.patch +Patch0028: 0028-gptrepair-fix-status-checking.patch +Patch0029: 0029-gpt-use-inline-functions-for-checking-status-bits.patch +Patch0030: 0030-gpt-allow-repair-function-to-noop.patch +Patch0031: 0031-gpt-do-not-use-an-enum-for-status-bit-values.patch +Patch0032: 0032-gpt-check-header-and-entries-status-bits-together.patch +Patch0033: 0033-gpt-be-more-careful-about-relocating-backup-header.patch +Patch0034: 0034-gpt-selectively-update-fields-during-repair.patch +Patch0035: 0035-gpt-always-revalidate-when-recomputing-checksums.patch +Patch0036: 0036-gpt-include-backup-in-sync-check-in-revalidation.patch +Patch0037: 0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch +Patch0038: 0038-gpt-report-all-revalidation-errors.patch +Patch0039: 0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch +Patch0040: 0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch +Patch0041: 0041-gptprio-Use-Thar-boot-partition-type-GUID.patch BuildRequires: automake BuildRequires: bison BuildRequires: flex BuildRequires: gcc-%{_cross_target} +BuildRequires: gettext-devel BuildRequires: grub2-tools BuildRequires: %{_cross_os}glibc-devel diff --git a/packages/grub/sources b/packages/grub/sources index ee84926c..40caa62d 100644 --- a/packages/grub/sources +++ b/packages/grub/sources @@ -1 +1 @@ -SHA512 (grub-2.02.tar.xz) = 
cc6eb0a42b5c8df2f671cc128ff725afb3ff1f8832a196022e433cf0d3b75decfca2316d0aa5fabea75747d55e88f3d021dd93508563f8ca80fd7b9e7fe1f088 +SHA512 (grub-2.04.tar.xz) = 9c15c42d0cf5d61446b752194e3b628bb04be0fe6ea0240ab62b3d753784712744846e1f7c3651d8e0968d22012e6d713c38c44936d4004ded3ca4d4007babbb From dee8254106b4d366764b50156df648797660e106 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 6 Aug 2019 18:37:04 +0000 Subject: [PATCH 0052/1356] refactor excluded files into shared gitignore Signed-off-by: Ben Cressey --- .gitignore | 2 ++ packages/grub/.gitignore | 1 - packages/kernel/.gitignore | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 packages/grub/.gitignore diff --git a/.gitignore b/.gitignore index 50ec4306..dffd686d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ *.makevar *.makepkg *.makedep +*.tar.* +/packages/*/*.crate diff --git a/packages/grub/.gitignore b/packages/grub/.gitignore deleted file mode 100644 index 6407ec63..00000000 --- a/packages/grub/.gitignore +++ /dev/null @@ -1 +0,0 @@ -grub-2.04.tar.xz diff --git a/packages/kernel/.gitignore b/packages/kernel/.gitignore index a791d78a..f0af3ba1 100644 --- a/packages/kernel/.gitignore +++ b/packages/kernel/.gitignore @@ -1 +1 @@ -kernel-4.19.58-21.57.amzn2.src.rpm +kernel-*.src.rpm From 4edd93d7e9dacb20ac2823f183926900b453b04b Mon Sep 17 00:00:00 2001 From: Samuel Mendoza-Jonas Date: Thu, 1 Aug 2019 14:44:17 -0700 Subject: [PATCH 0053/1356] Fixes for arm64 builds Some minor fixups for building natively on arm64, including using the correct sources for Go and Rust, and setting the target ARCH for the kernel build. Then some updates to GRUB and rpm2img to support an EFI partition on arm64. This creates an undersized EFI partition if building on arm64 which luckily fits the GRUB binary and means the partition sizes between x86_64 and arm64 match. There are still other package-specific issues that prevent cross-compiling which will have to be fixed in a later patch. Signed-off-by: Samuel Mendoza-Jonas --- packages/grub/grub.spec | 13 +++++++++++-- packages/kernel/kernel.spec | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 9d5dc628..e6f7c36c 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -117,14 +117,23 @@ grub2-mkimage \ -O "%{_cross_grub_tuple}" \ -o "%{buildroot}%{_cross_grubdir}/%{_cross_grub_image}" \ -p "%{_cross_grub_prefix}" \ - biosdisk configfile ext2 gptprio linux normal part_gpt search_fs_uuid - +%if %{_cross_arch} == x86_64 + biosdisk \ +%else + efi_gop \ +%endif + configfile ext2 gptprio linux normal part_gpt search_fs_uuid + +%if %{_cross_arch} == x86_64 install -m 0644 ./grub-core/boot.img \ %{buildroot}%{_cross_grubdir}/boot.img +%endif %files %dir %{_cross_grubdir} +%if %{_cross_arch} == x86_64 %{_cross_grubdir}/boot.img +%endif %{_cross_grubdir}/%{_cross_grub_image} %{_cross_sbindir}/grub-bios-setup %exclude %{_cross_infodir} diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 088d3459..cfd60ff8 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -49,6 +49,7 @@ done # Patches listed in this spec (Patch0001...) 
%autopatch -p1 KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ + ARCH="%{_cross_karch}" \ scripts/kconfig/merge_config.sh ../config-%{_cross_arch} %{SOURCE100} rm -f ../config-%{_cross_arch} ../*.patch From 714ae30c8c243986a2b5be38669aeaf250a35e41 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 19 Aug 2019 12:37:51 -0700 Subject: [PATCH 0054/1356] Expand on issue and PR templates --- .github/ISSUE_TEMPLATE/build.md | 26 ++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature.md | 17 +++++++++++++++++ .github/ISSUE_TEMPLATE/image.md | 26 ++++++++++++++++++++++++++ .github/PULL_REQUEST_TEMPLATE.md | 6 ------ .github/PULL_REQUEST_TEMPLATE/main.md | 21 +++++++++++++++++++++ 5 files changed, 90 insertions(+), 6 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/build.md create mode 100644 .github/ISSUE_TEMPLATE/feature.md create mode 100644 .github/ISSUE_TEMPLATE/image.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/PULL_REQUEST_TEMPLATE/main.md diff --git a/.github/ISSUE_TEMPLATE/build.md b/.github/ISSUE_TEMPLATE/build.md new file mode 100644 index 00000000..b4830a5d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/build.md @@ -0,0 +1,26 @@ +--- +title: Bug report - build process +--- + + + +**Platform I'm building on:** + + + +**What I expected to happen:** + + + +**What actually happened:** + + + +**How to reproduce the problem:** + + diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 00000000..09e7f53a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,17 @@ +--- +title: Feature request +--- + + + +**What I'd like:** + + + +**Any alternatives you've considered:** + + diff --git a/.github/ISSUE_TEMPLATE/image.md b/.github/ISSUE_TEMPLATE/image.md new file mode 100644 index 00000000..1ccaa1c4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/image.md @@ -0,0 +1,26 @@ +--- +title: Bug report - Thar image +--- + + + +**Image I'm using:** + + + +**What I expected to happen:** + + + +**What actually happened:** + + + +**How to reproduce the problem:** + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 6bdaa999..00000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,6 +0,0 @@ -*Issue #, if available:* - -*Description of changes:* - - -By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. diff --git a/.github/PULL_REQUEST_TEMPLATE/main.md b/.github/PULL_REQUEST_TEMPLATE/main.md new file mode 100644 index 00000000..692a0625 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/main.md @@ -0,0 +1,21 @@ + + +**Issue number:** + + + +**Description of changes:** + + + +**Testing done:** + + + +**Terms of contribution:** + +By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
From fe616056bf4df5ba0705e723714b5215c397eead Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 20 Aug 2019 10:49:42 -0700 Subject: [PATCH 0055/1356] Fix YAML frontmatter variable names The docs seem to be inconsistent, but the GitHub UI says that "name" and "about" can't be blank, which agrees with this page: https://help.github.com/en/articles/about-issue-and-pull-request-templates --- .github/ISSUE_TEMPLATE/build.md | 3 ++- .github/ISSUE_TEMPLATE/feature.md | 3 ++- .github/ISSUE_TEMPLATE/image.md | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/build.md b/.github/ISSUE_TEMPLATE/build.md index b4830a5d..6e4c091d 100644 --- a/.github/ISSUE_TEMPLATE/build.md +++ b/.github/ISSUE_TEMPLATE/build.md @@ -1,5 +1,6 @@ --- -title: Bug report - build process +name: Bug report - build process +about: Let us know about a problem with the build process --- - -**Issue number:** - - - -**Description of changes:** - - - -**Testing done:** - - - -**Terms of contribution:** - -By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..ef6d9a11 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ + + +**Issue number:** + + + +**Description of changes:** + + + +**Testing done:** + + + +**Terms of contribution:** + +By submitting this pull request, I agree that this contribution is dual-licensed under the terms of both the Apache License, version 2.0, and the MIT license. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e19d5b7d..c8e163e0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -57,7 +57,5 @@ Please do **not** create a public GitHub issue. ## Licensing -See the [LICENSE](https://github.com/amazonlinux/PRIVATE-thar/blob/master/LICENSE) file for our project's licensing. +See the [COPYRIGHT](COPYRIGHT) file for our project's licensing. We will ask you to confirm the licensing of your contribution. - -We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
From bf3602d93a42b481659aac6ce86c0774ed21cbb1 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 18 Feb 2020 00:22:52 +0000 Subject: [PATCH 0243/1356] Add license metadata to first-party code --- tools/buildsys/Cargo.toml | 1 + tools/buildsys/deny.toml | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 4879d811..c2b66231 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -2,6 +2,7 @@ name = "buildsys" version = "0.1.0" authors = ["Ben Cressey "] +license = "Apache-2.0 OR MIT" edition = "2018" publish = false diff --git a/tools/buildsys/deny.toml b/tools/buildsys/deny.toml index 46aecf93..8685fb59 100644 --- a/tools/buildsys/deny.toml +++ b/tools/buildsys/deny.toml @@ -25,10 +25,6 @@ exceptions = [ { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, ] -# FIXME: Remove this when a license is assigned for first-party packages -[licenses.private] -ignore = true - [[licenses.clarify]] name = "ring" expression = "MIT AND ISC AND OpenSSL" From bd7d0a757b34c9be2964f506c01a6f5bae39cf7f Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 18 Feb 2020 01:00:40 +0000 Subject: [PATCH 0244/1356] Install top-level copyright/license to /usr/share/licenses --- tools/rpm2img | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/rpm2img b/tools/rpm2img index 2b9647a1..f82451f7 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -75,6 +75,7 @@ sgdisk --clear \ --sort --print "${DISK_IMAGE}" rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm +install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ mksquashfs "${ROOT_MOUNT}"/usr/share/licenses "${ROOT_MOUNT}"/usr/share/bottlerocket/licenses.squashfs rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* From a8570cf398375040828b0c32f68e08bf8cbeddf2 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 19 Feb 2020 16:53:06 +0000 Subject: [PATCH 0245/1356] host-containers: extract git history to new repos and remove --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f4a2918a..006c0fcb 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ From there, you can [change settings](#settings), manually [update Bottlerocket] ### Control container -Bottlerocket has a "control" container, enabled by default, that runs outside of the orchestrator in a separate instance of containerd. +Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bottlerocket-control-container), enabled by default, that runs outside of the orchestrator in a separate instance of containerd. This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. (You can easily replace this control container with your own just by changing the URI; see [Settings](#settings). @@ -80,12 +80,12 @@ Then you'd be able to start a session using only your instance ID, like this: aws ssm start-session --target INSTANCE_ID ``` -With the default control container, you can make API calls to change settings in your Bottlerocket host. +With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make API calls to change settings in your Bottlerocket host. To do even more, read the next section about the [admin container](#admin-container). 
### Admin container -Bottlerocket has an administrative container, disabled by default, that runs outside of the orchestrator in a separate instance of containerd. +Bottlerocket has an [administrative container](https://github.com/bottlerocket-os/bottlerocket-admin-container), disabled by default, that runs outside of the orchestrator in a separate instance of containerd. This container has an SSH server that lets you log in as `ec2-user` using your EC2-registered SSH key. (You can easily replace this admin container with your own just by changing the URI; see [Settings](#settings). @@ -275,7 +275,7 @@ The following settings are set for you automatically by [pluto](workspaces/api/) ##### Custom host containers -`admin` and `control` are our default host containers, but you're free to change this. +[`admin`](https://github.com/bottlerocket-os/bottlerocket-admin-container) and [`control`](https://github.com/bottlerocket-os/bottlerocket-control-container) are our default host containers, but you're free to change this. Beyond just changing the settings above to affect the `admin` and `control` containers, you can add and remove host containers entirely. As long as you define the three fields above -- `source` with a URI, and `enabled` and `superpowered` with true/false -- you can add host containers with an API call or user data. From df44877dbc625114c540a6b4b18dbe4ae0a60e7d Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 17 Feb 2020 16:25:08 -0800 Subject: [PATCH 0246/1356] Remove infra, update_sign_tuf_repo All infra related stuff and update_sign_tuf_repo have moved to the adjacent release-automation repository --- .../bottlerocket-develop-pipeline-sign.yml | 20 - .../bottlerocket-develop-pipeline-test.yml | 12 - .../infra/buildspec/bottlerocket-pr-build.yml | 23 - tools/infra/buildspec/infra-pr-build.yml | 11 - tools/infra/container/.dockerignore | 1 - tools/infra/container/Dockerfile.builder | 41 - tools/infra/container/Makefile | 57 - tools/infra/container/README.md | 61 - tools/infra/container/builder/entrypoint.sh | 4 - .../container/runtime/bin/create-image-ami | 272 -- .../container/runtime/bin/ensure-key-pair | 47 - .../container/runtime/bin/environment-report | 19 - tools/infra/container/runtime/bin/retry | 36 - .../infra/container/runtime/bin/sign-tuf-repo | 34 - .../runtime/bin/start-build-environment | 17 - .../container/runtime/bin/start-docker-daemon | 80 - .../container/runtime/bin/start-log-listener | 55 - .../container/runtime/bin/write-build-meta | 47 - tools/infra/container/runtime/lib/lib.bash | 57 - tools/infra/container/scripts/configure-rust | 46 - tools/infra/container/scripts/install-crates | 25 - tools/infra/container/scripts/install-rust | 27 - tools/infra/stacks/Makefile | 21 - ...ottlerocket-develop-pipeline-ami-build.yml | 311 -- tools/infra/stacks/bottlerocket-pr-build.yml | 154 - tools/infra/stacks/canaries/canary-infra.yml | 88 - tools/infra/stacks/canaries/repo-canaries.yml | 380 --- .../infra/stacks/host-containers-pr-build.yml | 110 - tools/infra/stacks/infra-container.yml | 163 -- tools/infra/stacks/infra-pr-build.yml | 153 - .../pipelines/admin-container-pipeline.yml | 161 -- .../pipelines/control-container-pipeline.yml | 161 -- ...-container-pipeline-codebuild-projects.yml | 252 -- .../dogswatch-container-pipeline.yml | 161 -- ...-container-pipeline-codebuild-projects.yml | 263 -- .../signing-cross-account-assume-role.yml | 21 - .../signing-cross-account-read-role.yml | 47 - tools/update_sign_tuf_repo/Cargo.lock | 2526 ----------------- 
tools/update_sign_tuf_repo/Cargo.toml | 30 - tools/update_sign_tuf_repo/README.md | 37 - tools/update_sign_tuf_repo/README.tpl | 9 - tools/update_sign_tuf_repo/build.rs | 32 - tools/update_sign_tuf_repo/src/main.rs | 744 ----- 43 files changed, 6816 deletions(-) delete mode 100644 tools/infra/buildspec/bottlerocket-develop-pipeline-sign.yml delete mode 100644 tools/infra/buildspec/bottlerocket-develop-pipeline-test.yml delete mode 100644 tools/infra/buildspec/bottlerocket-pr-build.yml delete mode 100644 tools/infra/buildspec/infra-pr-build.yml delete mode 100644 tools/infra/container/.dockerignore delete mode 100644 tools/infra/container/Dockerfile.builder delete mode 100644 tools/infra/container/Makefile delete mode 100644 tools/infra/container/README.md delete mode 100755 tools/infra/container/builder/entrypoint.sh delete mode 100755 tools/infra/container/runtime/bin/create-image-ami delete mode 100755 tools/infra/container/runtime/bin/ensure-key-pair delete mode 100755 tools/infra/container/runtime/bin/environment-report delete mode 100755 tools/infra/container/runtime/bin/retry delete mode 100755 tools/infra/container/runtime/bin/sign-tuf-repo delete mode 100755 tools/infra/container/runtime/bin/start-build-environment delete mode 100755 tools/infra/container/runtime/bin/start-docker-daemon delete mode 100755 tools/infra/container/runtime/bin/start-log-listener delete mode 100755 tools/infra/container/runtime/bin/write-build-meta delete mode 100644 tools/infra/container/runtime/lib/lib.bash delete mode 100755 tools/infra/container/scripts/configure-rust delete mode 100755 tools/infra/container/scripts/install-crates delete mode 100755 tools/infra/container/scripts/install-rust delete mode 100644 tools/infra/stacks/Makefile delete mode 100644 tools/infra/stacks/bottlerocket-develop-pipeline-ami-build.yml delete mode 100644 tools/infra/stacks/bottlerocket-pr-build.yml delete mode 100644 tools/infra/stacks/canaries/canary-infra.yml delete mode 100644 tools/infra/stacks/canaries/repo-canaries.yml delete mode 100644 tools/infra/stacks/host-containers-pr-build.yml delete mode 100644 tools/infra/stacks/infra-container.yml delete mode 100644 tools/infra/stacks/infra-pr-build.yml delete mode 100644 tools/infra/stacks/pipelines/admin-container-pipeline.yml delete mode 100644 tools/infra/stacks/pipelines/control-container-pipeline.yml delete mode 100644 tools/infra/stacks/pipelines/dogswatch-container-pipeline-codebuild-projects.yml delete mode 100644 tools/infra/stacks/pipelines/dogswatch-container-pipeline.yml delete mode 100644 tools/infra/stacks/pipelines/host-container-pipeline-codebuild-projects.yml delete mode 100644 tools/infra/stacks/signing-cross-account-assume-role.yml delete mode 100644 tools/infra/stacks/signing-cross-account-read-role.yml delete mode 100644 tools/update_sign_tuf_repo/Cargo.lock delete mode 100644 tools/update_sign_tuf_repo/Cargo.toml delete mode 100644 tools/update_sign_tuf_repo/README.md delete mode 100644 tools/update_sign_tuf_repo/README.tpl delete mode 100644 tools/update_sign_tuf_repo/build.rs delete mode 100644 tools/update_sign_tuf_repo/src/main.rs diff --git a/tools/infra/buildspec/bottlerocket-develop-pipeline-sign.yml b/tools/infra/buildspec/bottlerocket-develop-pipeline-sign.yml deleted file mode 100644 index ba2c792f..00000000 --- a/tools/infra/buildspec/bottlerocket-develop-pipeline-sign.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: 0.2 - -phases: - pre_build: - commands: - - environment-report - - write-build-meta - build: - commands: - - sign-tuf-repo 
- -artifacts: - base-directory: '/tmp/tuf_out' - files: - - '*' - secondary-artifacts: - meta: - base-directory: 'build/meta' - files: - - '*' diff --git a/tools/infra/buildspec/bottlerocket-develop-pipeline-test.yml b/tools/infra/buildspec/bottlerocket-develop-pipeline-test.yml deleted file mode 100644 index c40747f7..00000000 --- a/tools/infra/buildspec/bottlerocket-develop-pipeline-test.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 0.2 - -phases: - pre_build: - commands: - - start-build-environment - - environment-report - - write-build-meta - build: - commands: - # Run rust workspaces and go unit tests - - cargo make unit-tests diff --git a/tools/infra/buildspec/bottlerocket-pr-build.yml b/tools/infra/buildspec/bottlerocket-pr-build.yml deleted file mode 100644 index 5038b9ed..00000000 --- a/tools/infra/buildspec/bottlerocket-pr-build.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: 0.2 - -phases: - pre_build: - commands: - - start-build-environment - - environment-report - - write-build-meta - build: - commands: - - cargo make world - -artifacts: - base-directory: 'build/' - files: - - '*.img*' - - '*.ext4*' - - '*.verity*' - secondary-artifacts: - meta: - base-directory: 'build/meta' - files: - - '*' diff --git a/tools/infra/buildspec/infra-pr-build.yml b/tools/infra/buildspec/infra-pr-build.yml deleted file mode 100644 index 17e4c25e..00000000 --- a/tools/infra/buildspec/infra-pr-build.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 0.2 - -env: - variables: - # Path to infra tooling directory. - INFRA_DIR: "./tools/infra" - -phases: - build: - commands: - - make -C "$INFRA_DIR/stacks" --keep-going validate check diff --git a/tools/infra/container/.dockerignore b/tools/infra/container/.dockerignore deleted file mode 100644 index f3c7a7c5..00000000 --- a/tools/infra/container/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -Makefile diff --git a/tools/infra/container/Dockerfile.builder b/tools/infra/container/Dockerfile.builder deleted file mode 100644 index 86d750b9..00000000 --- a/tools/infra/container/Dockerfile.builder +++ /dev/null @@ -1,41 +0,0 @@ -# Dockerfile.builder - Base build environment container image -# -# The builder image provides an environment in which packages and images may be -# built. This includes the necessary compilers, libraries, services, and -# executable dependencies used in the course of the build process. -# -# Facilitating scripts may be found in the ./runtime and ./scripts directory -# where scripts are generally participants in the build of the environment. -# -FROM amazonlinux:2 as base -RUN yum update -y \ - && yum groupinstall -y 'Development Tools' \ - && yum install -y socat procps-ng awscli jq openssh rsync systemd-devel openssl-devel \ - && amazon-linux-extras enable docker \ - && yum install -y docker amazon-ecr-credential-helper \ - && yum clean all \ - && rm -rf /var/cache/yum /var/cache/amzn2extras -RUN install -D /dev/null /root/.docker/config.json \ - && echo '{ "credsStore": "ecr-login" }' >> /root/.docker/config.json - -FROM base as buildenv -ENV PATH="$PATH:/build/runtime/bin:/build/scripts:/build/.cargo/bin" -ENV CARGO_HOME="/build/.cargo" -ENV RUNTIME_SCRIPT_LIB="/build/runtime/lib" -COPY tools/infra/container/scripts /build/scripts -COPY tools/infra/container/runtime /build/runtime -# FIXME: remove depedency on top level source - #656 -COPY bin/amiize.sh /build/runtime/bin/amiize.sh -RUN install-rust && configure-rust && install-crates - -FROM buildenv as signing-tool -COPY . 
/build/src -RUN cd /build/src/tools/update_sign_tuf_repo && \ - cargo build --release - -FROM buildenv -COPY --from=signing-tool /build/src/tools/update_sign_tuf_repo/target/release/update_sign_tuf_repo /build/runtime/bin/update_sign_tuf_repo -WORKDIR /build -COPY tools/infra/container/builder/entrypoint.sh /build/entrypoint.sh -ENTRYPOINT ["/build/entrypoint.sh"] -CMD [ "bash" ] diff --git a/tools/infra/container/Makefile b/tools/infra/container/Makefile deleted file mode 100644 index 03554c4f..00000000 --- a/tools/infra/container/Makefile +++ /dev/null @@ -1,57 +0,0 @@ -# SHELL is bash, silly sh. -SHELL = bash -# DOCKERFILES are the detected container images that are being worked -# with. It is expected that NAME be part of the file name, as in -# Dockerfile.NAME, which is used throughout the infrastructure. -DOCKERFILES = $(filter-out %~,$(wildcard Dockerfile.*)) -# NAMES are the detected NAMES given the provided Dockerfiles. -NAMES = $(DOCKERFILES:Dockerfile.%=%) -# IMAGE_REPO_PREFIX is prepended to the image's tag. In the case of -# `push', the IMAGE_REPO_PREFIX provides the ECR repository URI prefix -# for each image. -IMAGE_REPO_PREFIX ?= infra/ -# IMAGE_TAG provides the registry/image-name:IMAGE_TAG portion of the -# URI tagged to images. -IMAGE_TAG ?= develop -# IMAGE_NAME is the name that the container image is tagged with. -IMAGE_NAME ?= $(IMAGE_REPO_PREFIX)$(NAME):$(IMAGE_TAG) -# ECR_URI_PREFIX is the ECR URI prefix based on the resolved builder -# image URI which, like other container images, is discoverable under -# its in-region SSM parameter - so we can lob off the builder part and -# use it as our model for the pushed repository name. -ECR_URI_PREFIX = $(shell aws ssm get-parameter --name /infra/container/infra/builder --query Parameter.Value --output text | sed 's/builder$$//') -# ECR_NAME_PREFIX provides a prefix to derive the ECR repository-name -# (the attribute) from the images' NAME - the infra/ prefix is -# conventional across automations' consumed images. -ECR_NAME_PREFIX ?= infra/ - -# This is a reference to the root of the Bottlerocket repo (relative to the current path) -# so later we can run `docker build` with this path. This allows us to -# more easily copy the Bottlerocket repo into a container, which in turn makes it simpler -# to build Rust code that has dependencies on other code in the repo. -# FIXME: when we split this code out we will need to handle this differently -BOTTLEROCKET_SRC = ../../../ - -.DEFAULT: all -.PHONY: force all release $(NAMES) -force: - -all: $(if $(NAME),$(NAME),$(NAMES)) - -$(NAMES) : NAME = $@ -$(NAMES): force - @echo "Building container image for '$(NAME)'" - docker build -t $(IMAGE_NAME) -f Dockerfile.$(NAME) $(BOTTLEROCKET_SRC) - -# Push images (must explicitly provide IMAGE_TAG=release to be pulled -# by consumers). 
-push: IMAGE_REPO_PREFIX = $(ECR_URI_PREFIX) -push: IMAGE_TAG = staging -push: all - @echo "Pushing container images with tag '$(IMAGE_TAG)'" - @echo "Images: $(foreach NAME,$(NAMES),$(IMAGE_NAME))" - @$(foreach NAME,$(NAMES),\ - echo "Pushing '$(NAME)' to '$(IMAGE_NAME)'" && \ - aws ecr describe-repositories --repository-names $(ECR_NAME_PREFIX)$(NAME) &> /dev/null \ - && docker push $(IMAGE_NAME) \ - || echo "Could not push $(NAME) to ECR repository as $(IMAGE_NAME)";) diff --git a/tools/infra/container/README.md b/tools/infra/container/README.md deleted file mode 100644 index 37b3ee2d..00000000 --- a/tools/infra/container/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Container Environments - -Container images, defined in this directory, provide environments for infra's build and automation needs. - -## Images - -Each image is defined in their own `Dockerfile` and suffixed with its name. For example the `builder` container - used in CI builds - is defined by `Dockerfile.builder`. -The containers copy in common resources and others as needed from this shared root context. - -**`builder` image** - -The `builder` image provides an environment in which packages and images may be built. -`builder`'s container image is created with all required dependencies used by the build driver, `buildsys`, and the supporting tools & scripts used by it (including many of the `cargo-make` tasks' dependencies). - -# Building - -## Development Images - -To all build images locally, a single `make` call can be made: - -```bash -make all -``` - -Each `Dockerfile.` can be built individually with `make $name` as needed. - -## Release Images (using a tag) - -As with the development images, all images may be built at once: - -```bash -make all IMAGE_TAG=release -``` - -To build a specific image, for instance named `builder`, `make` may be provided this name to build its release image: - -```bash -make all NAME=builder IMAGE_TAG=release -``` - -# Releasing - -The `push` target is provided to build & push release container images for use, at least in the context of build and release automation. - -The default target will prepare to push the images using the environment's AWS profile to confirm that the ECR repositories line up and subsequently pushing with a default of `IMAGE_TAG=staging`. -This invocation **will** push to the ECR repository, but with the image tagged as "staging". -Doing a push this way will stage the layers in the ECR repository so that subsequent pushes update lightweight references only (pushing a tag that refers to the same layers). - -``` bash -make push -``` - -To push a container image tagged as a release image, which is required for the CodeBuild project to use, the `IMAGE_TAG` must be set explicitly to the same tag that's configured to be pulled by projects. -If the release tag is `release`, then the call to `push` these images would be: - -``` bash -make push IMAGE_TAG=release -``` - -The `Makefile` target would then match the images to their respective ECR repositories, as before, and `docker push` to the images' respective repositories. -If the `make push IMAGE_TAG=release` followed an earlier `make push` then this the `make push IMAGE_TAG=release` call will simply update the references in the remote ECR repository to point to the same layers. 
diff --git a/tools/infra/container/builder/entrypoint.sh b/tools/infra/container/builder/entrypoint.sh deleted file mode 100755 index 6c31cdf3..00000000 --- a/tools/infra/container/builder/entrypoint.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -set -e -start-build-environment -exec -- "$@" diff --git a/tools/infra/container/runtime/bin/create-image-ami b/tools/infra/container/runtime/bin/create-image-ami deleted file mode 100755 index 88f77ad9..00000000 --- a/tools/infra/container/runtime/bin/create-image-ami +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env bash -# -# create-image-ami - Create an AMI from a build's image -# -# usage: -# -# create-image-ami -# -# Environment variable overrides for building, testing, and otherwise -# using this script (other variables, undocumented here, may be -# overridden also - see script below): -# -# BUILD_AMI_NAME -# -# Name the AMI exactly instead of calculating the image's name -# -# BUILD_REGION -# -# Region to create and register the AMI and its snapshots in. -# -# The region must be configured and provisioned with the -# required resources for both EC2 AMI creation and the SSM resources -# used for manipulating the launched image building instance. -# -# BUILD_IMAGE_ROOT, BUILD_IMAGE_DATA -# -# Paths, to the ROOT and DATA images respectively, for -# specifying exact disk images to use instead of relying on path -# construction. -# -# BUILD_INSTANCE_AMI, BUILD_INSTANCE_TYPE -# -# AMI ID and the EC2 Instance Type may be specified to -# explicitly choose an AMI and the Instance Type used to launch, -# instead of querying for the latest Amazon Linux 2 in-region -# AMI and the default EC2 Instance Type. -# -# KEYPAIR_NAME -# -# Name of the EC2 Key Pair (as named during creation or import) -# provisioned and accessible in the SSM Parameter (configured -# with KEYPAIR_PARAMETER). -# -# KEYPAIR_PARAMETER -# -# SSM Parameter Name (eg: "/Prod/ami-build/builder-ssh-key") -# that holds the Private SSH Key that corresponds to the EC2 Key -# Pair (specified in KEYPAIR_NAME) used to access the image -# building instance. -# - -set -o pipefail - -# BUILD_OUTPUT is the directory in which resource data will be written -# to as files. -BUILD_OUTPUT="${BUILD_OUTPUT:-build/ami}" -# BUILD_KEEP_OUTPUT can be set to non-nil to retain existing output in -# BUILD_OUTPUT. -BUILD_KEEP_OUTPUT="${BUILD_KEEP_OUTPUT:+yes}" -# BUILD_REGION is the region the image should be built in. -BUILD_REGION="${AWS_DEFAULT_REGION:-us-west-2}" -# BUILD_INSTANCE_TYPE is the instance type chosen to run the amiize -# build on. -BUILD_INSTANCE_TYPE="${BUILD_INSTANCE_TYPE:-m3.xlarge}" -# BUILD_AMI provides an override to choose an image to use, otherwise -# the latest release of Amazon Linux 2 is used. -BUILD_INSTANCE_AMI="${BUILD_INSTANCE_AMI:-}" -# BUILDSYS_VARIANT specifies the image's variant to be amiized. -BUILDSYS_VARIANT="${BUILDSYS_VARIANT:-aws-k8s}" -# BUILD_ARCH is the image's architecture. -BUILD_ARCH="${BUILD_ARCH:-x86_64}" -# ami_suffix provides a the dynamic portion of an AMI name using the -# environment of a build. -# -# CodeBuild based defaults use pre-defined Environment Variables as -# documented: -# https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html -# -if [[ -n "$CODEBUILD_RESOLVED_SOURCE_VERSION" ]]; then - ami_suffix="${CODEBUILD_RESOLVED_SOURCE_VERSION:0:8}" -else - ami_suffix="$USER-$(date +'%s')" -fi -# BUILD_AMI_NAME is the created AMI's EC2 AMI Name. 
-BUILD_AMI_NAME="${BUILD_AMI_NAME:-bottlerocket-${BUILD_ARCH}-${ami_suffix}}" -# BUILD_IMAGE_ROOT may be specified to provide a specific root disk -# image to write out. -BUILD_IMAGE_ROOT="${BUILD_IMAGE_ROOT:-build/bottlerocket-$BUILD_ARCH-${BUILDSYS_VARIANT}.img.lz4}" -# BUILD_IMAGE_DATA may be specified to provide a specific data disk -# image to write out. -BUILD_IMAGE_DATA="${BUILD_IMAGE_DATA:-build/bottlerocket-$BUILD_ARCH-${BUILDSYS_VARIANT}-data.img.lz4}" -# EC2 Key Pair used to spin up and access instance for AMIizing disk -# images. These are created automatically if the build task is -# configured with access to the SSM, EC2, and KMS resources involved. -# -# KEYPAIR_NAME is the EC2 KeyPair Name -KEYPAIR_NAME="${KEYPAIR_NAME:-ami-build-key}" -# KEYPAIR_PARAMETER is the SecureString SSM Parameter used to hold the -# private key for the specified EC2 Key Pair (in KEYPAIR_NAME). -KEYPAIR_PARAMETER="${KEYPAIR_PARAMETER:-/Prod/ami-build/$KEYPAIR_NAME}" - -WORK_DIR="$(mktemp -d -t create-image-ami.XXX)" - -# shellcheck source=../lib/lib.bash -source "${RUNTIME_SCRIPT_LIB:-../lib}/lib.bash" - -# Explicitly configured aws-cli for making API calls. -aws() { - command aws --region "$BUILD_REGION" "$@" -} - -# prepareImage massages a provided disk image into a format suitable -# for use in the amiize process and returns that file's name. -# -# usage: prepareImage -prepareImage() { - local image_file="${1:?need image file to prepare it}" - local out - case "$image_file" in - *.lz4 ) - un_name="${image_file%%.lz4}" - out="$WORK_DIR/${un_name##*/}" - logger -t INFO "decompressing LZ4 disk image: $image_file to $out" - lz4 -dc "$image_file" > "$out" || return 1 - ;; - *.img ) - out="$image_file" - logger -t INFO "using provided raw disk image: $out" - ;; - * ) - logger -t ERROR "unknown image file provided: $image_file" - return 1 - ;; - esac - - echo "$out" -} - -# cleanup our files and helping processes before exiting -cleanup() { - if [[ -n "$SSH_AGENT_PID" ]]; then - logger -t INFO "terminating key-wielding ssh-agent" - eval "$(ssh-agent -k | grep -v '^echo')" - fi - - if [[ -d "$WORK_DIR" ]]; then - logger -t INFO "removing work directory $WORK_DIR" - rm -rf -- "$WORK_DIR" - fi -} - -if ! hash amiize.sh ensure-key-pair aws ssh ssh-agent ssh-add; then - logger -t ERROR "some required commands are not available" - exit 1 -fi - -trap "cleanup" EXIT - -mkdir -p "$WORK_DIR" - -if ! [[ -s "$BUILD_IMAGE_ROOT" ]]; then - logger -t ERROR "no root disk image at specified path: $BUILD_IMAGE_ROOT" - exit 1 -fi -if ! [[ -s "$BUILD_IMAGE_DATA" ]]; then - logger -t ERROR "no data disk image at specified path: $BUILD_IMAGE_DATA" - exit 1 -fi - -logger -t INFO "using ssh key from SSM parameter '$KEYPAIR_PARAMETER'" -if ! ( - # shellcheck disable=SC2030 - export KEYPAIR_NAME KEYPAIR_PARAMETER - export AWS_DEFAULT_REGION="$BUILD_REGION" - ensure-key-pair - ); then - logger -t ERROR "unable to setup ssh key from SSM parameter" - exit 1 -fi - -logger -t INFO "configuring ssh for SSM parameter ssh key" -# shellcheck disable=SC2091 -eval "$(ssh-agent | grep -v '^echo')" -# shellcheck disable=SC2031 -if ! 
ssh-add <(aws ssm get-parameter --name "$KEYPAIR_PARAMETER" --with-decryption --query Parameter.Value --output text) ; then - logger -t ERROR "unable to load ssh key from SSM Parameter $KEYPAIR_PARAMETER" - exit 1 -fi - -if [[ -z "$BUILD_AMI" ]]; then - logger -t INFO "querying SSM for latest Amazon Linux 2 AMI" - BUILD_INSTANCE_AMI="$(aws ssm get-parameter --output text --query Parameter.Value \ - --name /aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2)" - # shellcheck disable=2181 - if [[ "$?" -ne 0 ]]; then - logger -t ERROR "AMI query failed, cannot proceed without image" - exit 1 - fi - if [[ -z "$BUILD_INSTANCE_AMI" ]]; then - logger -t ERROR "AMI ID is empty, cannot proceed without image" - exit 1 - fi -fi -logger -t INFO "using $BUILD_INSTANCE_AMI for amiizing instance" - -logger -t INFO "preparing disk images for amiizing" -if ! BUILD_IMAGE_ROOT="$(prepareImage "$BUILD_IMAGE_ROOT")"; then - logger -t ERROR "failed to prepare root image for amiizing" - exit 1 -fi - -if ! BUILD_IMAGE_DATA="$(prepareImage "$BUILD_IMAGE_DATA")"; then - logger -t ERROR "failed to prepare data image for amiizing" - exit 1 -fi - -userdata="$(base64 -w 0 < "$amiize_output/region" - - # When we're writing to an existing directory, we allow the caller to retain - # existing resources there also while (effectively) overwriting the - # overlapping resources. - if [[ -d "$BUILD_OUTPUT" ]]; then - if [[ -z "$BUILD_KEEP_OUTPUT" ]]; then - logger -t WARN -- "removing prior data (in $BUILD_OUTPUT) to record newly created resources" - # Remove overlapping files in the $BUILD_OUTPUT first. - find "$amiize_output" -mindepth 1 -printf "$BUILD_OUTPUT/%P\0" | xargs -0 -- rm -rf -- - fi - fi - - logger -t INFO "recording created resource IDs in $BUILD_OUTPUT" - mkdir -p "$BUILD_OUTPUT" - cp -rT "$amiize_output/" "$BUILD_OUTPUT/" -fi - -exit "$amiize_ret" diff --git a/tools/infra/container/runtime/bin/ensure-key-pair b/tools/infra/container/runtime/bin/ensure-key-pair deleted file mode 100755 index ac40b347..00000000 --- a/tools/infra/container/runtime/bin/ensure-key-pair +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -KEYPAIR_NAME="${KEYPAIR_NAME:?Need a keypair name}" -KEYPAIR_PARAMETER="${KEYPAIR_PARAMETER:?Need a parameter name}" - -logger() { - command logger --stderr --no-act "$@" -} - -ssh_keypair="$(mktemp -u -t "ssm-key-pair.XXX")" -# shellcheck disable=2064 -trap "rm -f -- '$ssh_keypair' '$ssh_keypair.pub'" EXIT - -if aws ssm get-parameter --name "$KEYPAIR_PARAMETER" &>/dev/null; then - : SSM Secure Parameter exists -else - logger -t INFO "creating SSM parameter ssh key-pair" - mkfifo "$ssh_keypair" "${ssh_keypair}.pub" - yes | ssh-keygen -P '' -C "$KEYPAIR_NAME" -f "$ssh_keypair" & - aws ssm put-parameter --overwrite --name "$KEYPAIR_PARAMETER" --type SecureString --value "$(<"$ssh_keypair")" >/dev/null -fi - -if aws ec2 describe-key-pairs --key-name "$KEYPAIR_NAME" &>/dev/null ; then - : EC2 Key Pair exists -else - if [ -e "${ssh_keypair}.pub" ]; then - logger -t INFO "importing ssh key from newly generated pair" - else - logger -t INFO "importing ssh key from SSM parameter '$KEYPAIR_PARAMETER'" - - ssh-keygen -y -f <(aws ssm get-parameter --name "$KEYPAIR_PARAMETER" \ - --with-decryption \ - --query Parameter.Value \ - --output text) \ - > "${ssh_keypair}.pub" - # shellcheck disable=SC2181 - if [ "$?" 
-ne 0 ]; then - logger -t ERROR "could not import ssh key from SSM parameter" - exit 1 - fi - fi - - - aws ec2 import-key-pair --key-name "$KEYPAIR_NAME" --public-key-material file://"${ssh_keypair}.pub" - logger -t INFO "imported ssh key as EC2 key pair '$KEYPAIR_NAME'" -fi diff --git a/tools/infra/container/runtime/bin/environment-report b/tools/infra/container/runtime/bin/environment-report deleted file mode 100755 index 6fbb3569..00000000 --- a/tools/infra/container/runtime/bin/environment-report +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# -# environment-report - print out builder details for reference -# -# The output is expected to be read from the buildlog if the details are needing -# to be referenced to. -# -set -x - -uname -a -nproc -free --total --wide -df -x squashfs -x cgroup -x sysfs -x proc -x devtmpfs -x devpts -rustc --version -cargo --version -cargo make --version -cargo deny --version -docker version - diff --git a/tools/infra/container/runtime/bin/retry b/tools/infra/container/runtime/bin/retry deleted file mode 100755 index 935d1082..00000000 --- a/tools/infra/container/runtime/bin/retry +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# -# retry - run and rerun a command if it fails -# -# usage: -# -# retry [msg] -- -# -MAX_RETRIES="${1:-number of retry attempts must be provided}" -MSG="" -shift 1 - -until [[ "$1" = "--" ]]; do - if [[ "$#" -eq 0 ]]; then - logger -s -t ERROR </dev/null; then - logger -s --no-act -t INFO "delegating to running & configured dockerd (via $DOCKER_HOST)" - exit 0 -fi - -# Also check to see if the daemon is running but we're unable to access it, the -# environment may already have docker daemon running but not fully configured -# for use. -if [[ -S "${DOCKER_HOST:+${DOCKER_HOST##unix://}}" ]]; then - logger -s --no-act -t ERROR "unable to access running dockerd (via $DOCKER_HOST)" - exit 1 -fi - -# Verify we're a user that can start the docker daemon (assuming for now that -# being root means that). -euid="$(id -u)" -if [[ "$euid" -ne 0 ]]; then - logger -s --no-act -t ERROR "unable to start dockerd as non-root user (euid: $euid != 0)" - exit 1 -fi - -# In all other cases, start the docker daemon -logger -s --no-act -t INFO "starting dockerd" -nohup dockerd \ - --host="$DOCKER_HOST" \ - &>/var/log/docker.log & -daemon_pid="$!" - -sleep 1 - -if [[ ! -e "/proc/$daemon_pid" ]]; then - logger -s --no-act -t ERROR "dockerd did not start" - exit 1 -fi - -# Starting up the daemon asynchronously may take a moment or may fail to start -# in the background, tries are made to check in on the brief time between exec -# and the daemon ready to work. 
-# -# sleep interval before attempting each try -try_interval="0.1s" -# maximum try attempts that will be made -try_max_count=3 -# try attempts -try_count=0 - -until docker info &>/dev/null; do - ((try_count++)) - if [[ "$try_count" -gt "$try_max_count" ]]; then - logger -s --no-act -t ERROR "dockerd start exceeded deadline (may be slow or failed to start, check logs)" - if [[ -s "/var/log/docker.log" ]]; then - sed 's/^/docker.log: /' /var/log/docker.log - else - logger -s --no-act -t WARN "dockerd logs are empty" - fi - exit 1 - fi - sleep "$try_interval" -done diff --git a/tools/infra/container/runtime/bin/start-log-listener b/tools/infra/container/runtime/bin/start-log-listener deleted file mode 100755 index ee8e85a1..00000000 --- a/tools/infra/container/runtime/bin/start-log-listener +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -# -# start-log-listener - Setup a socket at /dev/log when missing -# -# The kernel's /dev/log socket may not be present in some environments so this -# script stands up a blackhole listener, to avoid logger from complaining and -# not outputting our messages. -# -# usage: -# -# start-log-listener -# -if ! hash socat; then - echo "ERROR: missing required commands" -fi - -log_sock="/dev/log" - -# Starting up the socket asynchronously may take a moment or may fail to start -# in the background, tries are made to check in on the brief time between exec -# and the listener bound. -# -# sleep interval before attempting each try -try_interval="0.1s" -# maximum try attempts that will be made -try_max_count=3 - -# Let the existing socket be. -if [[ -S "$log_sock" ]]; then - logger -s --no-act -t INFO -- "kernel's log socket is already present /dev/log" - logger -s --no-act -t INFO -- "not replacing /dev/log" - exit 0 -fi - -# Require EUID 0 as we're mucking about in /dev -euid="$(id -u)" -if [[ "$euid" -ne 0 ]]; then - echo "ERROR: unable to start log listener socket as non-root user (euid: $euid != 0)" - exit 1 -fi - -# Listen on syslog unix socket and dump any sent datagrams. -nohup socat "UNIX-LISTEN:$log_sock,fork" - &>/dev/null & - -# Wait for the listener to start and assume the process didn't successfully bind -# to the socket otherwise. -try_count=0 -until [[ -S "$log_sock" ]]; do - ((try_count++)) - if [[ "$try_count" -gt "$try_max_count" ]]; then - echo "ERROR: unable to start log listener at $log_sock" >&2 - exit 1 - fi - sleep "$try_interval" -done diff --git a/tools/infra/container/runtime/bin/write-build-meta b/tools/infra/container/runtime/bin/write-build-meta deleted file mode 100755 index 948cce83..00000000 --- a/tools/infra/container/runtime/bin/write-build-meta +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -# -# write-build-meta - collect and write out build job & environment details -# - -# shellcheck source=../lib/lib.bash -source "${RUNTIME_SCRIPT_LIB:-../lib}/lib.bash" - -write_common() { - echo "${USER}" > build-user - if has_command hostname; then - hostname --fqdn > build-host - elif [[ -f "/etc/hostname" ]]; then - cat /etc/hostname > build-host - else - echo "unknown" > build-host - fi -} - -write_codebuild() { - echo "codebuild" > build-service - echo "${CODEBUILD_RESOLVED_SOURCE_VERSION}" > build-commit - echo "${CODEBUILD_BUILD_ID}" > build-id - echo "${CODEBUILD_INITIATOR}" > build-initiator - echo "${CODEBUILD_BUILD_URL}" > build-url - echo "${CODEBUILD_BUILD_ARN}" > build-codebuild-arn -} - -if ! 
mkdir -p "build/meta"; then - logger -t ERROR "could not create build/meta directory for writing" - exit 1 -fi -cd build/meta - -# Write build environment information to disk - -write_common - -if [[ -n "${CODEBUILD_BUILD_ID}" ]]; then - write_codebuild -fi - -if [[ ! -s "build-service" ]]; then - logger -t WARN "unknown service, cannot write out CI build metadata" - echo "unknown" > build-service - exit 0 -fi diff --git a/tools/infra/container/runtime/lib/lib.bash b/tools/infra/container/runtime/lib/lib.bash deleted file mode 100644 index 8555736c..00000000 --- a/tools/infra/container/runtime/lib/lib.bash +++ /dev/null @@ -1,57 +0,0 @@ -# shellcheck shell=bash - -# Logger provides the corrected interface to log to stderr. -logger() { - # Use logger if its usable - if test -S /dev/log; then - command logger --no-act -s "$@" - return 0 - fi - - # Otherwise, use a simple polyfill implementation that provides a similar - # enough interface to be used across scripts. - local tag - local message - local format - - # polyfill in a logger that writes to stderr - while [ "$#" -ne 0 ]; do - case "$1" in - -t ) - tag="$2" - shift 1 - ;; - -*|-- ) - # drop options - ;; - * ) - # message - message=( "$@" ) - break - ;; - esac - shift 1 - done - - # Message printer format - format="${tag:+"$tag: "}%s\n" - - # Single message in function call - if [[ "${#message[@]}" -ne 0 ]]; then - printf "$format" "${message[*]}" >&2 - return 0 - fi - - # Stream of messages sent to function as input - while read msg; do - printf "$format" "${msg}" >&2 - done - - return 0 -} - -# has_command returns true for present commands -has_command() { - local name="${1:?has_command requires a name to check}" - command -v "$name" &>/dev/null -} diff --git a/tools/infra/container/scripts/configure-rust b/tools/infra/container/scripts/configure-rust deleted file mode 100755 index 38e96310..00000000 --- a/tools/infra/container/scripts/configure-rust +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash -# -# confgure-rust - configure container environment for rust usage -# -# usage: -# -# configure-rust -# - -# shellcheck source=../runtime/lib/lib.bash -source "$RUNTIME_SCRIPT_LIB/lib.bash" - -configure_color_output() { - # Prepare Cargo for build and CI usage - PATH="$HOME/.cargo/bin:$PATH" - mkdir -p .cargo "$HOME/.cargo/bin" - - # Manage cargo configs in $HOME and the working directory to configure runs - # from the project root. - cargo_configs="$HOME/.cargo/config" - if [ "$(pwd)" = "$HOME" ]; then - cargo_configs="$cargo_configs $(pwd)/.cargo/config" - fi - for cargo_config in $cargo_configs; do - if [ -f "$cargo_config" ] && grep -qF '[term]' "$cargo_config"; then - logger -s -t WARN "unable to manage existing cargo config in $cargo_config" - continue - fi - # Configure Cargo to print without ascii coloring. - printf '\n[term]\nverbose = false\ncolor = "never"\n' >> "$cargo_config" - done - unset cargo_configs - - # Configure cargo-build to print without ascii coloring. 
- mkdir -p "$HOME/.config/cargo-make" - cargo_make_config="$HOME/.config/cargo-make/config.toml" - if [ -f "$cargo_make_config" ] && grep -q '^disable_color' "$cargo_make_config"; then - logger -s -t WARN "unable to manage existing cargo-make config in $cargo_make_config" - else - printf '\ndisable_color = true\n' >> "$HOME/.config/cargo-make/config.toml" - fi - unset cargo_make_config -} - -configure_color_output - diff --git a/tools/infra/container/scripts/install-crates b/tools/infra/container/scripts/install-crates deleted file mode 100755 index ae120be7..00000000 --- a/tools/infra/container/scripts/install-crates +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# -# install-crates - install binary crate dependencies -# -# usage: -# -# install-crates -# - -# shellcheck source=../runtime/lib/lib.bash -source "$RUNTIME_SCRIPT_LIB/lib.bash" - -cargo_dep() { - local cargo_install_package=( "$@" ) - logger -s -t INFO "installing cargo dep with 'cargo install ${cargo_install_package[*]}'" - if ! cargo install --force "${cargo_install_package[@]}"; then - logger -s -t ERROR "failed to install dep with 'cargo install ${cargo_install_package[*]}'" - exit 1 - fi -} - -# Install build tooling dependencies -cargo_dep --version 0.23.0 cargo-make -cargo_dep --version 0.6.2 cargo-deny - diff --git a/tools/infra/container/scripts/install-rust b/tools/infra/container/scripts/install-rust deleted file mode 100755 index e056fd08..00000000 --- a/tools/infra/container/scripts/install-rust +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# -# install-rust - install rust for use in builder containers -# -# usage: -# -# install-rust -# - -# shellcheck source=../runtime/lib/lib.bash -source "$RUNTIME_SCRIPT_LIB/lib.bash" - -install_rust_toolchain() { - local toolchain="${1:-stable}" - test -f rustup-init.sh || rm -f rustup-init.sh - if ! curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sS "https://sh.rustup.rs"; then - logger -s -t ERROR "could not fetch rustup, needed for managing rust toolchain" - exit 1 - fi - if ! 
bash rustup-init.sh -y --profile minimal --no-modify-path --default-toolchain "$toolchain" ; then - logger -s -t ERROR "could not setup rustup & rust toolchain for build" - exit 1 - fi - rm -f rustup-init.sh -} - -install_rust_toolchain "stable" diff --git a/tools/infra/stacks/Makefile b/tools/infra/stacks/Makefile deleted file mode 100644 index 594e53a5..00000000 --- a/tools/infra/stacks/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -SHELL := bash -stacks := $(wildcard *.yml */*.yml) - -list: - @printf "%s\n" $(stacks) - -validate: $(addprefix validate/,$(stacks)) - -validate/%: - $(info validating stack: $*) - aws cloudformation validate-template --template-body "$$(< $*)" - -check: check-readme - -check-readme: - @$(foreach stack,$(stacks:.yml=),\ - grep -Fw -q -e "$(stack)" README.md ||\ - { MISSING=1; echo "Missing README section mentioning $(stack)"; }; )\ - $${MISSING:+exit 1} - -.PHONY: test list diff --git a/tools/infra/stacks/bottlerocket-develop-pipeline-ami-build.yml b/tools/infra/stacks/bottlerocket-develop-pipeline-ami-build.yml deleted file mode 100644 index 302c0566..00000000 --- a/tools/infra/stacks/bottlerocket-develop-pipeline-ami-build.yml +++ /dev/null @@ -1,311 +0,0 @@ -# stack-name: bottlerocket-develop-pipeline-ami-build -# stack-resource-context: build - -Description: | - Supporting resources for the AMI Build pipeline action - -Parameters: - KeyPairName: - Description: >- - Name of the EC2 key-pair resource for launching against KeyPairParameter's - SSH key - Type: String - Default: ami-build-key - - KeyPairParameter: - Description: >- - SSM Parameter path that holds the key-pair's private ssh material - Type: String - Default: /infra/ami-build/instance/key-pair - - EnvironmentImageName: - Type: AWS::SSM::Parameter::Value - Default: /infra/container/infra/builder - Description: >- - Parameter that defines the image name the builder uses as its execution - environment *without* a tag (eg: registry/image-name, not - registry/image-name:tag). The EnvironmentImageTag Parameter provides the - appropriate tag separately. - - EnvironmentImageTag: - Type: String - Default: latest - Description: >- - The image 'tag' (as in registry/image-name:tag) to select of the EnvironmentImage - provided. - - ImageCredentialsType: - Type: String - Default: CODEBUILD - AllowedValues: [ CODEBUILD, SERVICE_ROLE ] - Description: >- - If image policy does not trust codebuild.amazonaws.com OR cross-account - role is needed, then the SERVICE_ROLE must be specified to use the role - assigned to the build project. - - BuildVPCId: - Type: AWS::EC2::VPC::Id - Description: >- - The VPC ID that the AMI build instance will be launched into. - -Resources: - AmiBuildStage: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - ServiceRole: !GetAtt BuildRole.Arn - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref ImageCredentialsType - EnvironmentVariables: - - Name: KEYPAIR_NAME - Type: PLAINTEXT - Value: !Ref KeyPairName - - Name: KEYPAIR_PARAMETER - Type: PLAINTEXT - Value: !Ref KeyPairParameter - - Name: SECURITY_GROUP_ID - Type: PLAINTEXT - Value: !GetAtt AMIBuildSecurityGroup.GroupId - - # TODO: resolve these using release-lib based cli (when it exists!) 
- - Name: BUILD_IMAGE_ROOT - Type: PLAINTEXT - Value: bottlerocket-x86_64-aws-k8s.img.lz4 - - Name: BUILD_IMAGE_DATA - Type: PLAINTEXT - Value: bottlerocket-x86_64-aws-k8s-data.img.lz4 - TimeoutInMinutes: 30 - Source: - Type: CODEPIPELINE - BuildSpec: | - version: 0.2 - phases: - pre_build: - commands: - - start-build-environment - - environment-report - - write-build-meta - # List CODEPIPELINE and CODEBUILD variables for active-work on these projects. - # - # TODO: Remove printing of the environment variables here at a later time. - - env | grep -e '^CODE' -e 'infra/' -e '\bbottlerocket\b' | sort | sed 's/^/# /' - build: - commands: - - create-image-ami - artifacts: - base-directory: build/ - files: - - ami/* - - BuildRole: - Type: AWS::IAM::Role - Properties: - Path: !Sub "/${AWS::StackName}/" - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - - AmiBuildPolicy: - Type: AWS::IAM::Policy - Properties: - PolicyName: AmiBuildPolicy - Roles: - - !Ref BuildRole - PolicyDocument: - Version: "2012-10-17" - Statement: - - Sid: "readImageResources" - Effect: Allow - Action: - - ec2:DescribeImages - - ec2:DescribeInstances - - ec2:DescribeVolumes - - ec2:DescribeSnapshots - - ec2:DescribeImages - Resource: "*" - - - Sid: "resolveLatestImage" - Effect: Allow - Action: - - ssm:GetParameter - Resource: - # SSM Public Parameters used for querying the publishing latest Amazon Linux AMIs - - !Sub "arn:${AWS::Partition}:ssm:${AWS::Region}:*:parameter/aws/service/*" - - # TODO: switch to using Launch Templates to restrict *and* provide - # instance configuration for launching with more "statically". - - Sid: "launchBuildInstance" - Effect: Allow - Action: - - ec2:RunInstances - Resource: "*" - Condition: - StringEqualsIfExists: - # Require a recognized distributed AMI to build on to prevent - # injection of another image. - "ec2:Owner": "amazon" - # Keep all actions within the stacks' region (and thus the - # builder's). - "ec2:Region": !Sub "${AWS::Region}" - - - Sid: "manageBuildInstanceResources" - Effect: Allow - Action: - - ec2:TerminateInstances - - ec2:DeleteVolume - - ec2:DetachVolume - Resource: "*" - Condition: - StringEqualsIfExists: - # Keep all actions within the stacks' region (and thus the - # builder's). - "ec2:Region": !Sub "${AWS::Region}" - - - Sid: "registerImages" - Effect: Allow - Action: - - ec2:CreateSnapshot - - ec2:RegisterImage - Resource: "*" - Condition: - StringEqualsIfExists: - "ec2:Region": !Sub "${AWS::Region}" - - - Sid: "keypairManageSSM" - Effect: Allow - Action: - - ssm:PutParameter - - ssm:GetParameter - Resource: - - !Sub "arn:${AWS::Partition}:ssm:${AWS::Region}:${AWS::AccountId}:parameter${KeyPairParameter}" - - - Sid: "keypairManageEC2" - Effect: Allow - Action: - - ec2:DescribeKeyPairs - - ec2:ImportKeyPair - Resource: "*" - - - Sid: "keypairDecrypt" - Effect: Allow - Action: - - kms:Decrypt - Resource: !Sub "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:alias/aws/ssm" - - PipelineBuildPolicy: - Type: AWS::IAM::Policy - Properties: - PolicyName: PipelineBuildPolicy - Roles: - - !Ref BuildRole - PolicyDocument: - Version: "2012-10-17" - Statement: - # This is from the codepipelines' generated policy that it attaches to - # CodeBuild project (among other statements). This does limit the - # builder to working with specific objects. - # - # TODO: Define these with a stack managed pipeline's bucket and refer - # to its scoped resources. 
- # - - Sid: "codepipelineIO" - Effect: Allow - Action: - - s3:PutObject - - s3:GetObject - - s3:GetObjectVersion - - s3:GetBucketAcl - - s3:GetBucketLocation - Resource: !Sub "arn:aws:s3:::codepipeline-${AWS::Region}-*" - - ProjectPolicy: - Type: AWS::IAM::Policy - Properties: - PolicyName: BuildRolePolicy - Roles: - - !Ref BuildRole - PolicyDocument: - Version: "2012-10-17" - Statement: - # For managing cache, logs, and artifacts in the build's buckets. - - Sid: "manageBuildArtifacts" - Effect: Allow - Action: - - s3:GetObject* - - s3:GetBucket* - - s3:List* - - s3:PutObject* - - s3:Abort* - Resource: - - !GetAtt BuildArtifactBucket.Arn - - !Sub "${BuildArtifactBucket.Arn}/*" - - !GetAtt BuildLogBucket.Arn - - !Sub "${BuildLogBucket.Arn}/*" - # For writing to CloudWatch Logs Streams for each build. - - Sid: "manageBuildLogs" - Effect: Allow - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Resource: - - !GetAtt BuildLogGroup.Arn - - BuildArtifactBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogGroup: - Type: AWS::Logs::LogGroup - - AMIBuildSecurityGroup: - Type: AWS::EC2::SecurityGroup - Properties: - GroupName: !Sub "${AWS::StackName}.AMIBuildSecurityGroup" - GroupDescription: AMI Build Instance Access - VpcId: !Ref BuildVPCId - SecurityGroupEgress: - - CidrIp: 0.0.0.0/0 - IpProtocol: -1 - FromPort: -1 - ToPort: -1 - SecurityGroupIngress: - - Description: "Allow SSH Connections - IPv4" - CidrIp: 0.0.0.0/0 - IpProtocol: tcp - FromPort: 22 - ToPort: 22 - - Description: "Allow ICMPv4" - CidrIp: 0.0.0.0/0 - IpProtocol: icmp - FromPort: -1 - ToPort: -1 - -Outputs: - ArtifactBucket: - Value: !Ref BuildArtifactBucket - LogBucket: - Value: !Ref BuildLogBucket - Project: - Value: !Ref AmiBuildStage diff --git a/tools/infra/stacks/bottlerocket-pr-build.yml b/tools/infra/stacks/bottlerocket-pr-build.yml deleted file mode 100644 index 3c3511ad..00000000 --- a/tools/infra/stacks/bottlerocket-pr-build.yml +++ /dev/null @@ -1,154 +0,0 @@ -# stack-name: bottlerocket-pr-build -# stack-require: infra-container - -Parameters: - BuildSpecPath: - Type: String - AllowedPattern: '.+\.yml$' - Default: "./tools/infra/buildspec/bottlerocket-pr-build.yml" - Description: >- - The path to the buildspec.yml file to use. - SourceGitHubRepositoryURL: - Type: String - Default: "https://github.com/amazonlinux/PRIVATE-thar.git" - AllowedPattern: 'https://github.com/.+/.+\.git$' - ConstraintDescription: "Source URL must be a GitHub repository (with its .git suffix)" - Description: >- - The GitHub repository that builds run for. Your account must be authorized - to modify this repository's settings. - EnvironmentImageName: - Type: AWS::SSM::Parameter::Value - Default: /infra/container/infra/builder - Description: >- - Parameter that defines the image name the builder uses as its execution - environment *without* a tag (eg: registry/image-name, not - registry/image-name:tag). The EnvironmentImageTag Parameter provides the - appropriate tag separately. - EnvironmentImageTag: - Type: String - Default: latest - Description: >- - The image 'tag' (as in registry/image-name:tag) to select of the EnvironmentImage - provided. 
- ImageCredentialsType: - Type: String - Default: CODEBUILD - AllowedValues: [ CODEBUILD, SERVICE_ROLE ] - Description: >- - If image policy does not trust codebuild.amazonaws.com OR cross-account - role is needed, then the SERVICE_ROLE must be specified to use the role - assigned to the build project. - -Resources: - BuildArtifactBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogGroup: - Type: AWS::Logs::LogGroup - - BuildRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - - BuildRolePolicy: - Type: AWS::IAM::Policy - Properties: - PolicyName: BuildRolePolicy - Roles: - - !Ref BuildRole - PolicyDocument: - Version: "2012-10-17" - Statement: - # For managing cache, logs, and artifacts in the build's buckets. - - Sid: "manageBuildArtifacts" - Effect: Allow - Action: - - s3:GetObject* - - s3:GetBucket* - - s3:List* - - s3:PutObject* - - s3:Abort* - Resource: - - !GetAtt BuildArtifactBucket.Arn - - !Sub "${BuildArtifactBucket.Arn}/*" - - !GetAtt BuildLogBucket.Arn - - !Sub "${BuildLogBucket.Arn}/*" - # For writing to CloudWatch Logs Streams for each build. - - Sid: "manageBuildLogs" - Effect: Allow - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Resource: - - !GetAtt BuildLogGroup.Arn - - BottlerocketPRBuild: - Type: AWS::CodeBuild::Project - DependsOn: - - BuildArtifactBucket - - BuildLogBucket - - BuildLogGroup - - BuildRole - Properties: - Artifacts: - Name: build - Location: !Ref BuildArtifactBucket - NamespaceType: BUILD_ID - Packaging: NONE - Path: artifact/ - Type: S3 - SecondaryArtifacts: - - Name: meta - ArtifactIdentifier: meta - Location: !Ref BuildArtifactBucket - NamespaceType: BUILD_ID - Packaging: NONE - Path: artifact/ - Type: S3 - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref ImageCredentialsType - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: !Ref BuildSpecPath - Location: !Ref SourceGitHubRepositoryURL - ReportBuildStatus: true - Type: GITHUB - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 180 - Triggers: - Webhook: true - FilterGroups: - - - Type: EVENT - Pattern: PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED - -Outputs: - ArtifactBucket: - Value: !Ref BuildArtifactBucket - LogBucket: - Value: !Ref BuildLogBucket - Project: - Value: !Ref BottlerocketPRBuild diff --git a/tools/infra/stacks/canaries/canary-infra.yml b/tools/infra/stacks/canaries/canary-infra.yml deleted file mode 100644 index 9c21f481..00000000 --- a/tools/infra/stacks/canaries/canary-infra.yml +++ /dev/null @@ -1,88 +0,0 @@ -Resources: - VPC: - Type: 'AWS::EC2::VPC' - Properties: - CidrBlock: 10.0.0.0/16 - VPCInternetGateway: - Type: 'AWS::EC2::InternetGateway' - AttachGateway: - Type: 'AWS::EC2::VPCGatewayAttachment' - Properties: - VpcId: !Ref VPC - InternetGatewayId: !Ref VPCInternetGateway - CanaryECSCluster: - Type: 'AWS::ECS::Cluster' - Properties: {} - CanaryTaskExecutionRole: - Type: 'AWS::IAM::Role' - Properties: - RoleName: CanaryTaskExecutionRole - AssumeRolePolicyDocument: - Statement: - - Effect: Allow - 
Principal: - Service: ecs-tasks.amazonaws.com - Action: - - 'sts:AssumeRole' - Policies: - - PolicyName: AmazonECSTaskExecutionPolicy - PolicyDocument: - Statement: - - Effect: Allow - Action: - - 'ecr:GetAuthorizationToken' - - 'ecr:BatchCheckLayerAvailability' - - 'ecr:GetDownloadUrlForLayer' - - 'ecr:BatchGetImage' - - 'logs:CreateLogStream' - - 'logs:PutLogEvents' - - 'logs:DescribeLogStreams' - - 'logs:GetLogEvents' - - 'cloudwatch:PutMetricData' - Resource: '*' - CanaryTaskEventRole: - Type: 'AWS::IAM::Role' - Properties: - RoleName: CanaryTaskEventRole - AssumeRolePolicyDocument: - Statement: - - Effect: Allow - Principal: - Service: events.amazonaws.com - Action: - - 'sts:AssumeRole' - Policies: - - PolicyName: AmazonEC2ContainerServiceEventsRole - PolicyDocument: - Statement: - - Effect: Allow - Action: - - 'ecs:RunTask' - - 'iam:PassRole' - Resource: '*' -Outputs: - VPCId: - Description: "Canary VPC ID" - Value: !Ref VPC - Export: - Name: !Sub "${AWS::StackName}-VpcId" - VPCInternetGatewayId: - Description: "Canary VPC Internet Gateway ID" - Value: !Ref VPCInternetGateway - Export: - Name: !Sub "${AWS::StackName}-VpcInternetGatewayId" - ECSClusterArn: - Description: "ECS Cluster ARN" - Value: !GetAtt CanaryECSCluster.Arn - Export: - Name: !Sub "${AWS::StackName}-ECSClusterArn" - CanaryTaskExecutionRoleArn: - Description: "Canary Task Execution Role ARN" - Value: !GetAtt CanaryTaskExecutionRole.Arn - Export: - Name: !Sub "${AWS::StackName}-CanaryTaskExecutionRoleArn" - CanaryTaskEventRoleArn: - Description: "Canary Task Event Role ARN" - Value: !GetAtt CanaryTaskEventRole.Arn - Export: - Name: !Sub "${AWS::StackName}-CanaryTaskEventRoleArn" diff --git a/tools/infra/stacks/canaries/repo-canaries.yml b/tools/infra/stacks/canaries/repo-canaries.yml deleted file mode 100644 index ee96bb87..00000000 --- a/tools/infra/stacks/canaries/repo-canaries.yml +++ /dev/null @@ -1,380 +0,0 @@ -Parameters: - CanaryInfraStack: - Type: String - Description: 'Enter the name of the canary infrastructure stack that sets up the VPC and ECS cluster.' - TaskName: - Type: String - Default: repo-canary - Description: 'Enter the name of the canary container image.' - SNSSubscriptionEndpoint: - Type: String - Description: 'Enter the SNS subscription endpoint for CloudWatch alarms.' - SNSSubscriptionProtocol: - Type: String - Default: email - Description: 'Enter the SNS subscription endpoint protocol. Default is email.' - RepoMetadataBaseUrl: - Type: String - Description: 'Enter the metadata base url that specifies the TUF repository metadata files source.' - RepoTargetBaseUrl: - Type: String - Description: 'Enter the target base url that specifies where the listed targets in the TUF repository can be retrieved.' 
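As a sketch only of how a stack from this template could be created: the stack name and the endpoint/URL values below are placeholders, while the parameter keys and template path come from this file. The CanaryInfraStack value must name an already-deployed canary-infra stack, since its exported values are imported by the resources that follow.

    aws cloudformation deploy \
        --stack-name repo-canaries \
        --template-file tools/infra/stacks/canaries/repo-canaries.yml \
        --parameter-overrides \
            CanaryInfraStack=canary-infra \
            TaskName=repo-canary \
            SNSSubscriptionEndpoint=oncall@example.com \
            SNSSubscriptionProtocol=email \
            RepoMetadataBaseUrl=https://updates.example.com/metadata \
            RepoTargetBaseUrl=https://updates.example.com/targets
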
-Resources: - VPCRouteTable: - Type: 'AWS::EC2::RouteTable' - Properties: - VpcId: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-VpcId" - ExternalRoute: - Type: 'AWS::EC2::Route' - Properties: - DestinationCidrBlock: 0.0.0.0/0 - GatewayId: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-VpcInternetGatewayId" - RouteTableId: !Ref VPCRouteTable - VPCSubnet: - Type: 'AWS::EC2::Subnet' - Properties: - CidrBlock: 10.0.0.0/24 - VpcId: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-VpcId" - SubnetRouteAssociation: - Type: 'AWS::EC2::SubnetRouteTableAssociation' - Properties: - RouteTableId: !Ref VPCRouteTable - SubnetId: !Ref VPCSubnet - VPCSecurityGroup: - Type: 'AWS::EC2::SecurityGroup' - Properties: - GroupDescription: !Sub 'Allow traffic to ${TaskName}' - VpcId: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-VpcId" - CheckUpcomingExpirationLogGroup: - Type: 'AWS::Logs::LogGroup' - Properties: - LogGroupName: !Sub '/ecs/${TaskName}-check-upcoming-expiration' - RetentionInDays: 30 - RetrieveTargetsLogGroup: - Type: 'AWS::Logs::LogGroup' - Properties: - LogGroupName: !Sub '/ecs/${TaskName}-retrieve-targets' - RetentionInDays: 30 - FailedStartLogGroup: - Type: 'AWS::Logs::LogGroup' - Properties: - LogGroupName: !Sub '/aws/events/${TaskName}-start-failures' - FoundUpcomingExpirationLogGroup: - Type: 'AWS::Logs::LogGroup' - Properties: - LogGroupName: !Sub '/aws/events/${TaskName}-found-upcoming-expiration' - RepoValidationFailureLogGroup: - Type: 'AWS::Logs::LogGroup' - Properties: - LogGroupName: !Sub '/aws/events/${TaskName}-validation-failures' - TUFRepoCanarySNSNotificationTopic: - Type: 'AWS::SNS::Topic' - Properties: - DisplayName: TUF Repo Canary Notifications - TUFRepoCanarySNSSubscription: - Type: 'AWS::SNS::Subscription' - Properties: - Endpoint: !Sub '${SNSSubscriptionEndpoint}' - Protocol: !Sub '${SNSSubscriptionProtocol}' - TopicArn: !Ref TUFRepoCanarySNSNotificationTopic - TUFRepoCanaryImgRepo: - Type: 'AWS::ECR::Repository' - Properties: - RepositoryName: !Sub '${TaskName}' - - CheckUpcomingExpirationTask: - Type: 'AWS::ECS::TaskDefinition' - Properties: - TaskRoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskExecutionRoleArn" - ExecutionRoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskExecutionRoleArn" - Family: !Sub '${TaskName}-check-upcoming-expiration' - NetworkMode: awsvpc - Cpu: '256' - Memory: '512' - RequiresCompatibilities: - - FARGATE - ContainerDefinitions: - - Name: !Sub '${TaskName}-check-upcoming-expiration' - Command: - - '--metadata-base-url' - - !Sub '${RepoMetadataBaseUrl}' - - '--target-base-url' - - !Sub '${RepoTargetBaseUrl}' - - '--trusted-root-path' - - '/usr/share/repo-canary/root.json' - - '--check-upcoming-expiration-days' - - '3' - - '--percentage-targets-to-retrieve' - - '0' - Memory: 512 - Cpu: 256 - Image: !Join - - / - - - !Join - - . - - - !Ref 'AWS::AccountId' - - dkr.ecr - - !Ref 'AWS::Region' - - amazonaws.com - - !Sub '${TaskName}:latest' - LogConfiguration: - LogDriver: awslogs - Options: - awslogs-group: !Ref CheckUpcomingExpirationLogGroup - awslogs-region: !Ref 'AWS::Region' - awslogs-stream-prefix: 'ecs' - CheckUpcomingExpirationScheduledTask: - Type: 'AWS::Events::Rule' - Properties: - Description: !Sub 'Schedules ${TaskName}-check-upcoming-expiration task to run every 20 minutes starting at minute 0 of the hour' - ScheduleExpression: cron(0/20 * * * ? 
*) - State: ENABLED - Targets: - - Id: !Sub '${TaskName}-check-upcoming-expiration-Fargate-Task' - RoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskEventRoleArn" - EcsParameters: - TaskDefinitionArn: !Ref CheckUpcomingExpirationTask - TaskCount: 1 - LaunchType: FARGATE - PlatformVersion: LATEST - NetworkConfiguration: - AwsVpcConfiguration: - AssignPublicIp: ENABLED - SecurityGroups: - - !Ref VPCSecurityGroup - Subnets: - - !Ref VPCSubnet - Arn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-ECSClusterArn" - CheckExpirationFailedInvocationAlarm: - Type: 'AWS::CloudWatch::Alarm' - Properties: - AlarmName: TUF Repo Canary check expiration task invocation failure - AlarmDescription: 'TUF Repo Canary check upcoming expiration task could not be invoked' - Namespace: AWS/Events - MetricName: FailedInvocations - Dimensions: - - Name: RuleName - Value: !Ref CheckUpcomingExpirationScheduledTask - Period: 1200 - Statistic: Average - Threshold: 0 - TreatMissingData: notBreaching - ComparisonOperator: GreaterThanThreshold - EvaluationPeriods: 3 - DatapointsToAlarm: 2 - ActionsEnabled: true - AlarmActions: - - !Ref TUFRepoCanarySNSNotificationTopic - CheckUpcomingExpirationRule: - Type: 'AWS::Events::Rule' - Properties: - Description: 'TUF Repo Canary check-upcoming-expiration task exited with code 73 (found upcoming metadata expirations) or 68 (already expired)' - Name: !Sub '${TaskName}-upcoming-metadata-expiration' - EventPattern: !Sub '{"source": ["aws.ecs"],"detail-type": ["ECS Task State Change"],"detail": {"containers": {"name": ["${TaskName}-check-upcoming-expiration"],"exitCode":[73,68],"lastStatus": ["STOPPED"]}}}' - Targets: - - Arn: !Join - - ":" - - - "arn:aws:logs" - - !Ref 'AWS::Region' - - !Ref 'AWS::AccountId' - - 'log-group' - - !Ref FoundUpcomingExpirationLogGroup - Id: 'tuf-repo-metadata-upcoming-expiration' - CheckUpcomingExpirationAlarm: - Type: 'AWS::CloudWatch::Alarm' - Properties: - AlarmName: TUF repository metadata files upcoming expiration - AlarmDescription: 'TUF Repo Canary found metadata files that are about to expire in 3 days' - Namespace: AWS/Events - MetricName: TriggeredRules - Dimensions: - - Name: RuleName - Value: !Ref CheckUpcomingExpirationRule - Period: 1200 - Statistic: Average - Threshold: 0 - TreatMissingData: notBreaching - ComparisonOperator: GreaterThanThreshold - EvaluationPeriods: 1 - DatapointsToAlarm: 1 - ActionsEnabled: true - AlarmActions: - - !Ref TUFRepoCanarySNSNotificationTopic - - RetrieveTargetTask: - Type: 'AWS::ECS::TaskDefinition' - Properties: - TaskRoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskExecutionRoleArn" - ExecutionRoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskExecutionRoleArn" - Family: !Sub '${TaskName}-retrieve-targets' - NetworkMode: awsvpc - Cpu: '256' - Memory: '512' - RequiresCompatibilities: - - FARGATE - ContainerDefinitions: - - Name: !Sub '${TaskName}-retrieve-targets' - Command: - - '--metadata-base-url' - - !Sub '${RepoMetadataBaseUrl}' - - '--target-base-url' - - !Sub '${RepoTargetBaseUrl}' - - '--trusted-root-path' - - '/usr/share/repo-canary/root.json' - - '--percentage-targets-to-retrieve' - - '30' - Memory: 512 - Cpu: 256 - Image: !Join - - / - - - !Join - - . 
- - - !Ref 'AWS::AccountId' - - dkr.ecr - - !Ref 'AWS::Region' - - amazonaws.com - - !Sub '${TaskName}:latest' - LogConfiguration: - LogDriver: awslogs - Options: - awslogs-group: !Ref RetrieveTargetsLogGroup - awslogs-region: !Ref 'AWS::Region' - awslogs-stream-prefix: 'ecs' - RetrieveTargetScheduledTask: - Type: 'AWS::Events::Rule' - Properties: - Description: !Sub 'Schedules ${TaskName}-retrieve-targets task to run every 20 minutes starting at minute 10 of the hour' - ScheduleExpression: cron(10/20 * * * ? *) - State: ENABLED - Targets: - - Id: !Sub '${TaskName}-retrieve-targets-Fargate-Task' - RoleArn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-CanaryTaskEventRoleArn" - EcsParameters: - TaskDefinitionArn: !Ref RetrieveTargetTask - TaskCount: 1 - LaunchType: FARGATE - PlatformVersion: LATEST - NetworkConfiguration: - AwsVpcConfiguration: - AssignPublicIp: ENABLED - SecurityGroups: - - !Ref VPCSecurityGroup - Subnets: - - !Ref VPCSubnet - Arn: - Fn::ImportValue: - !Sub "${CanaryInfraStack}-ECSClusterArn" - RetrieveTargetFailedInvocationAlarm: - Type: 'AWS::CloudWatch::Alarm' - Properties: - AlarmName: TUF Repo Canary retrieve targets invocation failure - AlarmDescription: 'TUF Repo Canary retrieve targets task could not be invoked' - Namespace: AWS/Events - MetricName: FailedInvocations - Dimensions: - - Name: RuleName - Value: !Ref RetrieveTargetScheduledTask - Period: 1200 - Statistic: Average - Threshold: 0 - TreatMissingData: notBreaching - ComparisonOperator: GreaterThanThreshold - EvaluationPeriods: 3 - DatapointsToAlarm: 2 - ActionsEnabled: true - AlarmActions: - - !Ref TUFRepoCanarySNSNotificationTopic - - ValidationFailedRule: - Type: 'AWS::Events::Rule' - Properties: - Description: 'TUF Repo Canary task exited with non-zero exit code' - Name: !Sub '${TaskName}-non-zero-exit-code' - EventPattern: !Sub '{"source": ["aws.ecs"],"detail-type": ["ECS Task State Change"],"detail": {"containers": {"name": ["${TaskName}-check-upcoming-expiration","${TaskName}-retrieve-targets"],"exitCode":[1,64,65,66,67,68,69,70,71,72],"lastStatus": ["STOPPED"]}}}' - Targets: - - Arn: !Join - - ":" - - - "arn:aws:logs" - - !Ref 'AWS::Region' - - !Ref 'AWS::AccountId' - - 'log-group' - - !Ref RepoValidationFailureLogGroup - Id: 'tuf-repo-validation-failure' - ValidationFailedAlarm: - Type: 'AWS::CloudWatch::Alarm' - Properties: - AlarmName: TUF repository validation failure - AlarmDescription: 'TUF Repo Canary failed to validate TUF repository' - Namespace: AWS/Events - MetricName: TriggeredRules - Dimensions: - - Name: RuleName - Value: !Ref ValidationFailedRule - Period: 900 - Statistic: Average - Threshold: 0 - TreatMissingData: notBreaching - ComparisonOperator: GreaterThanThreshold - EvaluationPeriods: 4 - DatapointsToAlarm: 3 - ActionsEnabled: true - AlarmActions: - - !Ref TUFRepoCanarySNSNotificationTopic - - TUFRepoCanaryFailedStartAlarm: - Type: 'AWS::CloudWatch::Alarm' - Properties: - AlarmName: TUF Repo Canary Task Start Failure - AlarmDescription: 'TUF Repo Canary task failed to start' - Namespace: AWS/Events - MetricName: TriggeredRules - Dimensions: - - Name: RuleName - Value: !Ref TUFRepoCanaryFailedStartRule - Period: 600 - Statistic: Average - Threshold: 0 - TreatMissingData: notBreaching - ComparisonOperator: GreaterThanThreshold - EvaluationPeriods: 4 - DatapointsToAlarm: 3 - ActionsEnabled: true - AlarmActions: - - !Ref TUFRepoCanarySNSNotificationTopic - TUFRepoCanaryFailedStartRule: - Type: 'AWS::Events::Rule' - Properties: - Description: 'TUF Repo Canary fail to start' - Name: 
'repo-canary-failed-to-start' - EventPattern: !Sub '{"source": ["aws.ecs"],"detail-type": ["ECS Task State Change"],"detail": {"containers": {"name": ["${TaskName}-retrieve-targets","${TaskName}-check-upcoming-expiration"],"lastStatus": ["STOPPED"]},"stoppedReason":["Task failed to start"]}}' - Targets: - - Arn: !Join - - ":" - - - "arn:aws:logs" - - !Ref 'AWS::Region' - - !Ref 'AWS::AccountId' - - 'log-group' - - !Ref FailedStartLogGroup - Id: !Sub '${TaskName}-failed-start' diff --git a/tools/infra/stacks/host-containers-pr-build.yml b/tools/infra/stacks/host-containers-pr-build.yml deleted file mode 100644 index 9d907c89..00000000 --- a/tools/infra/stacks/host-containers-pr-build.yml +++ /dev/null @@ -1,110 +0,0 @@ -# stack-name: host-containers-pr-build - -Parameters: - BuildSpecPath: - Type: String - AllowedPattern: '.+\.yml$' - Default: "./tools/infra/buildspec/host-containers-pr-build.yml" - Description: "The path to the buildspec.yml file to use." - SourceGitHubRepositoryURL: - Type: String - Default: "https://github.com/amazonlinux/PRIVATE-thar.git" - AllowedPattern: 'https://github.com/.+/.+\.git$' - ConstraintDescription: "Source URL must be a GitHub repository (with its .git suffix)" - Description: >- - The GitHub repository that builds run for. Your account must be authorized - to modify this repository's settings. -Resources: - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogGroup: - Type: AWS::Logs::LogGroup - - BuildRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - - BuildRolePolicy: - Type: AWS::IAM::Policy - Properties: - Roles: - - !Ref BuildRole - PolicyName: BuildRolePolicy - PolicyDocument: - Version: "2012-10-17" - Statement: - # For managing cache, logs, and artifacts in the build's buckets. - - Action: - - s3:GetObject* - - s3:GetBucket* - - s3:List* - - s3:PutObject* - - s3:Abort* - Effect: Allow - Resource: - - !GetAtt BuildLogBucket.Arn - - Fn::Join: - - "" - - - !GetAtt BuildLogBucket.Arn - - /* - # For writing to CloudWatch Logs Streams for each build. 
- - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: - - !GetAtt BuildLogGroup.Arn - - HostContainersPRBuild: - Type: AWS::CodeBuild::Project - DependsOn: - - BuildLogGroup - - BuildLogBucket - - BuildRole - Properties: - LogsConfig: - S3Logs: - Status: ENABLED - Location: - Fn::Join: - - "/" - - - !GetAtt BuildLogBucket.Arn - - "codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - Artifacts: - Type: NO_ARTIFACTS - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Image: aws/codebuild/standard:2.0 - PrivilegedMode: true - Type: LINUX_CONTAINER - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: !Ref BuildSpecPath - Location: !Ref SourceGitHubRepositoryURL - ReportBuildStatus: true - Type: GITHUB - TimeoutInMinutes: 60 - Triggers: - Webhook: true - FilterGroups: - - - Type: EVENT - Pattern: PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED - -Outputs: - LogBucket: - Value: !Ref BuildLogBucket - Project: - Value: !Ref HostContainersPRBuild diff --git a/tools/infra/stacks/infra-container.yml b/tools/infra/stacks/infra-container.yml deleted file mode 100644 index 2d48f05d..00000000 --- a/tools/infra/stacks/infra-container.yml +++ /dev/null @@ -1,163 +0,0 @@ -# stack-name: infra-container - -AWSTemplateFormatVersion: "2010-09-09" - -Description: >- - Infra's container images ECR repositories used in release automation. - -Parameters: - SSMPathNamespace: - Type: String - Default: /infra/container - AllowedPattern: '^/.*[^/]$' - Description: > - Namespace under which SSM Parameters will be created for container images (should start but *not* end with '/') - -Resources: - SDKx8664Repo: - Type: AWS::ECR::Repository - Metadata: - Source: extras/sdk-container - Properties: - RepositoryName: bottlerocket/sdk-x86_64 - - SDKx8664Parameter: - Type: AWS::SSM::Parameter - Properties: - Name: !Sub "${SSMPathNamespace}/${SDKx8664Repo}" - Type: String - Value: !Sub "${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${SDKx8664Repo}" - - SDKaarch64Repo: - Type: AWS::ECR::Repository - Metadata: - Source: extras/sdk-container - Properties: - RepositoryName: bottlerocket/sdk-aarch64 - - SDKaarch64Parameter: - Type: AWS::SSM::Parameter - Properties: - Name: !Sub "${SSMPathNamespace}/${SDKaarch64Repo}" - Type: String - Value: !Sub "${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${SDKaarch64Repo}" - - BuilderRepo: - Type: AWS::ECR::Repository - Metadata: - Source: tools/infra/container/Dockerfile.builder - Properties: - RepositoryName: infra/builder - RepositoryPolicyText: - Version: "2012-10-17" - Statement: - - Sid: "codeBuildPull" - Effect: Allow - Principal: - Service: "codebuild.amazonaws.com" - Action: - - "ecr:GetDownloadUrlForLayer" - - "ecr:BatchGetImage" - - "ecr:BatchCheckLayerAvailability" - - BuilderParameter: - Type: AWS::SSM::Parameter - Properties: - Name: !Sub "${SSMPathNamespace}/${BuilderRepo}" - Type: String - Value: !Sub "${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${BuilderRepo}" - - SigningRepo: - Type: AWS::ECR::Repository - Metadata: - Source: tools/infra/container/Dockerfile.signing - Properties: - RepositoryName: infra/signing - RepositoryPolicyText: - Version: "2012-10-17" - Statement: - - Sid: "codeBuildPull" - Effect: Allow - Principal: - Service: "codebuild.amazonaws.com" - Action: - - "ecr:GetDownloadUrlForLayer" - - "ecr:BatchGetImage" - - "ecr:BatchCheckLayerAvailability" - - SigningParameter: - Type: AWS::SSM::Parameter - Properties: - Name: !Sub "${SSMPathNamespace}/${SigningRepo}" - Type: 
String - Value: !Sub "${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${SigningRepo}" - - PullPolicy: - Type: AWS::IAM::ManagedPolicy - Properties: - ManagedPolicyName: pull - Path: !Sub "/${AWS::StackName}/" - PolicyDocument: - Version: '2012-10-17' - Statement: - - Sid: "imagePull" - Effect: Allow - Resource: - - !GetAtt BuilderRepo.Arn - - !GetAtt SigningRepo.Arn - - !GetAtt SDKx8664Repo.Arn - - !GetAtt SDKaarch64Repo.Arn - Action: - - "ecr:GetAuthorizationToken" - - "ecr:BatchCheckLayerAvailability" - - "ecr:GetDownloadUrlForLayer" - - "ecr:ListImages" - - "ecr:DescribeImages" - - "ecr:BatchGetImage" - - "ecr:ListTagsForResource" - - Sid: "imageResolve" - Effect: Allow - Resource: - - !Sub "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/${AWS::StackName}/*" - Action: - - "ssm:GetParameter" - - "ssm:GetParameters" - - "ssm:GetParametersByPath" - - "ssm:DescribeParameters" - - PushPolicy: - Type: AWS::IAM::ManagedPolicy - Properties: - ManagedPolicyName: push - Path: !Sub "/${AWS::StackName}/" - PolicyDocument: - Version: '2012-10-17' - Statement: - - Sid: "imagePush" - Effect: Allow - Resource: - - !GetAtt BuilderRepo.Arn - - !GetAtt SigningRepo.Arn - - !GetAtt SDKx8664Repo.Arn - - !GetAtt SDKaarch64Repo.Arn - Action: - - "ecr:GetAuthorizationToken" - - "ecr:BatchCheckLayerAvailability" - - "ecr:GetDownloadUrlForLayer" - - "ecr:DescribeImages" - - "ecr:BatchGetImage" - - "ecr:ListTagsForResource" - - "ecr:InitiateLayerUpload" - - "ecr:UploadLayerPart" - - "ecr:CompleteLayerUpload" - - "ecr:PutImage" - -Outputs: - PullPolicy: - Export: - Name: !Sub "${AWS::StackName}-pull-policy" - Value: !Ref PullPolicy - PushPolicy: - Export: - Name: !Sub "${AWS::StackName}-push-policy" - Value: !Ref PushPolicy diff --git a/tools/infra/stacks/infra-pr-build.yml b/tools/infra/stacks/infra-pr-build.yml deleted file mode 100644 index 9e9094b2..00000000 --- a/tools/infra/stacks/infra-pr-build.yml +++ /dev/null @@ -1,153 +0,0 @@ -# stack-name: infra-pr-build -# stack-require: infra-container - -Parameters: - BuildSpecPath: - Type: String - AllowedPattern: '.+\.yml$' - Default: "./tools/infra/buildspec/infra-pr-build.yml" - Description: "The path to the buildspec.yml file to use." - SourceGitHubRepositoryURL: - Type: String - Default: "https://github.com/amazonlinux/PRIVATE-thar.git" - AllowedPattern: 'https://github.com/.+/.+\.git$' - ConstraintDescription: "Source URL must be a GitHub repository (with its .git suffix)" - Description: >- - The GitHub repository that builds run for. Your account must be authorized - to modify this repository's settings. - EnvironmentImageName: - Type: AWS::SSM::Parameter::Value - Default: /infra/container/infra/builder - Description: >- - Parameter that defines the image name the builder uses as its execution - environment *without* a tag (eg: registry/image-name, not - registry/image-name:tag). The EnvironmentImageTag Parameter provides the - appropriate tag separately. - EnvironmentImageTag: - Type: String - Default: latest - Description: >- - The image 'tag' (as in registry/image-name:tag) to select of the EnvironmentImage - provided. - ImageCredentialsType: - Type: String - Default: CODEBUILD - AllowedValues: [ CODEBUILD, SERVICE_ROLE ] - Description: >- - If image policy does not trust codebuild.amazonaws.com OR cross-account - role is needed, then the SERVICE_ROLE must be specified to use the role - assigned to the build project. 
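As a rough sketch of standing this stack up (the stack name matches the template's stack-name header; the template path and parameter keys come from this file): the template can be checked with validate-template, mirroring the stacks Makefile's validate target, and creating it needs an IAM capability because a role and policy are defined under Resources. The EnvironmentImageName default resolves the /infra/container/infra/builder SSM parameter published by the infra-container stack above.

    # Validate the template first (same call the stacks Makefile makes).
    aws cloudformation validate-template \
        --template-body "$(< tools/infra/stacks/infra-pr-build.yml)"

    # Create or update the stack; EnvironmentImageName is resolved from SSM at deploy time.
    aws cloudformation deploy \
        --stack-name infra-pr-build \
        --template-file tools/infra/stacks/infra-pr-build.yml \
        --capabilities CAPABILITY_IAM \
        --parameter-overrides EnvironmentImageTag=latest
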
- -Resources: - BuildArtifactBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - - BuildLogGroup: - Type: AWS::Logs::LogGroup - - BuildRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - - BuildRolePolicy: - Type: AWS::IAM::Policy - Properties: - PolicyName: BuildRolePolicy - Roles: - - !Ref BuildRole - PolicyDocument: - Version: "2012-10-17" - Statement: - # For validation API usage performed on templates during build. - - Sid: "validateCfnTemplates" - Effect: Allow - Action: - - cloudformation:ValidateTemplate - Resource: "*" - # For managing cache, logs, and artifacts in the build's buckets. - - Sid: "manageBuildArtifacts" - Effect: Allow - Action: - - s3:GetObject* - - s3:GetBucket* - - s3:List* - - s3:PutObject* - - s3:Abort* - Resource: - - !GetAtt BuildArtifactBucket.Arn - - !Sub "${BuildArtifactBucket.Arn}/*" - - !GetAtt BuildLogBucket.Arn - - !Sub "${BuildLogBucket.Arn}/*" - # For writing to CloudWatch Logs Streams for each build. - - Sid: "manageBuildLogs" - Effect: Allow - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Resource: - - !GetAtt BuildLogGroup.Arn - - InfraPRBuild: - Type: AWS::CodeBuild::Project - DependsOn: - - BuildArtifactBucket - - BuildLogBucket - - BuildLogGroup - - BuildRole - Properties: - Artifacts: - Location: !Ref BuildArtifactBucket - Name: / - NamespaceType: BUILD_ID - Packaging: NONE - Path: artifact/ - Type: S3 - Environment: - ComputeType: BUILD_GENERAL1_SMALL - Type: LINUX_CONTAINER - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref ImageCredentialsType - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: !Ref BuildSpecPath - Location: !Ref SourceGitHubRepositoryURL - ReportBuildStatus: true - Type: GITHUB - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildArtifactBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 10 - Triggers: - Webhook: true - FilterGroups: - - - Type: EVENT - Pattern: PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED - -Outputs: - ArtifactBucket: - Value: - Ref: BuildArtifactBucket - LogBucket: - Value: - Ref: BuildLogBucket - Project: - Value: - Ref: InfraPRBuild diff --git a/tools/infra/stacks/pipelines/admin-container-pipeline.yml b/tools/infra/stacks/pipelines/admin-container-pipeline.yml deleted file mode 100644 index 0cb1eb65..00000000 --- a/tools/infra/stacks/pipelines/admin-container-pipeline.yml +++ /dev/null @@ -1,161 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: Bottlerocket admin container pipeline -Parameters: - CodeBuildResourceStack: - Type: String - Description: 'Enter the name of the CloudFormation stack that sets up the pipeline CodeBuild projects.' - BranchName: - Description: GitHub branch name - Type: String - Default: develop - AllowedPattern: "[A-Za-z0-9-]+" - RepositoryName: - Description: Repository name - Type: String - AllowedPattern: "[A-Za-z0-9-]+" - GitHubOwner: - Description: Owner of the repository - Type: String - Default: amazonlinux - AllowedPattern: "[A-Za-z0-9-]+" - GitHubSecretToken: - Description: Github personal access token. 
(Account must have permission to create webhooks) - Type: String - NoEcho: true - MinLength: 40 - MaxLength: 40 - AllowedPattern: '[a-z0-9]*' - -Resources: - CodePipelineServiceRole: - Type: 'AWS::IAM::Role' - Properties: - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - codepipeline.amazonaws.com - Action: 'sts:AssumeRole' - Path: !Sub "/${AWS::StackName}/" - Policies: - - PolicyName: HostContainerPipelineRolePolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 'codebuild:BatchGetBuilds' - - 'codebuild:StartBuild' - Resource: '*' - - Effect: Allow - Action: - - s3:PutObject - - s3:GetObject - - s3:GetObjectVersion - - s3:GetBucketAcl - - s3:GetBucketLocation - Resource: - Fn::Join: - - '/' - - - Fn::ImportValue: !Sub "${CodeBuildResourceStack}-ArtifactBucketArn" - - '*' - - BottlerocketAdminCtrPipeline: - Type: 'AWS::CodePipeline::Pipeline' - Properties: - Name: bottlerocket-admin-ctr-pipeline - RoleArn: !GetAtt - - CodePipelineServiceRole - - Arn - Stages: - - Name: Source - Actions: - - Name: SourceAction - ActionTypeId: - Category: Source - Owner: ThirdParty - Version: '1' - Provider: GitHub - OutputArtifacts: - - Name: SourceOutput - Configuration: - Owner: !Ref GitHubOwner - Repo: !Ref RepositoryName - Branch: !Ref BranchName - OAuthToken: !Ref GitHubSecretToken - PollForSourceChanges: false - RunOrder: 1 - - Name: Build - Actions: - - Name: BuildAction - InputArtifacts: - - Name: SourceOutput - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - OutputArtifacts: - - Name: BuildArtifact - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Build" - RunOrder: 1 - - Name: Test - Actions: - - Name: TestAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Test" - PrimarySource: SourceOutput - RunOrder: 1 - - Name: BetaDeploy - Actions: - - Name: BetaDeployAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-BetaDeploy" - PrimarySource: SourceOutput - RunOrder: 1 - ArtifactStore: - Type: S3 - Location: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-ArtifactBucket" - - BottlerocketAdminWebhook: - Type: 'AWS::CodePipeline::Webhook' - Properties: - Authentication: GITHUB_HMAC - AuthenticationConfiguration: - SecretToken: !Ref GitHubSecretToken - RegisterWithThirdParty: true - Filters: - - JsonPath: "$.ref" - MatchEquals: refs/heads/{Branch} - - JsonPath: "$.head_commit.modified[*]" - MatchEquals: "extras/host-containers/bottlerocket-admin/VERSION" - TargetPipeline: !Ref BottlerocketAdminCtrPipeline - TargetAction: SourceAction - TargetPipelineVersion: !GetAtt BottlerocketAdminCtrPipeline.Version diff --git a/tools/infra/stacks/pipelines/control-container-pipeline.yml b/tools/infra/stacks/pipelines/control-container-pipeline.yml deleted file mode 100644 index 01c0ae42..00000000 --- a/tools/infra/stacks/pipelines/control-container-pipeline.yml +++ /dev/null @@ -1,161 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: Bottlerocket control container pipeline -Parameters: - CodeBuildResourceStack: - Type: String - Description: 'Enter the name of the 
CloudFormation stack that sets up the pipeline CodeBuild projects.' - BranchName: - Description: GitHub branch name - Type: String - Default: develop - AllowedPattern: "[A-Za-z0-9-]+" - RepositoryName: - Description: Repository name - Type: String - AllowedPattern: "[A-Za-z0-9-]+" - GitHubOwner: - Description: Owner of the repository - Type: String - Default: amazonlinux - AllowedPattern: "[A-Za-z0-9-]+" - GitHubSecretToken: - Description: Github personal access token. (Account must have permission to create webhooks) - Type: String - NoEcho: true - MinLength: 40 - MaxLength: 40 - AllowedPattern: '[a-z0-9]*' - -Resources: - CodePipelineServiceRole: - Type: 'AWS::IAM::Role' - Properties: - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - codepipeline.amazonaws.com - Action: 'sts:AssumeRole' - Path: !Sub "/${AWS::StackName}/" - Policies: - - PolicyName: HostContainerPipelineRolePolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 'codebuild:BatchGetBuilds' - - 'codebuild:StartBuild' - Resource: '*' - - Effect: Allow - Action: - - s3:PutObject - - s3:GetObject - - s3:GetObjectVersion - - s3:GetBucketAcl - - s3:GetBucketLocation - Resource: - Fn::Join: - - '/' - - - Fn::ImportValue: !Sub "${CodeBuildResourceStack}-ArtifactBucketArn" - - '*' - - BottlerocketControlCtrPipeline: - Type: 'AWS::CodePipeline::Pipeline' - Properties: - Name: bottlerocket-control-ctr-pipeline - RoleArn: !GetAtt - - CodePipelineServiceRole - - Arn - Stages: - - Name: Source - Actions: - - Name: SourceAction - ActionTypeId: - Category: Source - Owner: ThirdParty - Version: '1' - Provider: GitHub - OutputArtifacts: - - Name: SourceOutput - Configuration: - Owner: !Ref GitHubOwner - Repo: !Ref RepositoryName - Branch: !Ref BranchName - OAuthToken: !Ref GitHubSecretToken - PollForSourceChanges: false - RunOrder: 1 - - Name: Build - Actions: - - Name: BuildAction - InputArtifacts: - - Name: SourceOutput - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - OutputArtifacts: - - Name: BuildArtifact - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Build" - RunOrder: 1 - - Name: Test - Actions: - - Name: TestAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Test" - PrimarySource: SourceOutput - RunOrder: 1 - - Name: BetaDeploy - Actions: - - Name: BetaDeployAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-BetaDeploy" - PrimarySource: SourceOutput - RunOrder: 1 - ArtifactStore: - Type: S3 - Location: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-ArtifactBucket" - - BottlerocketControlWebhook: - Type: 'AWS::CodePipeline::Webhook' - Properties: - Authentication: GITHUB_HMAC - AuthenticationConfiguration: - SecretToken: !Ref GitHubSecretToken - RegisterWithThirdParty: true - Filters: - - JsonPath: "$.ref" - MatchEquals: refs/heads/{Branch} - - JsonPath: "$.head_commit.modified[*]" - MatchEquals: "extras/host-containers/bottlerocket-control/VERSION" - TargetPipeline: !Ref BottlerocketControlCtrPipeline - TargetAction: SourceAction - TargetPipelineVersion: !GetAtt 
BottlerocketControlCtrPipeline.Version diff --git a/tools/infra/stacks/pipelines/dogswatch-container-pipeline-codebuild-projects.yml b/tools/infra/stacks/pipelines/dogswatch-container-pipeline-codebuild-projects.yml deleted file mode 100644 index 8a083b5f..00000000 --- a/tools/infra/stacks/pipelines/dogswatch-container-pipeline-codebuild-projects.yml +++ /dev/null @@ -1,252 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: Dogswatch container pipeline - CodeBuild projects -Parameters: - ImageRegistryUri: - Type: String - Description: "The beta image registry URI" - EnvironmentImageName: - Type: AWS::SSM::Parameter::Value - Default: /infra/container/infra/builder - Description: >- - Parameter that defines the image name the builder uses as its execution - environment *without* a tag (eg: registry/image-name, not - registry/image-name:tag). The EnvironmentImageTag Parameter provides the - appropriate tag separately. - EnvironmentImageTag: - Type: String - Default: latest - Description: >- - The image 'tag' (as in registry/image-name:tag) to select of the EnvironmentImage - provided. - EnvironmentImageCredentialsType: - Type: String - Default: CODEBUILD - AllowedValues: [ CODEBUILD, SERVICE_ROLE ] - Description: >- - If image policy does not trust codebuild.amazonaws.com OR cross-account - role is needed, then the SERVICE_ROLE must be specified to use the role - assigned to the build project. -Resources: - PipelineArtifactBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - BuildLogGroup: - Type: AWS::Logs::LogGroup - BuildRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - Path: !Sub "/${AWS::StackName}/" - Policies: - - PolicyName: HostContainerBuildRolePolicy - PolicyDocument: - Version: "2012-10-17" - Statement: - # For managing cache, logs, and artifacts in the build's buckets. - - Action: - - 's3:GetObject' - - 's3:GetObjectVersion' - - 's3:GetBucketVersioning' - - 's3:GetBucketAcl' - - 's3:GetBucketLocation' - - 's3:PutObject' - - 's3:PutObjectAcl' - - 's3:DeleteObject' - - 's3:DeleteObjectVersion' - Effect: Allow - Resource: - - !GetAtt PipelineArtifactBucket.Arn - - !Sub "${PipelineArtifactBucket.Arn}/*" - # For writing to CloudWatch Logs Streams for each build. 
- - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: - - !GetAtt BuildLogGroup.Arn - # For being able to push to other ECR repositories - - Effect: Allow - Action: - - 'ecr:GetAuthorizationToken' - - 'ecr:InitiateLayerUpload' - - 'ecr:UploadLayerPart' - - 'ecr:CompleteLayerUpload' - - 'ecr:BatchCheckLayerAvailability' - - 'ecr:PutImage' - Resource: '*' - DogswatchCtrBuild: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Cache: - Type: LOCAL - Modes: - - LOCAL_DOCKER_LAYER_CACHE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - env: - variables: - # Path to host containers folder - EXTRAS_DIR: "./extras" - # Path to built host container images - IMAGES_DIR: "./build/host-container-images" - phases: - pre_build: - commands: - - start-build-environment - - environment-report - - mkdir -p "${IMAGES_DIR}" - build: - commands: - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - make -C "${EXTRAS_DIR}/dogswatch" SHORT_SHA="${SHORT_SHA}" release-container - - docker save dogswatch:"${SHORT_SHA}" | gzip > "${IMAGES_DIR}"/dogswatch.tar.gz - artifacts: - base-directory: '${IMAGES_DIR}' - files: - - '*.tar.gz' - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 180 - - DogswatchCtrTest: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - env: - variables: - # Path to host containers folder - EXTRAS_DIR: "./extras" - phases: - pre_build: - commands: - - start-build-environment - - environment-report - build: - commands: - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - docker load -i ${CODEBUILD_SRC_DIR_BuildArtifact}/dogswatch.tar.gz - - make -C "${EXTRAS_DIR}/dogswatch" SHORT_SHA="${SHORT_SHA}" container-simple-test - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 180 - - DogswatchCtrBetaDeploy: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - EnvironmentVariables: - - Name: 'IMAGE_REGISTRY_URI' - Type: 'PLAINTEXT' - Value: !Sub '${ImageRegistryUri}' - - Name: 'CONTAINER_IMAGE' - Type: 'PLAINTEXT' - Value: 'dogswatch' - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - env: - variables: - # Path to host containers folder - EXTRAS_DIR: "./extras" - phases: - pre_build: - commands: - - start-build-environment - - environment-report - build: - commands: - - docker load -i 
${CODEBUILD_SRC_DIR_BuildArtifact}/${CONTAINER_IMAGE}.tar.gz - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - docker tag "${CONTAINER_IMAGE}":"${SHORT_SHA}" "${IMAGE_REGISTRY_URI}":"${SHORT_SHA}" - - docker push "${IMAGE_REGISTRY_URI}":"${SHORT_SHA}" - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 60 - -Outputs: - PipelineArtifactBucketName: - Description: "Pipeline artifact bucket name" - Value: !Ref PipelineArtifactBucket - Export: - Name: !Sub "${AWS::StackName}-ArtifactBucket" - PipelineArtifactBucketArn: - Description: "Pipeline artifact bucket ARN" - Value: !GetAtt PipelineArtifactBucket.Arn - Export: - Name: !Sub "${AWS::StackName}-ArtifactBucketArn" - BuildProjectName: - Description: "Build step CodeBuild project name" - Value: !Ref DogswatchCtrBuild - Export: - Name: !Sub "${AWS::StackName}-Build" - TestProjectName: - Description: "Test step CodeBuild project name" - Value: !Ref DogswatchCtrTest - Export: - Name: !Sub "${AWS::StackName}-Test" - BetaDeployProjectName: - Description: "Beta deploy step CodeBuild project name" - Value: !Ref DogswatchCtrBetaDeploy - Export: - Name: !Sub "${AWS::StackName}-BetaDeploy" \ No newline at end of file diff --git a/tools/infra/stacks/pipelines/dogswatch-container-pipeline.yml b/tools/infra/stacks/pipelines/dogswatch-container-pipeline.yml deleted file mode 100644 index d5aeb4c4..00000000 --- a/tools/infra/stacks/pipelines/dogswatch-container-pipeline.yml +++ /dev/null @@ -1,161 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: Dogswatch container pipeline -Parameters: - CodeBuildResourceStack: - Type: String - Description: 'Enter the name of the CloudFormation stack that sets up the pipeline CodeBuild projects.' - BranchName: - Description: GitHub branch name - Type: String - Default: develop - AllowedPattern: "[A-Za-z0-9-]+" - RepositoryName: - Description: Repository name - Type: String - AllowedPattern: "[A-Za-z0-9-]+" - GitHubOwner: - Description: Owner of the repository - Type: String - Default: amazonlinux - AllowedPattern: "[A-Za-z0-9-]+" - GitHubSecretToken: - Description: Github personal access token. 
(Account must have permission to create webhooks) - Type: String - NoEcho: true - MinLength: 40 - MaxLength: 40 - AllowedPattern: '[a-z0-9]*' - -Resources: - CodePipelineServiceRole: - Type: 'AWS::IAM::Role' - Properties: - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - codepipeline.amazonaws.com - Action: 'sts:AssumeRole' - Path: !Sub "/${AWS::StackName}/" - Policies: - - PolicyName: HostContainerPipelineRolePolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 'codebuild:BatchGetBuilds' - - 'codebuild:StartBuild' - Resource: '*' - - Effect: Allow - Action: - - s3:PutObject - - s3:GetObject - - s3:GetObjectVersion - - s3:GetBucketAcl - - s3:GetBucketLocation - Resource: - Fn::Join: - - '/' - - - Fn::ImportValue: !Sub "${CodeBuildResourceStack}-ArtifactBucketArn" - - '*' - - DogswatchCtrPipeline: - Type: 'AWS::CodePipeline::Pipeline' - Properties: - Name: dogswatch-ctr-pipeline - RoleArn: !GetAtt - - CodePipelineServiceRole - - Arn - Stages: - - Name: Source - Actions: - - Name: SourceAction - ActionTypeId: - Category: Source - Owner: ThirdParty - Version: '1' - Provider: GitHub - OutputArtifacts: - - Name: SourceOutput - Configuration: - Owner: !Ref GitHubOwner - Repo: !Ref RepositoryName - Branch: !Ref BranchName - OAuthToken: !Ref GitHubSecretToken - PollForSourceChanges: false - RunOrder: 1 - - Name: Build - Actions: - - Name: BuildAction - InputArtifacts: - - Name: SourceOutput - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - OutputArtifacts: - - Name: BuildArtifact - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Build" - RunOrder: 1 - - Name: Test - Actions: - - Name: TestAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-Test" - PrimarySource: SourceOutput - RunOrder: 1 - - Name: BetaDeploy - Actions: - - Name: BetaDeployAction - InputArtifacts: - - Name: SourceOutput - - Name: BuildArtifact - ActionTypeId: - Category: Build - Owner: AWS - Version: '1' - Provider: CodeBuild - Configuration: - ProjectName: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-BetaDeploy" - PrimarySource: SourceOutput - RunOrder: 1 - ArtifactStore: - Type: S3 - Location: - Fn::ImportValue: - !Sub "${CodeBuildResourceStack}-ArtifactBucket" - - DogswatchWebhook: - Type: 'AWS::CodePipeline::Webhook' - Properties: - Authentication: GITHUB_HMAC - AuthenticationConfiguration: - SecretToken: !Ref GitHubSecretToken - RegisterWithThirdParty: true - Filters: - - JsonPath: "$.ref" - MatchEquals: refs/heads/{Branch} - - JsonPath: "$.head_commit.modified[*]" - MatchEquals: "extras/dogswatch/VERSION" - TargetPipeline: !Ref DogswatchCtrPipeline - TargetAction: SourceAction - TargetPipelineVersion: !GetAtt DogswatchCtrPipeline.Version diff --git a/tools/infra/stacks/pipelines/host-container-pipeline-codebuild-projects.yml b/tools/infra/stacks/pipelines/host-container-pipeline-codebuild-projects.yml deleted file mode 100644 index 62d522ea..00000000 --- a/tools/infra/stacks/pipelines/host-container-pipeline-codebuild-projects.yml +++ /dev/null @@ -1,263 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: Bottlerocket host containers pipeline - CodeBuild steps resources -Parameters: - ImageRegistryUri: - Type: String - Description: "The beta image registry 
URI" - ContainerName: - Type: String - Description: "Name of the container image" - AllowedValues: - - "bottlerocket-admin" - - "bottlerocket-control" - EnvironmentImageName: - Type: AWS::SSM::Parameter::Value - Default: /infra/container/infra/builder - Description: >- - Parameter that defines the image name the builder uses as its execution - environment *without* a tag (eg: registry/image-name, not - registry/image-name:tag). The EnvironmentImageTag Parameter provides the - appropriate tag separately. - EnvironmentImageTag: - Type: String - Default: latest - Description: >- - The image 'tag' (as in registry/image-name:tag) to select of the EnvironmentImage - provided. - EnvironmentImageCredentialsType: - Type: String - Default: CODEBUILD - AllowedValues: [ CODEBUILD, SERVICE_ROLE ] - Description: >- - If image policy does not trust codebuild.amazonaws.com OR cross-account - role is needed, then the SERVICE_ROLE must be specified to use the role - assigned to the build project. - -Resources: - PipelineArtifactBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - BuildLogBucket: - Type: AWS::S3::Bucket - UpdateReplacePolicy: Retain - DeletionPolicy: Retain - BuildLogGroup: - Type: AWS::Logs::LogGroup - BuildRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: "2012-10-17" - Statement: - - Action: sts:AssumeRole - Effect: Allow - Principal: - Service: codebuild.amazonaws.com - Path: / - Policies: - - PolicyName: HostContainerBuildRolePolicy - PolicyDocument: - Version: "2012-10-17" - Statement: - # For managing cache, logs, and artifacts in the build's buckets. - - Action: - - 's3:GetObject' - - 's3:GetObjectVersion' - - 's3:GetBucketVersioning' - - 's3:GetBucketAcl' - - 's3:GetBucketLocation' - - 's3:PutObject' - - 's3:PutObjectAcl' - - 's3:DeleteObject' - - 's3:DeleteObjectVersion' - Effect: Allow - Resource: - - !GetAtt PipelineArtifactBucket.Arn - - !Sub "${PipelineArtifactBucket.Arn}/*" - # For writing to CloudWatch Logs Streams for each build. 
- - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: - - !GetAtt BuildLogGroup.Arn - # For being able to push to ECR repositories - - Effect: Allow - Action: - - 'ecr:GetAuthorizationToken' - - 'ecr:InitiateLayerUpload' - - 'ecr:UploadLayerPart' - - 'ecr:CompleteLayerUpload' - - 'ecr:BatchCheckLayerAvailability' - - 'ecr:PutImage' - Resource: '*' - HostCtrBuild: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Cache: - Type: LOCAL - Modes: - - LOCAL_DOCKER_LAYER_CACHE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - EnvironmentVariables: - - Name: 'CONTAINER_IMAGE' - Type: 'PLAINTEXT' - Value: !Sub '${ContainerName}' - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - env: - variables: - # Path to host containers folder - HOST_CONTAINERS_DIR: "./extras/host-containers" - # Path to built host container images - IMAGES_DIR: "./build/host-container-images" - phases: - pre_build: - commands: - - start-build-environment - - environment-report - - mkdir -p "${IMAGES_DIR}" - build: - commands: - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - make -C "${HOST_CONTAINERS_DIR}/${CONTAINER_IMAGE}" SHORT_SHA="${SHORT_SHA}" DESTDIR="${IMAGES_DIR}" dist - artifacts: - base-directory: '${IMAGES_DIR}' - files: - - '*.tar.gz' - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 180 - - HostCtrTest: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - EnvironmentVariables: - - Name: 'CONTAINER_IMAGE' - Type: 'PLAINTEXT' - Value: !Sub '${ContainerName}' - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - env: - variables: - # Path to host containers folder - HOST_CONTAINERS_DIR: "./extras/host-containers" - phases: - pre_build: - commands: - - start-build-environment - - environment-report - build: - commands: - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - docker load -i ${CODEBUILD_SRC_DIR_BuildArtifact}/*${SHORT_SHA}*.tar.gz - - make -C "${HOST_CONTAINERS_DIR}/${CONTAINER_IMAGE}" SHORT_SHA="${SHORT_SHA}" check - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 180 - - HostCtrBetaDeploy: - Type: AWS::CodeBuild::Project - Properties: - Artifacts: - Type: CODEPIPELINE - Environment: - ComputeType: BUILD_GENERAL1_LARGE - Type: LINUX_CONTAINER - PrivilegedMode: true - Image: !Sub "${EnvironmentImageName}:${EnvironmentImageTag}" - ImagePullCredentialsType: !Ref EnvironmentImageCredentialsType - EnvironmentVariables: - - Name: 'IMAGE_REGISTRY_URI' - Type: 'PLAINTEXT' - Value: !Sub '${ImageRegistryUri}' - - Name: 'CONTAINER_IMAGE' - Type: 'PLAINTEXT' - Value: !Sub '${ContainerName}' - ServiceRole: !GetAtt BuildRole.Arn - Source: - BuildSpec: | - version: 0.2 - - phases: - pre_build: - commands: - - 
start-build-environment - - environment-report - build: - commands: - - SHORT_SHA="$(head -c 8 <<< "$CODEBUILD_RESOLVED_SOURCE_VERSION")" - - PUSH_TAG="${IMAGE_REGISTRY_URI}":"${SHORT_SHA}" - - IMAGE_ID="$(docker load -i ${CODEBUILD_SRC_DIR_BuildArtifact}/*${SHORT_SHA}*.tar.gz | awk -F': ' '{print $NF}')" - - docker tag "${IMAGE_ID:?IMAGE_ID not returned from load}" "${PUSH_TAG}" - - docker push "${PUSH_TAG}" - Type: CODEPIPELINE - LogsConfig: - S3Logs: - Status: ENABLED - Location: !Sub "${BuildLogBucket.Arn}/codebuild/log" - CloudWatchLogs: - Status: ENABLED - GroupName: !Ref BuildLogGroup - TimeoutInMinutes: 60 - -Outputs: - PipelineArtifactBucketName: - Description: "Pipeline artifact bucket name" - Value: !Ref PipelineArtifactBucket - Export: - Name: !Sub "${AWS::StackName}-ArtifactBucket" - PipelineArtifactBucketArn: - Description: "Pipeline artifact bucket ARN" - Value: !GetAtt PipelineArtifactBucket.Arn - Export: - Name: !Sub "${AWS::StackName}-ArtifactBucketArn" - BuildProjectName: - Description: "Build step CodeBuild project name" - Value: !Ref HostCtrBuild - Export: - Name: !Sub "${AWS::StackName}-Build" - TestProjectName: - Description: "Test step CodeBuild project name" - Value: !Ref HostCtrTest - Export: - Name: !Sub "${AWS::StackName}-Test" - BetaDeployProjectName: - Description: "Beta deploy step CodeBuild project name" - Value: !Ref HostCtrBetaDeploy - Export: - Name: !Sub "${AWS::StackName}-BetaDeploy" diff --git a/tools/infra/stacks/signing-cross-account-assume-role.yml b/tools/infra/stacks/signing-cross-account-assume-role.yml deleted file mode 100644 index f8be68f4..00000000 --- a/tools/infra/stacks/signing-cross-account-assume-role.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This is expected to be used by the account that owns the CodeBuild projects -# for pipeline automation and should be stood up with those resources. -AWSTemplateFormatVersion: "2010-09-09" -Description: 'A policy that can be added to an existing role, allowing it to assume a role that allows access to signing keys' -Parameters: - SigningRoleArn: - Description: 'The ARN of the role that allows access to the signing keys' - Type: String - -Resources: - AssumeSigningRolePolicy: - Type: AWS::IAM::ManagedPolicy - Properties: - Description: 'Allows assume role access to read-only signing keys' - ManagedPolicyName: SigningKeyAssumeRolePolicy - PolicyDocument: - Version: '2012-10-17' - Statement: - - Effect: Allow - Action: sts:AssumeRole - Resource: !Sub '${SigningRoleArn}' diff --git a/tools/infra/stacks/signing-cross-account-read-role.yml b/tools/infra/stacks/signing-cross-account-read-role.yml deleted file mode 100644 index 161e7d15..00000000 --- a/tools/infra/stacks/signing-cross-account-read-role.yml +++ /dev/null @@ -1,47 +0,0 @@ -# This is expected to be used by the account that owns the keys -# for TUF repo signing and should be stood up with those resources. 
-AWSTemplateFormatVersion: "2010-09-09" -Description: 'A role that allows read-only access to signing keys' -Parameters: - AllowedAccountIds: - Description: 'The AWS accounts that require access to the signing key' - # A comma separated list of account ids, i.e "1234, 5678" - Type: CommaDelimitedList - KMSKeyArn: - Description: "ARN of the KMS key required to decrypt the signing key in the SSM Parameter's SecureString" - Type: String - SigningKeyArn: - Description: 'The ARN of the signing key SSM parameter' - Type: String - -Resources: - SigningAutomationAccessRole: - Type: AWS::IAM::Role - Properties: - Description: 'Role allowing access to the signing key' - Path: !Sub '/${AWS::StackName}/' - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - AWS: !Ref AllowedAccountIds - Action: - - 'sts:AssumeRole' - Policies: - - PolicyName: 'SigningKeyReadOnlyAccess' - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - ssm:Get* - Resource: !Sub '${SigningKeyArn}' - - Effect: Allow - Action: - - kms:Decrypt - Resource: !Sub '${KMSKeyArn}' -Outputs: - Role: - Description: 'The ARN of the signing key read-only role' - Value: !GetAtt SigningAutomationAccessRole.Arn diff --git a/tools/update_sign_tuf_repo/Cargo.lock b/tools/update_sign_tuf_repo/Cargo.lock deleted file mode 100644 index f5e8d707..00000000 --- a/tools/update_sign_tuf_repo/Cargo.lock +++ /dev/null @@ -1,2526 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "aho-corasick" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "anyhow" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arc-swap" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arrayref" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arrayvec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "atty" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "backtrace" -version = "0.3.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "backtrace-sys" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "blake2b_simd" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bumpalo" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byteorder" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bytes" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cargo-readme" -version = "3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cc" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" - 
-[[package]] -name = "chrono" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap" -version = "2.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "core-foundation" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "core-foundation-sys" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "crossbeam-deque" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", - "subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "data_store_version" -version = "0.1.0" -dependencies = [ - "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "dirs" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "dirs" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "dirs-sys" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "doc-comment" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "dtoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "encoding_rs" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "envy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = 
"failure_derive" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fnv" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures" -version = "0.1.29" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-channel" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", 
-] - -[[package]] -name = "getrandom" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "h2" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "h2" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hermit-abi" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hex" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http-body" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "httparse" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "hyper" -version = "0.12.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hyper" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 
1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hyper-tls" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "idna" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "indexmap" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "js-sys" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "libc" -version = "0.2.66" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "lock_api" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "maybe-uninit" -version = 
"2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "md5" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memchr" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memoffset" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "migrator" -version = "0.1.0" -dependencies = [ - "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "simplelog 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mime" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "mime_guess" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio-uds" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miow" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miow" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "native-tls" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "net2" -version = "0.2.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "nix" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "nom" -version = "4.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-integer" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-traits" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num_cpus" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "hermit-abi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "olpc-cjson" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "openssl" -version = "0.10.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "openssl-sys" -version = "0.9.53" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pem" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-project" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "pin-project-internal 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-utils" -version = "0.1.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pkg-config" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ppv-lite86" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro2" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_chacha" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "redox_users" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-syntax" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "remove_dir_all" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "reqwest" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "encoding_rs 0.8.20 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-futures 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", - "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ring" -version = "0.16.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_core" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_signature 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_credential" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - 
"serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-process 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_signature" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "md5 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_ssm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_sts" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rust-argon2" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "schannel" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "scopeguard" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "security-framework" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "security-framework-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_derive" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_json" -version = "1.0.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_plain" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sha2" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - 
"digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "signal-hook" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "signal-hook-registry" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "simplelog" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "smallvec" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "snafu" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "snafu-derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "socket2" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sourcefile" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "subtle" -version = "1.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "syn" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "synstructure" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tempfile" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "term" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "thread_local" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "time" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-fs 
0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-codec" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-executor" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-fs" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-io" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-process" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-signal 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-signal" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-sync" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-timer" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-udp" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-uds" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-util" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - 
"pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "toml" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tough" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pem 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "tracing" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing-attributes 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tracing-core" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "try-lock" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "typenum" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-width" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-xid" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "untrusted" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "update_metadata" -version = "0.1.0" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", - "migrator 0.1.0", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "update_sign_tuf_repo" -version = "0.1.0" -dependencies = [ - "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", - "envy 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_ssm 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_sts 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "simplelog 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tough 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "update_metadata 0.1.0", -] - -[[package]] -name = "url" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "vcpkg" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "vec_map" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "version_check" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasi" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wasm-bindgen" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bumpalo 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" - 
-[[package]] -name = "wasm-bindgen-webidl" -version = "0.2.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "web-sys" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", - "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-webidl 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "weedle" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "xml-rs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" -"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "9267dff192e68f3399525901e709a48c1d3982c9c072fa32f2127a0cb0babf14" -"checksum arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d7b8a9123b8027467bce0099fe556c628a53c8d83df0507084c31e9ba2e39aff" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" -"checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" -"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" -"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" -"checksum backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)" = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea" -"checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" -"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b83b7baab1e671718d78204225800d6b170e648188ac7dc992e9d6bddf87d0c0" -"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -"checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum bumpalo 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ad807f2fc2bf185eeb98ff3a901bd46dc5ad58163d0fa4577ba0d25674d71708" -"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" -"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -"checksum cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f802a8fcc14bebdf651fd33323654451bdd168c884b22c270dfd8afb403a50" -"checksum cc 1.0.47 (registry+https://github.com/rust-lang/crates.io-index)" = "aa87058dce70a3ff5621797f1506cb837edd02ac4c0ae642b4542dce802908b8" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" -"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum constant_time_eq 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "995a44c877f9212528ccc74b21a232f66ad69001e40ede5bcee2ac9ef2657120" -"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" -"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" -"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" -"checksum crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" -"checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" -"checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" -"checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" -"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" -"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" -"checksum encoding_rs 0.8.20 (registry+https://github.com/rust-lang/crates.io-index)" = "87240518927716f79692c2ed85bfe6e98196d18c6401ec75355760233a7e12e9" -"checksum envy 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "261b836bcf13f42a01c70351f56bd7b66db6e6fb58352bd214cb77e9269a34b4" -"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" -"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" -"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" -"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" -"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" -"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" -"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" -"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -"checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" -"checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -"checksum h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "307c3c9f937f38e3534b1d6447ecf090cafcc9744e4a6360e8b037b2cf5af120" -"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" -"checksum hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -"checksum http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" -"checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" -"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)" = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" -"checksum hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fa1c527bbc634be72aa7ba31e4e4def9bbb020f5416916279b7c705cd838893e" -"checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d7b3ea5827fcb9d4fda14bf4da5f136f0db2ae9c8f4bd4e2d1c6fde4e6db2" -"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)" = "1c840fdb2167497b0bd0db43d6dfe61e91637fa72f9d061f8bd17ddc44ba6414" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e57b3997725d2b60dbec1297f6c2e2957cc383db1cebd6be812163f969c7d586" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum md5 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "dd1d63acd1b78403cc0c325605908475dd9b9a3acbf65ed8bcab97e27014afcf" -"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" 
-"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" -"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" -"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226" -"checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" -"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum nix 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dd0eaf8df8bab402257e0a5c17a254e4cc1f72a93588a1ddfb5d356c801aa7cb" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" -"checksum num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c81ffc11c212fa327657cb19dd85eb7419e163b5b076bede2bdb5c974c07e4" -"checksum num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76dac5ed2a876980778b8b85f75a71b6cbf0db0b1232ee12f826bccb00d09d72" -"checksum olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9409e2493366c8f19387c98c5189ab9c937541b5bf48f11390d038a59fdfd9c1" -"checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -"checksum openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3cc5799d98e1088141b8e01ff760112bbd9f19d850c124500566ca6901a585" -"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)" = "465d16ae7fc0e313318f7de5cecf57b2fbe7511fd213978b457e1c96ff46736f" -"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -"checksum pem 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a1581760c757a756a41f0ee3ff01256227bdf64cb752839779b95ffb01c59793" -"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" -"checksum 
pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" -"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" -"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" -"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d" -"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" -"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0e798e19e258bf6c30a304622e3e9ac820e483b06a1857a026e1f109b113fe4" -"checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" -"checksum rusoto_core 
0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1d1ecfe8dac29878a713fbc4c36b0a84a48f7a6883541841cdff9fdd2ba7dfb" -"checksum rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8632e41d289db90dd40d0389c71a23c5489e3afd448424226529113102e2a002" -"checksum rusoto_signature 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7063a70614eb4b36f49bcf4f6f6bb30cc765e3072b317d6afdfe51e7a9f482d1" -"checksum rusoto_ssm 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "655261941f71bedd6b7a1b5cfaff8c2fa1909ea8f8505fad72edf53f27516232" -"checksum rusoto_sts 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "48f912128a23aded0499bec1734ca35d8a33e9dc5bf86b649a5564900efb5464" -"checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf" -"checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" -"checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" -"checksum security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" -"checksum security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "1217f97ab8e8904b57dd22eb61cde455fa7446a9c1cf43966066da047c1f3702" -"checksum serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "a8c6faef9a2e64b0064f48570289b4bf8823b7581f1d6157c1b52152306651d0" -"checksum serde_json 1.0.42 (registry+https://github.com/rust-lang/crates.io-index)" = "1a3351dcbc1f067e2c92ab7c3c1f288ad1a4cffc470b5aaddb4c2e0a3ae80043" -"checksum serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" -"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -"checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" -"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" -"checksum signal-hook 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7a9c17dd3ba2d36023a5c9472ecddeda07e27fd0b05436e8c1e0c8f178185652" -"checksum signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" -"checksum simplelog 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "05a3e303ace6adb0a60a9e9e2fbc6a33e1749d1e43587e2125f7efa9c5e107c5" -"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecf3b85f68e8abaa7555aa5abdb1153079387e60b718283d732f03897fcfc86" -"checksum snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "41207ca11f96a62cd34e6b7fdf73d322b25ae3848eb9d38302169724bb32cf27" -"checksum snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5e338c8b0577457c9dda8e794b6ad7231c96e25b1b0dd5842d52249020c1c0" -"checksum socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85" -"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" -"checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -"checksum subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" -"checksum syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "dff0acdb207ae2fe6d5976617f887eb1e35a2ba52c13c7234c790960cdad9238" -"checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" -"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -"checksum term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" -"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -"checksum tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8fdd17989496f49cdc57978c96f0c9fe5e4a58a8bddc6813c449a4624f6a030b" -"checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" -"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" -"checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" -"checksum tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" -"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" -"checksum tokio-process 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afbd6ef1b8cc2bd2c2b580d882774d443ebb1c6ceefe35ba9ea4ab586c89dbe8" -"checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" -"checksum tokio-signal 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "dd6dc5276ea05ce379a16de90083ec80836440d5ef8a6a39545a3207373b8296" -"checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" -"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" -"checksum tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd2c6a3885302581f4401c82af70d792bb9df1700e7437b0aeb4ada94d5388c" -"checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" -"checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" -"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" -"checksum tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" -"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" -"checksum tough 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "592ee41113b853fa04cc37ecf779bcee856b9f41087779333a0f22480ff30602" -"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" -"checksum tracing 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ff4e4f59e752cb3beb5b61c6d5e11191c7946231ba84faec2902c9efdd8691c5" -"checksum tracing-attributes 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a4263b12c3d3c403274493eb805966093b53214124796552d674ca1dd5d27c2b" -"checksum tracing-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bc913647c520c959b6d21e35ed8fa6984971deca9f0a2fcb8c51207e0c56af1d" -"checksum try-lock 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" -"checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b561e267b2326bb4cebfc0ef9e68355c7abe6c6f522aeac2f5bf95d56c59bdcf" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" -"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" -"checksum vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" -"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -"checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" -"checksum wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "29ae32af33bacd663a9a28241abecf01f2be64e6a185c6139b04f18b6385c5f2" -"checksum wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "1845584bd3593442dc0de6e6d9f84454a59a057722f36f005e44665d6ab19d85" -"checksum wasm-bindgen-futures 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1458706aa1b8fe6898d19433c9f110d93a05d1f22ae6adf55810409a94df34b4" -"checksum wasm-bindgen-macro 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "87fcc747e6b73c93d22c947a6334644d22cfec5abd8b66238484dc2b0aeb9fe4" -"checksum wasm-bindgen-macro-support 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "3dc4b3f2c4078c8c4a5f363b92fcf62604c5913cbd16c6ff5aaf0f74ec03f570" 
-"checksum wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "ca0b78d6d3be8589b95d1d49cdc0794728ca734adf36d7c9f07e6459508bb53d" -"checksum wasm-bindgen-webidl 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "3126356474ceb717c8fb5549ae387c9fbf4872818454f4d87708bee997214bb5" -"checksum web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)" = "98405c0a2e722ed3db341b4c5b70eb9fe0021621f7350bab76df93b09b649bbf" -"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -"checksum xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "541b12c998c5b56aa2b4e6f18f03664eef9a4fd0a246a55594efae6cc2d964b5" diff --git a/tools/update_sign_tuf_repo/Cargo.toml b/tools/update_sign_tuf_repo/Cargo.toml deleted file mode 100644 index 8943e22f..00000000 --- a/tools/update_sign_tuf_repo/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "update_sign_tuf_repo" -version = "0.1.0" -authors = ["Zac Mrowicki "] -edition = "2018" - -[dependencies] -chrono = "0.4" -data_store_version = { path = "../../workspaces/api/data_store_version" } -envy = "0.4.0" -hex = "0.4.0" -log = "0.4.8" -olpc-cjson = "0.1.0" -ring = { version = "0.16.7", features = ["std"] } -rusoto_core = "0.42.0" -rusoto_ssm = "0.42.0" -rusoto_sts = "0.42.0" -semver = { version = "0.9.0", features = ["serde"] } -serde = "1.0.101" -serde_json = "1.0.41" -serde_derive = "1.0.103" -simplelog = "0.7" -snafu = "0.6.0" -tempdir = "0.3.7" -tough = { version = "0.4.0", features = ["http"] } -tracing = "0.1.10" -update_metadata = { path = "../../workspaces/updater/update_metadata" } - -[build-dependencies] -cargo-readme = "3.1" diff --git a/tools/update_sign_tuf_repo/README.md b/tools/update_sign_tuf_repo/README.md deleted file mode 100644 index 02b2fb98..00000000 --- a/tools/update_sign_tuf_repo/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# update_sign_tuf_repo - -Current version: 0.1.0 - -## Introduction - -This tool is meant to update an existing TUF repo with new contents and sign the updated contents. -Given a set of environment variables, it will pull down an existing TUF repo and update the manifest, targets.json, snapshot.json, and timestamp.json. 
-Using a signing key that it pulls down via SSM Secure Parameters, it will sign the updated files, along with any new targets and leave them in a known location to be deployed to a "real" TUF repo at a later step. - -## Running - -In order the run this code, you must have: -* Current `Bottlerocket` code repository (more specifically `Release.toml`, and a trusted `root.json`) -* Built Bottlerocket artifacts in a directory (the images that end up in `/build` and suffixed with `.lz4`) -* The metadata and target URLs for an existing TUF repository (most likely in S3) - -Currently the code expects the following environment variables to be set: -* `CODEBUILD_SRC_DIR` (subject to change) This is the directory where your `Bottlerocket` repository lives -* `ARCH` : architecture for your current set of images (i.e. `x86_64`) -* `FLAVOR` : Variant of Bottlerocket for your current set of images (i.e. `aws-k8s`) -* `INPUT_BUILDSYS_ARTIFACTS` : A directory containing the built Bottlerocket images -* `METADATA_URL` : Metadata URL for your existing TUF repo -* `TARGET_URL` : Target URL for your existing TUF repo -* `REFRESH_DAYS` : After how many days does metadata expire? (an integer, i.e. `7`) -* `TIMESTAMP_REFRESH_DAYS` : After how many days does `timestamp.json` expire? (an integer, i.e. `7`) -* `SIGNING_ROLE_ARN` : ARN for a role that allows access to signing keys (most likely in another account) -* `SIGNING_KEY_PARAMETER_NAME` : The SSM parameter key name for the signing key - -## Output - -After a successful run of this code, you will have a directory `/tmp/tuf_out` which will contain `/metadata` and `/target` directories. -All items (other than `manifest.json`) are signed and are suitable for syncing to your "real" TUF repository. - -## Colophon - -This text was generated using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/main.rs`. diff --git a/tools/update_sign_tuf_repo/README.tpl b/tools/update_sign_tuf_repo/README.tpl deleted file mode 100644 index bf207d02..00000000 --- a/tools/update_sign_tuf_repo/README.tpl +++ /dev/null @@ -1,9 +0,0 @@ -# {{crate}} - -Current version: {{version}} - -{{readme}} - -## Colophon - -This text was generated using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/main.rs`. diff --git a/tools/update_sign_tuf_repo/build.rs b/tools/update_sign_tuf_repo/build.rs deleted file mode 100644 index 49828a1c..00000000 --- a/tools/update_sign_tuf_repo/build.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Automatically generate README.md from rustdoc. - -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; - -fn main() { - // Check for environment variable "SKIP_README". If it is set, - // skip README generation - if env::var_os("SKIP_README").is_some() { - return; - } - - let mut source = File::open("src/main.rs").unwrap(); - let mut template = File::open("README.tpl").unwrap(); - - let content = cargo_readme::generate_readme( - &PathBuf::from("."), // root - &mut source, // source - Some(&mut template), // template - // The "add x" arguments don't apply when using a template. 
- true, // add title - false, // add badges - false, // add license - true, // indent headings - ) - .unwrap(); - - let mut readme = File::create("README.md").unwrap(); - readme.write_all(content.as_bytes()).unwrap(); -} diff --git a/tools/update_sign_tuf_repo/src/main.rs b/tools/update_sign_tuf_repo/src/main.rs deleted file mode 100644 index c26a2513..00000000 --- a/tools/update_sign_tuf_repo/src/main.rs +++ /dev/null @@ -1,744 +0,0 @@ -/*! -# Introduction - -This tool is meant to update an existing TUF repo with new contents and sign the updated contents. -Given a set of environment variables, it will pull down an existing TUF repo and update the manifest, targets.json, snapshot.json, and timestamp.json. -Using a signing key that it pulls down via SSM Secure Parameters, it will sign the updated files, along with any new targets and leave them in a known location to be deployed to a "real" TUF repo at a later step. - -# Running - -In order the run this code, you must have: -* Current `Bottlerocket` code repository (more specifically `Release.toml`, and a trusted `root.json`) -* Built Bottlerocket artifacts in a directory (the images that end up in `/build` and suffixed with `.lz4`) -* The metadata and target URLs for an existing TUF repository (most likely in S3) - -Currently the code expects the following environment variables to be set: -* `CODEBUILD_SRC_DIR` (subject to change) This is the directory where your `Bottlerocket` repository lives -* `ARCH` : architecture for your current set of images (i.e. `x86_64`) -* `VARIANT` : Variant of Bottlerocket for your current set of images (i.e. `aws-k8s`) -* `INPUT_BUILDSYS_ARTIFACTS` : A directory containing the built Bottlerocket images -* `METADATA_URL` : Metadata URL for your existing TUF repo -* `TARGET_URL` : Target URL for your existing TUF repo -* `REFRESH_DAYS` : After how many days does metadata expire? (an integer, i.e. `7`) -* `TIMESTAMP_REFRESH_DAYS` : After how many days does `timestamp.json` expire? (an integer, i.e. `7`) -* `SIGNING_ROLE_ARN` : ARN for a role that allows access to signing keys (most likely in another account) -* `SIGNING_KEY_PARAMETER_NAME` : The SSM parameter key name for the signing key - -# Output - -After a successful run of this code, you will have a directory `/tmp/tuf_out` which will contain `/metadata` and `/target` directories. -All items (other than `manifest.json`) are signed and are suitable for syncing to your "real" TUF repository. 
-*/ - -#[macro_use] -extern crate log; - -use chrono::{Duration, Utc}; -use data_store_version::Version as DataVersion; -use olpc_cjson::CanonicalFormatter; -use ring::digest::{digest, Context, SHA256, SHA256_OUTPUT_LEN}; -use ring::rand::{SecureRandom, SystemRandom}; -use rusoto_core::request::HttpClient; -use rusoto_ssm::{GetParameterRequest, Ssm, SsmClient}; -use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient}; -use semver::Version as SemVer; -use serde::Serialize; -use serde_derive::Deserialize; -use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; -use snafu::{OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs::File; -use std::io::Read; -use std::num::NonZeroU64; -use std::path::Path; -use std::str::FromStr; -use std::{fs, process}; -use tempdir::TempDir; -use tough::schema::decoded::{Decoded, Hex}; -use tough::schema::{ - Hashes, Role, RoleType, Root, Signature, Signed, SnapshotMeta, Target, TimestampMeta, -}; -use tough::sign::{parse_keypair, Sign}; -use tough::{HttpTransport, Limits, Repository, Settings}; -use update_metadata::{Images, Manifest}; - -const EXISTING_TUF_REPO_DIR: &str = "/tmp/tuf_in"; -const UPDATED_TUF_REPO_DIR: &str = "/tmp/tuf_out"; -const ROOT_JSON: &str = "root.json"; -const TUF_MANIFEST_JSON: &str = "manifest.json"; -const RELEASE_TOML: &str = "Release.toml"; -const FILES_TO_SIGN: &[&str] = &["boot", "root", "verity"]; -const OS_NAME: &str = "bottlerocket"; - -mod error { - use snafu::Snafu; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] - pub(super) enum Error { - #[snafu(display("Unable to add migration to manifest: {}", source))] - AddMigration { - source: update_metadata::error::Error, - }, - - #[snafu(display("Unable to add update wave to manifest: {}", source))] - AddWave { - source: update_metadata::error::Error, - }, - - #[snafu(display("Current UTC time should be non-zero"))] - CurrentTime {}, - - #[snafu(display( - "Failed to create data store version from {}: {}", - version_string, - source - ))] - DataVersion { - version_string: String, - source: data_store_version::error::Error, - }, - - #[snafu(display("Missing required environment variables: {}", source))] - EnvironmentVariables { source: envy::Error }, - - #[snafu(display("Failed to create {}: {}", path.display(), source))] - FileCreate { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to copy {} to {}: {}", src.display(), dst.display(), source))] - FileCopy { - src: PathBuf, - dst: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to open {}: {}", path.display(), source))] - FileOpen { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to read {}: {}", path.display(), source))] - FileRead { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to write to {}: {}", path.display(), source))] - FileWriteJson { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to create HTTP Client: {}", source))] - HttpClientCreate { - source: rusoto_core::request::TlsError, - }, - - #[snafu(display("Failed to serialize JSON: {}", source))] - JSONSerialize { source: serde_json::error::Error }, - - #[snafu(display("Failed to deserialize JSON: {}", source))] - JSONDeserialize { source: serde_json::error::Error }, - - #[snafu(display("Unable to parse keypair: {}", source))] - KeyPairParse { source: tough::error::Error }, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: 
simplelog::TermLogError }, - - #[snafu(display("Failed to update manifest: {}", source))] - ManifestUpdate { - source: update_metadata::error::Error, - }, - - #[snafu(display("Missing image name: {}", name))] - MissingImageName { name: String }, - - #[snafu(display("Failed to open trusted root metadata file {}: {}", path.display(), source))] - OpenRoot { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Missing target: {}", target))] - MissingTarget { target: String }, - - #[snafu(display("Error reading target from TUF repository: {}", source))] - ReadTarget { source: tough::error::Error }, - - #[snafu(display("Unable to find keys for root in signing key"))] - KeysForRoot {}, - - #[snafu(display("Nonexistent role keys for current root.json"))] - RoleKeys {}, - - #[snafu(display("Failed to create semver from {}: {}", version_string, source))] - SemVer { - version_string: String, - source: semver::SemVerError, - }, - - #[snafu(display("Failed to sign message"))] - Sign { source: tough::error::Error }, - - #[snafu(display("Failed to serialize role for signing: {}", source))] - SignJson { source: serde_json::Error }, - - #[snafu(display( - "Failed to retrieve signing key SSM parameter: '{}': {}", - parameter, - source - ))] - SSMParameterRetrieve { - parameter: String, - source: rusoto_core::RusotoError, - }, - - #[snafu(display("Unable to read SSM parameter: '{}'", parameter))] - SSMParameterRead { parameter: String }, - - #[snafu(display("Failed to create temporary directory: {}", source))] - TempDir { source: std::io::Error }, - - #[snafu(display("Failed to load TUF repository: {}", source))] - TUFRepoLoad { source: tough::error::Error }, - - #[snafu(display("Unexpected image name in constants"))] - UnexpectedImageName {}, - } -} - -type Result = std::result::Result; - -// Contains the environment variables we need to execute the program -#[derive(Deserialize, Debug)] -#[serde(rename_all = "lowercase")] -struct EnvVars { - codebuild_src_dir: String, - arch: String, - variant: String, - input_buildsys_artifacts: String, - metadata_url: String, - refresh_days: i64, - signing_role_arn: String, - signing_key_parameter_name: String, - target_url: String, - timestamp_refresh_days: i64, -} - -// Represents Release.toml -// TODO: Make this into a crate -#[derive(Deserialize, Debug)] -struct ReleaseInfo { - version: String, - datastore_version: String, - migrations: Vec, -} - -// Represents migration info from Release.toml -#[derive(Deserialize, Debug)] -struct Migration { - from: String, - to: String, - names: Vec, -} - -// Contains related information needed to sign metadata -struct SigningMaterial { - root: Signed, - keys: RootKeys, - rng: SystemRandom, - version: NonZeroU64, -} - -type RootKeys = HashMap, Box>; - -// FIXME: This code (not quite verbatim) lives in tuftool and should be librarized -// Get the approprate keys from root that match the current signing keypair -fn keys_for_root(key: String, root: &Root) -> Result { - let mut map = HashMap::new(); - let key_pair: Box = - Box::new(parse_keypair(&key.as_bytes().to_vec()).context(error::KeyPairParse)?); - if let Some((keyid, _)) = root - .keys - .iter() - .find(|(_, key)| key_pair.tuf_key() == **key) - { - map.insert(keyid.clone(), key_pair); - } - - Ok(map) -} - -// Get the signing key from the SSM parameter -fn get_signing_key(env: &EnvVars) -> Result { - // Assume a role that has access to signing keys - // Make an sts client to get credentials - // Create an ssm client with those credentials - let sts_client = 
StsClient::new(Default::default()); - let provider = StsAssumeRoleSessionCredentialsProvider::new( - sts_client, - env.signing_role_arn.to_string(), - "sign-tuf-repo".to_owned(), - Some("update_sign_tuf_repo".to_string()), - None, - None, - None, - ); - let http_client = HttpClient::new().context(error::HttpClientCreate)?; - let ssm_client = SsmClient::new_with(http_client, provider, Default::default()); - - let get_signing_key_req = GetParameterRequest { - name: env.signing_key_parameter_name.to_string(), - with_decryption: Some(true), - }; - match ssm_client.get_parameter(get_signing_key_req).sync() { - Ok(ssm_return) => { - if let Some(signing_key) = ssm_return.parameter { - if let Some(key) = signing_key.value { - return Ok(key); - } - } - return error::SSMParameterRead { - parameter: &env.signing_key_parameter_name, - } - .fail(); - } - Err(e) => { - return Err(e).context(error::SSMParameterRetrieve { - parameter: &env.signing_key_parameter_name, - }); - } - } -} - -// Builds the names of the images we expect to come out of the build process -// FIXME: This deserves extra thought. Should the build process push these? -// There are obvious disadvantages here, however one advantage of being -// very strict but naive is that this code would need to be edited and -// pushed to change what actually gets signed. -fn build_target_names(env: &EnvVars, release: &ReleaseInfo) -> Result> { - let mut map = HashMap::new(); - let name_stub = format!( - "{}-{}-{}-v{}", - OS_NAME, env.arch, env.variant, release.version - ); - for file in FILES_TO_SIGN { - let name = match file.as_ref() { - "boot" | "root" => format!("{}-{}.ext4.lz4", &name_stub, &file), - "verity" => format!("{}-{}.verity.lz4", &name_stub, &file), - _ => return error::UnexpectedImageName {}.fail(), - }; - map.insert(file.to_string(), name.to_string()); - } - Ok(map) -} - -// Calculate the length and hash of a target located at target_dir/target_name, -// create a target object with that info, and lastly, copy the file to /targets. -// FIXME: This code (not quite verbatim) lives in tuftool and should be librarized -fn write_target(root: &Root, target_dir: P, target_name: S) -> Result<(String, Target)> -where - P: AsRef, - S: AsRef, -{ - let target_name = target_name.as_ref(); - let target_dir = target_dir.as_ref(); - let target_path = target_dir.join(&target_name); - - // Calculate the length and hash of the supplied target file - let mut file = File::open(&target_path).context(error::FileOpen { path: &target_path })?; - let mut digest = Context::new(&SHA256); - let mut buf = [0; 8 * 1024]; - let mut length = 0; - loop { - match file - .read(&mut buf) - .context(error::FileRead { path: &target_path })? 
- { - 0 => break, - n => { - digest.update(&buf[..n]); - length += n as u64; - } - } - } - let target = Target { - length, - hashes: Hashes { - sha256: Decoded::from(digest.finish().as_ref().to_vec()), - _extra: HashMap::new(), - }, - custom: HashMap::new(), - _extra: HashMap::new(), - }; - - // Using the hash, create a filename to copy to in /targets - let output_dir = Path::new(UPDATED_TUF_REPO_DIR).join("targets"); - let dst = if root.consistent_snapshot { - output_dir.join(format!( - "{}.{}", - hex::encode(&target.hashes.sha256), - target_name - )) - } else { - output_dir.join(&target_name) - }; - - // Create the destination folder if it doesn't exist - fs::create_dir_all(&output_dir).context(error::FileCreate { path: &output_dir })?; - fs::copy(&target_path, &dst).context(error::FileCopy { - src: &target_path, - dst: &dst, - })?; - - Ok((target_name.to_string(), target)) -} - -// Write signed metadata to the TUF '/metadata' folder -// FIXME: This code (not quite verbatim) lives in tuftool and should be librarized -fn write_metadata( - role: T, - signing_material: &SigningMaterial, - filename: &'static str, -) -> Result<([u8; SHA256_OUTPUT_LEN], u64)> { - let metadata_dir = Path::new(UPDATED_TUF_REPO_DIR).join("metadata"); - fs::create_dir_all(&metadata_dir).context(error::FileCreate { - path: &metadata_dir, - })?; - - let path = metadata_dir.join( - if T::TYPE != RoleType::Timestamp && signing_material.root.signed.consistent_snapshot { - format!("{}.{}", signing_material.version, filename) - } else { - filename.to_owned() - }, - ); - - let mut role = Signed { - signed: role, - signatures: Vec::new(), - }; - sign_metadata( - &signing_material.root.signed, - &signing_material.keys, - T::TYPE, - &mut role, - &signing_material.rng, - )?; - - let mut buf = serde_json::to_vec_pretty(&role).context(error::FileWriteJson { path: &path })?; - buf.push(b'\n'); - std::fs::write(&path, &buf).context(error::FileCreate { path: &path })?; - - let mut sha256 = [0; SHA256_OUTPUT_LEN]; - sha256.copy_from_slice(digest(&SHA256, &buf).as_ref()); - Ok((sha256, buf.len() as u64)) -} - -// Sign a given piece of metadata -// FIXME: This code (not quite verbatim) lives in tuftool and should be librarized -fn sign_metadata( - root: &Root, - keys: &RootKeys, - role_type: RoleType, - role: &mut Signed, - rng: &dyn SecureRandom, -) -> Result<()> { - if let Some(role_keys) = root.roles.get(&role_type) { - for (keyid, key) in keys { - if role_keys.keyids.contains(&keyid) { - let mut data = Vec::new(); - let mut ser = - serde_json::Serializer::with_formatter(&mut data, CanonicalFormatter::new()); - role.signed.serialize(&mut ser).context(error::SignJson)?; - let sig = key.sign(&data, rng).context(error::Sign)?; - role.signatures.push(Signature { - keyid: keyid.clone(), - sig: sig.into(), - }); - } - } - } else { - return error::RoleKeys {}.fail(); - } - - Ok(()) -} - -// TODO: Moar logs? (debug/trace?) -fn run() -> Result<()> { - // TerminalMode::Mixed will send errors to stderr and anything less to stdout. 
- TermLogger::init(LevelFilter::Info, LogConfig::default(), TerminalMode::Mixed) - .context(error::Logger)?; - - // Get the configured environment variables - info!("Parsing environment variables"); - let env_vars = match envy::from_env::() { - Ok(env_vars) => env_vars, - Err(error) => return Err(error).context(error::EnvironmentVariables)?, - }; - - // Parse the Release.toml into a ReleaseInfo struct - // Release.toml is located at ${CODEBUILD_SRC_DIR}/Release.toml - info!("Reading and deserializing Release.toml"); - let release_path = Path::new(&env_vars.codebuild_src_dir).join(RELEASE_TOML); - let release_reader = File::open(&release_path).context(error::FileOpen { - path: &release_path, - })?; - let release: ReleaseInfo = - serde_json::from_reader(release_reader).context(error::JSONDeserialize)?; - - // Load TUF repository into memory from metadata/target paths - info!("Pulling TUF repository"); - let transport = HttpTransport::new(); - let repo_dir = TempDir::new(EXISTING_TUF_REPO_DIR).context(error::TempDir)?; - // ${CODEBUILD_SRC_DIR}/packages/workspaces/root.json - let root_json_path = Path::new(&env_vars.codebuild_src_dir) - .join("packages") - .join("workspaces") - .join(ROOT_JSON); - let tuf_repo = Repository::load( - &transport, - Settings { - root: File::open(&root_json_path).context(error::OpenRoot { - path: &root_json_path, - })?, - datastore: repo_dir.path(), - metadata_base_url: &env_vars.metadata_url, - target_base_url: &env_vars.target_url, - limits: Limits { - ..tough::Limits::default() - }, - }, - ) - .context(error::TUFRepoLoad)?; - - // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // Read the manifest target into memory so we can update it - info!("Parsing manifest via TUF repo"); - let manifest_reader = tuf_repo.read_target(TUF_MANIFEST_JSON); - let mut manifest: Manifest = match manifest_reader { - Err(error) => return Err(error).context(error::ReadTarget), - Ok(manifest_reader) => match manifest_reader { - Some(reader) => serde_json::from_reader(reader).context(error::JSONSerialize)?, - None => { - return error::MissingTarget { - target: TUF_MANIFEST_JSON, - } - .fail() - } - }, - }; - - // If there are migrations, add them to the Manifest - if !release.migrations.is_empty() { - for migration in release.migrations.iter() { - if release.version == migration.to { - info!("Adding migrations to manifest"); - let from = DataVersion::from_str(&migration.from).context(error::DataVersion { - version_string: &migration.from, - })?; - let to = DataVersion::from_str(&migration.to).context(error::DataVersion { - version_string: &migration.to, - })?; - // "true" in this call will append the migrations to the list rather than - // overwrite them - manifest - .add_migration(true, from, to, migration.names.clone()) - .context(error::AddMigration)?; - } - } - } - - // Add the current update images to the Manifest - // TODO: This needs more validation. We need to make sure that the image - // actually exists before and that it's named correctly - let new_targets = build_target_names(&env_vars, &release)?; - let images = Images { - boot: new_targets - .get("boot") - .context(error::MissingImageName { - name: "boot".to_string(), - })? - .to_string(), - root: new_targets - .get("root") - .context(error::MissingImageName { - name: "root".to_string(), - })? - .to_string(), - hash: new_targets - .get("verity") - .context(error::MissingImageName { - name: "verity".to_string(), - })? 
- .to_string(), - }; - let release_semver = SemVer::parse(&release.version).context(error::SemVer { - version_string: release.version, - })?; - let datastore_version = - DataVersion::from_str(&release.datastore_version).context(error::DataVersion { - version_string: &release.datastore_version, - })?; - - // Add the update to the manifest. - info!("Adding current update to manifest"); - manifest - .add_update( - release_semver.clone(), - Some(release_semver.clone()), - datastore_version, - env_vars.arch.clone(), - env_vars.variant.clone(), - images, - ) - .context(error::ManifestUpdate)?; - - // Add waves to the manifest - // FIXME: Make waves configurable for this code via args/env variables, - // an issue exists to set "profiles" that can be referred to: - // https://github.com/amazonlinux/PRIVATE-thar/issues/596 - info!("Adding wave(s) to manifest"); - let now = Utc::now(); - // First wave starts today - manifest - .add_wave( - env_vars.variant.clone(), - env_vars.arch.clone(), - release_semver.clone(), - 512, - now.clone(), - ) - .context(error::AddWave)?; - // Second wave starts tomorrow - manifest - .add_wave( - env_vars.variant.clone(), - env_vars.arch.clone(), - release_semver.clone(), - 1024, - now.clone() + Duration::days(1), - ) - .context(error::AddWave)?; - // Third wave starts the day after tomorrow - manifest - .add_wave( - env_vars.variant.clone(), - env_vars.arch.clone(), - release_semver.clone(), - 1576, - now.clone() + Duration::days(2), - ) - .context(error::AddWave)?; - - // Write the updated manifest to a file. This must be a file for now as we - // compute hashes and size for it in the next step, and copy it to the - // final '/targets' dir - let manifest_path = Path::new(EXISTING_TUF_REPO_DIR).join("manifest.json"); - let pretty_manifest = - serde_json::to_string_pretty(&manifest).context(error::JSONDeserialize)?; - fs::write(&manifest_path, &pretty_manifest).context(error::FileCreate { - path: manifest_path, - })?; - // Write the updated manifest to /targets - info!("Writing manifest to targets"); - let (manifest_name, manifest) = write_target( - &tuf_repo.root().signed, - &EXISTING_TUF_REPO_DIR, - "manifest.json", - )?; - - // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // Create the items necessary to sign metadata - let metadata_version = - NonZeroU64::new(Utc::now().timestamp() as u64).context(error::CurrentTime)?; - let timestamp_expiration = Utc::now() + Duration::days(env_vars.timestamp_refresh_days); - let other_expiration = Utc::now() + Duration::days(env_vars.refresh_days); - let signing_key = get_signing_key(&env_vars)?; - let root_keys = keys_for_root(signing_key, &tuf_repo.root().signed)?; - let signing_material = SigningMaterial { - root: tuf_repo.root().clone(), - keys: root_keys, - rng: SystemRandom::new(), - version: metadata_version, - }; - - // Clone existing 'targets' struct from the TUF repo; we will update it - info!("Updating 'targets.json'"); - let mut targets = tuf_repo.targets().clone().signed; - // Add the previously updated manifest to targets - targets.targets.insert(manifest_name, manifest); - // Add all new images to /targets and 'targets' (the object) - for (_, new_target) in new_targets { - let (target_name, target) = write_target( - &tuf_repo.root().signed, - &env_vars.input_buildsys_artifacts, - new_target, - )?; - - targets.targets.insert(target_name, target); - } - - // Update the targets version and expiration - targets.version = metadata_version; - targets.expires = other_expiration; - let 
(targets_sha256, targets_length) = - write_metadata(targets, &signing_material, "targets.json")?; - - // Fetch snapshot, update version and expiration, sign - info!("Updating 'snapshot.json'"); - let mut snapshot = tuf_repo.snapshot().clone().signed; - snapshot.version = metadata_version; - snapshot.expires = other_expiration; - snapshot.meta.insert( - "targets.json".to_owned(), - SnapshotMeta { - hashes: Some(Hashes { - sha256: targets_sha256.to_vec().into(), - _extra: HashMap::new(), - }), - length: Some(targets_length), - version: metadata_version, - _extra: HashMap::new(), - }, - ); - let (snapshot_sha256, snapshot_length) = - write_metadata(snapshot, &signing_material, "snapshot.json")?; - - // Fetch timestamp, update version and expiration, sign - info!("Updating 'timestamp.json'"); - let mut timestamp = tuf_repo.timestamp().clone().signed; - timestamp.version = metadata_version; - timestamp.expires = timestamp_expiration; - timestamp.meta.insert( - "snapshot.json".to_owned(), - TimestampMeta { - hashes: Hashes { - sha256: snapshot_sha256.to_vec().into(), - _extra: HashMap::new(), - }, - length: snapshot_length, - version: metadata_version, - _extra: HashMap::new(), - }, - ); - write_metadata(timestamp, &signing_material, "timestamp.json")?; - - Ok(()) -} - -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} From 080777df80c3f9a5bfc8b98d51c84ef13e5a3db3 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 20 Feb 2020 16:25:34 -0800 Subject: [PATCH 0247/1356] Fix links for bottlerocket-os/bottlerocket --- CONTRIBUTING.md | 6 +++--- README.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c8e163e0..a377b56b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,7 +10,7 @@ Please read through this document before submitting any issues or pull requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. -When filing an issue, please check [existing open](https://github.com/amazonlinux/PRIVATE-thar/issues) and [closed](https://github.com/amazonlinux/PRIVATE-thar/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. +When filing an issue, please check [existing open](https://github.com/bottlerocket-os/bottlerocket/issues) and [closed](https://github.com/bottlerocket-os/bottlerocket/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: @@ -26,7 +26,7 @@ Before starting a pull request, please ensure that: 1. You open an issue first to discuss any significant work - we would hate for your time to be wasted. 2. You are working against the latest source on the *develop* branch. -3. You check existing [open](https://github.com/amazonlinux/PRIVATE-thar/pulls) and [merged](https://github.com/amazonlinux/PRIVATE-thar/pulls?q=is%3Apr+is%3Aclosed) pull requests to make sure someone else hasn't addressed the problem already. +3. You check existing [open](https://github.com/bottlerocket-os/bottlerocket/pulls) and [merged](https://github.com/bottlerocket-os/bottlerocket/pulls?q=is%3Apr+is%3Aclosed) pull requests to make sure someone else hasn't addressed the problem already. To send us a pull request, please: @@ -42,7 +42,7 @@ GitHub provides additional documentation on [forking a repository](https://help. 
## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. -As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/amazonlinux/PRIVATE-thar/labels/help%20wanted) issues is a great place to start. +As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/bottlerocket-os/bottlerocket/labels/help%20wanted) issues is a great place to start. ## Code of Conduct diff --git a/README.md b/README.md index 006c0fcb..df4a8e13 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Some notable features include: ## Contact us -If you find a security issue, please [contact our security team](https://github.com/amazonlinux/PRIVATE-thar/security/policy) rather than opening an issue. +If you find a security issue, please [contact our security team](https://github.com/bottlerocket-os/bottlerocket/security/policy) rather than opening an issue. If you're interested in contributing, thank you! Please see our [contributor's guide](CONTRIBUTING.md). @@ -21,7 +21,7 @@ Please see our [contributor's guide](CONTRIBUTING.md). We use GitHub issues to track other bug reports and feature requests. You can select from a few templates and get some guidance on the type of information that would be most helpful. -[Contact us with a new issue here.](https://github.com/amazonlinux/PRIVATE-thar/issues/new/choose) +[Contact us with a new issue here.](https://github.com/bottlerocket-os/bottlerocket/issues/new/choose) We don't have other communication channels set up quite yet, but don't worry about making an issue! You can let us know about things that seem difficult, or even ways you might like to help. From d52acbe98abcdbdb51cd54fc77f2c1b724999c32 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 21 Dec 2019 17:02:01 +0000 Subject: [PATCH 0248/1356] kernel: set options to enforce SELinux usage Signed-off-by: Ben Cressey --- packages/kernel/config-bottlerocket | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index 5f2e8104..2fd12244 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -25,6 +25,15 @@ CONFIG_DM_VERITY=y # yama LSM for ptrace restrictions CONFIG_SECURITY_YAMA=y +# Do not allow SELinux to be disabled at boot. +CONFIG_SECURITY_SELINUX_BOOTPARAM=n + +# Do not allow SELinux to be disabled at runtime. +CONFIG_SECURITY_SELINUX_DISABLE=n + +# Do not allow SELinux to use `enforcing=0` behavior. +CONFIG_SECURITY_SELINUX_DEVELOP=n + # enable /proc/config.gz CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y From 1a4c0fcdcd0b5aa7bf61f3c4bf362b267ab53bec Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 3 Dec 2019 16:24:46 +0000 Subject: [PATCH 0249/1356] build: enable audit and selinux in enforcing mode We enable the audit subsystem in order to log AVC denials. The SELinux options are mandated by the kernel config, but including them on the kernel command line makes the behavior visible to simple tools. 
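As a rough illustration of what "simple tools" can see — a hedged sketch, not output captured from a real image, and assuming the libselinux utilities are present on the host:

```
# The parameters appear directly on the kernel command line.
cat /proc/cmdline
# ... console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 ...

# getenforce reports the effective SELinux mode without digging into kernel config.
getenforce
# Enforcing

# AVC denials recorded by the audit subsystem show up in the kernel log.
dmesg | grep -i 'avc.*denied'
```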
Signed-off-by: Ben Cressey --- tools/rpm2img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index f82451f7..ab97ed18 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -152,7 +152,7 @@ set timeout="0" menuentry "Bottlerocket OS ${VERSION_ID}" { linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro init=/sbin/preinit \\ - audit=0 console=tty0 console=ttyS0 random.trust_cpu=on \\ + console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg net.ifnames=0 biosdevname=0 \\ dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ From 3ce7dfd3876047413cbd57f67dd7d5daa8c97161 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 10 Dec 2019 20:36:37 +0000 Subject: [PATCH 0250/1356] build: enable selinux labeling Signed-off-by: Ben Cressey --- tools/rpm2img | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/rpm2img b/tools/rpm2img index ab97ed18..1b706ba3 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -34,6 +34,10 @@ BOOT_MOUNT="$(mktemp -d)" DATA_MOUNT="$(mktemp -d)" EFI_MOUNT="$(mktemp -d)" +SELINUX_ROOT="/usr/lib/selinux" +SELINUX_POLICY="fortified" +SELINUX_FILE_CONTEXTS="${ROOT_MOUNT}/${SELINUX_ROOT}/${SELINUX_POLICY}/contexts/files/file_contexts" + VERITY_VERSION=1 VERITY_HASH_ALGORITHM=sha256 VERITY_DATA_BLOCK_SIZE=4096 @@ -121,7 +125,12 @@ echo "VERSION_ID=${VERSION_ID}" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release echo "BUILD_ID=${BUILD_ID}" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release # BOTTLEROCKET-ROOT-A +mkdir -p "${ROOT_MOUNT}/lost+found" +ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ + "${SELINUX_FILE_CONTEXTS}" "${ROOT_MOUNT}" \ + | awk -v root="${ROOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') mkfs.ext4 -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" 920M +echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}" resize2fs -M "${ROOT_IMAGE}" dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=45 @@ -181,7 +190,14 @@ sgdisk --clear \ -n 0:1M:1023M -c 0:"BOTTLEROCKET-DATA" -t 0:8300 \ --sort --print "${DATA_IMAGE}" mkdir -p "${DATA_MOUNT}/var/"{cache,lib,log,spool} +# If we build on a host with SELinux enabled, we could end up with labels that +# do not match our policy. Since we allow replacing the data volume at runtime, +# we can't count on these labels being correct in any case, and it's better to +# remove them all. +UNLABELED=$(find "${DATA_MOUNT}" \ + | awk -v root="${DATA_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_rm", $1, "security.selinux"}') mkfs.ext4 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" 1022M +echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek=1 sgdisk -v "${DISK_IMAGE}" From 8f1a916ad508bb022812d43a8a3e06ef9ca8fa08 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 14 Feb 2020 18:14:43 +0000 Subject: [PATCH 0251/1356] build: do not populate /var in image These directories are created for us by tmpfiles.d. 
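For anyone unfamiliar with the mechanism, here is a minimal sketch of the kind of tmpfiles.d entries involved; the file name, paths, and modes below are illustrative assumptions, not taken from the release package:

```
# Hypothetical fragment, e.g. /usr/lib/tmpfiles.d/var.conf;
# each 'd' line tells systemd-tmpfiles to create the directory at boot.
d /var/cache 0755 root root -
d /var/lib   0755 root root -
d /var/log   0755 root root -
d /var/spool 0755 root root -
```

systemd-tmpfiles --create (run by systemd early in boot) applies entries like these, so the image itself no longer needs to ship the empty directories.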
Signed-off-by: Ben Cressey --- tools/rpm2img | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index 1b706ba3..6dab3426 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -189,7 +189,6 @@ truncate -s 1G "${DATA_IMAGE}" sgdisk --clear \ -n 0:1M:1023M -c 0:"BOTTLEROCKET-DATA" -t 0:8300 \ --sort --print "${DATA_IMAGE}" -mkdir -p "${DATA_MOUNT}/var/"{cache,lib,log,spool} # If we build on a host with SELinux enabled, we could end up with labels that # do not match our policy. Since we allow replacing the data volume at runtime, # we can't count on these labels being correct in any case, and it's better to From 4aee002ba4efa190d95f2f3c2a804094abe3b7b8 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 21 Feb 2020 17:20:42 +0000 Subject: [PATCH 0252/1356] docs: mention SELinux in README Signed-off-by: Ben Cressey --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index df4a8e13..616f691d 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,9 @@ Only a few locations are made writable: * some through [tmpfs mounts](workspaces/preinit/laika), used for configuration, that don't persist over a restart. * one [persistent location](packages/release/var-lib-bottlerocket.mount) for the data store. +We enable [SELinux](https://selinuxproject.org/) in enforcing mode. +This protects the data store from tampering, and blocks modification of sensitive files such as container archives. + Almost all first-party components are written in [Rust](https://www.rust-lang.org/). Rust eliminates some classes of memory safety issues, and encourages design patterns that help security. From 81b51c101b164f044b5f344971dedfa2b2551d7e Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 21 Feb 2020 14:51:39 -0800 Subject: [PATCH 0253/1356] Rename workspaces directory to 'sources', package to 'os' workspaces was originally a set of Cargo workspaces, but we now build them together because it's much faster to build. This renames the 'workspaces' directory to the clearer 'sources' because it contains our source code. The 'workspaces' package is renamed to 'os' to represent that it contains our top-level / userspace OS components. --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 616f691d..cd521dc1 100644 --- a/README.md +++ b/README.md @@ -144,13 +144,13 @@ reboot We're working on more automated update methods. The update process uses images secured by [TUF](https://theupdateframework.github.io/). -For more details, see the [update system documentation](workspaces/updater/). +For more details, see the [update system documentation](sources/updater/). ## Settings Here we'll describe the settings you can configure on your Bottlerocket instance, and how to do it. -(API endpoints are defined in our [OpenAPI spec](workspaces/api/openapi.yaml) if you want more detail.) +(API endpoints are defined in our [OpenAPI spec](sources/api/openapi.yaml) if you want more detail.) ### Interacting with settings @@ -193,7 +193,7 @@ If you want to group sets of changes yourself, pick a transaction name and appen For example, if you want the name "FOO", you can `PATCH` to `/settings?tx=FOO` and `POST` to `/tx/commit_and_apply?tx=FOO`. (Transactions are created automatically when used, and are cleaned up on reboot.) -For more details on using the client, see the [apiclient documentation](workspaces/api/apiclient/). 
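As a concrete sketch of the named-transaction flow described above, assuming apiclient accepts the same `-m`/`-u`/`-d` flags used in the earlier examples:

```
# Stage a change into a transaction named FOO instead of the default one.
apiclient -m PATCH -u '/settings?tx=FOO' -d '{"timezone": "America/Thunder_Bay"}'
# Nothing is applied yet; commit and apply only the FOO transaction.
apiclient -m POST -u '/tx/commit_and_apply?tx=FOO'
```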
+For more details on using the client, see the [apiclient documentation](sources/api/apiclient/). #### Using user data @@ -211,7 +211,7 @@ timezone = "America/Thunder_Bay" Here we'll describe each setting you can change. -**Note:** You can see the default values (for any settings that are not generated at runtime) by looking at [defaults.toml](workspaces/models/defaults.toml). +**Note:** You can see the default values (for any settings that are not generated at runtime) by looking at [defaults.toml](sources/models/defaults.toml). When you're sending settings to the API, or receiving settings from the API, they're in a structured JSON format. This allows allow modification of any number of keys at once. @@ -249,7 +249,7 @@ The following settings can be optionally set to customize the node labels and ta special = "true:NoSchedule" ``` -The following settings are set for you automatically by [pluto](workspaces/api/) based on runtime instance information, but you can override them if you know what you're doing! +The following settings are set for you automatically by [pluto](sources/api/) based on runtime instance information, but you can override them if you know what you're doing! * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) * `settings.kubernetes.cluster-dns-ip`: The CIDR block of the primary network interface. * `settings.kubernetes.node-ip`: The IPv4 address of this node. @@ -317,7 +317,7 @@ Be careful, and make sure you have a similar low-level use case before reaching We use [dm-verity](https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity) to load a verified read-only root filesystem, preventing some classes of persistent security threats. Only a few locations are made writable: -* some through [tmpfs mounts](workspaces/preinit/laika), used for configuration, that don't persist over a restart. +* some through [tmpfs mounts](sources/preinit/laika), used for configuration, that don't persist over a restart. * one [persistent location](packages/release/var-lib-bottlerocket.mount) for the data store. We enable [SELinux](https://selinuxproject.org/) in enforcing mode. @@ -355,7 +355,7 @@ When updating Bottlerocket, the partition table is updated to point from set A t We also track successful boots, and if there are failures it will automatically revert back to the prior working partition set. The update process uses images secured by [TUF](https://theupdateframework.github.io/). -For more details, see the [update system documentation](workspaces/updater/). +For more details, see the [update system documentation](sources/updater/). ### API @@ -369,12 +369,12 @@ The second method is through the Bottlerocket API, for example when you want to There's an HTTP API server that listens on a local Unix-domain socket. Remote access to the API requires an authenticated transport such as SSM's RunCommand or Session Manager, as described above. -For more details, see the [apiserver documentation](workspaces/api/apiserver/). +For more details, see the [apiserver documentation](sources/api/apiserver/). -The [apiclient](workspaces/api/apiclient/) can be used to make requests. +The [apiclient](sources/api/apiclient/) can be used to make requests. They're just HTTP requests, but the API client simplifies making requests with the Unix-domain socket. -To make configuration easier, we have [moondog](workspaces/api/moondog/), which can send an API request for you based on instance user data. 
+To make configuration easier, we have [moondog](sources/api/moondog/), which can send an API request for you based on instance user data. If you start a virtual machine, like an EC2 instance, it will read TOML-formatted Bottlerocket configuration from user data and send it to the API server. This way, you can configure your Bottlerocket instance without having to make API calls after launch. @@ -382,4 +382,4 @@ See [Settings](#settings) above for examples and to understand what you can conf The server and client are the user-facing components of the API system, but there are a number of other components that work together to make sure your settings are applied, and that they survive upgrades of Bottlerocket. -For more details, see the [API system documentation](workspaces/api/). +For more details, see the [API system documentation](sources/api/). From 3edaa9d9c6fd0002116ddc07e8cdb040bf430bbc Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 20 Feb 2020 22:33:33 +0000 Subject: [PATCH 0254/1356] Rename 'moondog' to 'early-boot-config' --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cd521dc1..08a292a0 100644 --- a/README.md +++ b/README.md @@ -374,7 +374,7 @@ For more details, see the [apiserver documentation](sources/api/apiserver/). The [apiclient](sources/api/apiclient/) can be used to make requests. They're just HTTP requests, but the API client simplifies making requests with the Unix-domain socket. -To make configuration easier, we have [moondog](sources/api/moondog/), which can send an API request for you based on instance user data. +To make configuration easier, we have [early-boot-config](sources/api/early-boot-config/), which can send an API request for you based on instance user data. If you start a virtual machine, like an EC2 instance, it will read TOML-formatted Bottlerocket configuration from user data and send it to the API server. This way, you can configure your Bottlerocket instance without having to make API calls after launch. From 42af81614fa94bbc45daa6901450aa1a9cf80733 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 24 Feb 2020 10:16:40 -0800 Subject: [PATCH 0255/1356] Remove old migrations that no longer apply after 0.3 break --- tools/rpm2migrations | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2migrations b/tools/rpm2migrations index e222b054..8dd99505 100755 --- a/tools/rpm2migrations +++ b/tools/rpm2migrations @@ -2,7 +2,6 @@ # # Retrieve migrations from the RPM and output an appropriately named tarball set -eu -o pipefail -shopt -qs failglob for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" @@ -30,6 +29,7 @@ fi # lz4 compress each migration for migration in "${MIGRATIONS_DIR}"/*; do + [ -e "${migration}" ] || continue lz4 -v "${migration}" "${migration}.lz4" done From 2fc1e26fba0debd859b24483e08fefb7cf368435 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 24 Feb 2020 12:52:23 -0800 Subject: [PATCH 0256/1356] Replace unused timezone/hostname settings with motd setting It's useful to have an example setting so that users can see it in documentation and follow along, learning the API by actually making calls, without worrying about breaking their system. We were using hostname and timezone for this, but those sounded like real settings (and were originally intended to be real settings) so it was confusing. motd is a "real" setting in that it updates /etc/motd, but that's very low risk and only exposed to the user in any case. 
Plus, with low risk, it's fun to see an actual impact on your system. --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 08a292a0..89c5e4c6 100644 --- a/README.md +++ b/README.md @@ -164,13 +164,13 @@ apiclient -u /settings This will return all of the current settings in JSON format. For example, here's an abbreviated response: ``` -{"timezone":"America/Los_Angeles","kubernetes":{...}} +{"motd":"...", {"kubernetes": ...}} ``` You can change settings by sending back the same type of JSON data in a PATCH request. This can include any number of settings changes. ``` -apiclient -m PATCH -u /settings -d '{"timezone": "America/Thunder_Bay"}' +apiclient -m PATCH -u /settings -d '{"motd": "my own value!"}' ``` This will *stage* the setting in a "pending" area - a transaction. @@ -204,7 +204,7 @@ Here's the user data to change the time zone setting, as we did in the last sect ``` [settings] -timezone = "America/Thunder_Bay" +motd = "my own value!" ``` ### Description of settings @@ -225,7 +225,7 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in #### Top-level settings -* `settings.timezone`, `settings.hostname`: These don't function currently, but are intended to let you override the system timezone or the hostname retrieved from DHCP. At the moment they're used as example settings. +* `settings.motd`: This setting is just written out to /etc/motd. It's useful as a way to get familiar with the API! Try changing it. #### Kubernetes settings From 80d0005162551348ecace94d5a55c0b99aafcedb Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 25 Feb 2020 22:04:41 +0000 Subject: [PATCH 0257/1356] Rename built artifacts (variant-arch-version) Previously built artifacts were named as such: bottlerocket-$ARCH-$VARIANT-$VERSION-$COMMIT.extension This commit moves the $VARIANT first to better line up with names elsewhere in the project: bottlerocket-$VARIANT-$ARCH-VERSION-$COMMIT.extension --- README.md | 2 +- tools/rpm2img | 2 +- tools/rpm2migrations | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 89c5e4c6..7585c5e0 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s` variant will produce an image named `bottlerocket-x86_64-aws-k8s.img`. +For example, an `x86_64` build of the `aws-k8s` variant will produce an image named `bottlerocket-aws-k8s-x86_64--.img`. Our first supported variant, `aws-k8s`, supports EKS as described above. 
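To make the reordered layout concrete, a sketch with placeholder values (not real build output):

```
# Placeholder values for illustration only.
VARIANT=aws-k8s; ARCH=x86_64; VERSION_ID=0.3.0; BUILD_ID=abc1234
# New order: variant before architecture.
echo "bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}.img"
# -> bottlerocket-aws-k8s-x86_64-0.3.0-abc1234.img
```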
diff --git a/tools/rpm2img b/tools/rpm2img index 6dab3426..72675a82 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -13,7 +13,7 @@ done mkdir -p "${OUTPUT_DIR}" -FILENAME_PREFIX="bottlerocket-${ARCH}-${VARIANT}-${VERSION_ID}-${BUILD_ID}" +FILENAME_PREFIX="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" DISK_IMAGE_NAME="${FILENAME_PREFIX}.img.lz4" BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" diff --git a/tools/rpm2migrations b/tools/rpm2migrations index 8dd99505..da4bbc26 100755 --- a/tools/rpm2migrations +++ b/tools/rpm2migrations @@ -13,7 +13,7 @@ done mkdir -p "${OUTPUT_DIR}" -MIGRATIONS_ARCHIVE="bottlerocket-${ARCH}-${VARIANT}-${VERSION_ID}-${BUILD_ID}-migrations.tar" +MIGRATIONS_ARCHIVE="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-migrations.tar" ROOT_TEMP="$(mktemp -d)" SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" MIGRATIONS_DIR="${ROOT_TEMP}/${SYS_ROOT}/usr/share/migrations" From b9734935499154529a2587e66c1646da575b4e53 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 26 Feb 2020 10:21:52 -0800 Subject: [PATCH 0258/1356] Split INSTALL into BUILDING and QUICKSTART The use cases for building images and running them are different enough to justify separate documents. --- BUILDING.md | 86 +++++++++++++++++++++++++++++++++++++++++++++++ README.md | 11 +++--- tools/gen-docs.sh | 2 +- 3 files changed, 94 insertions(+), 5 deletions(-) create mode 100644 BUILDING.md diff --git a/BUILDING.md b/BUILDING.md new file mode 100644 index 00000000..72278c04 --- /dev/null +++ b/BUILDING.md @@ -0,0 +1,86 @@ +# Building Bottlerocket + +If you'd like to build your own image instead of relying on an Amazon-provided image, follow these steps. +You can skip to the [setup guide](QUICKSTART.md) to use an existing image in Amazon EC2. +(We're still working on other use cases!) + +## Build an image + +### Dependencies + +#### Rust + +The build system is based on the Rust language. +We recommend you install the latest stable Rust using [rustup](https://rustup.rs/), either from the official site or your development host's package manager. + +To organize build tasks, we use [cargo-make](https://sagiegurari.github.io/cargo-make/). +We also use [cargo-deny](https://github.com/EmbarkStudios/cargo-deny) during the build process. +To get these, run: + +``` +cargo install cargo-make +cargo install cargo-deny --version 0.6.2 +``` + +#### Docker + +Bottlerocket uses [Docker](https://docs.docker.com/install/#supported-platforms) to orchestrate package and image builds. + +We recommend Docker 19.03 or later. +Builds rely on Docker's integrated BuildKit support, which has received many fixes and improvements in newer versions. + +You'll need to have Docker installed and running, with your user account added to the `docker` group. +Docker's [post-installation steps for Linux](https://docs.docker.com/install/linux/linux-postinstall/) will walk you through that. + +### Build process + +To build an image, run: + +``` +cargo make +``` + +All packages will be built in turn, and then compiled into an `img` file in the `build/` directory. + +### Register an AMI + +To use the image in Amazon EC2, we need to register the image as an AMI. +The `bin/amiize.sh` script does this for you. 
+ +The script has some assumptions about your setup, in particular that you: + * have [aws-cli v1](https://aws.amazon.com/cli/) set up, and that its default profile can create and control EC2 resources + * have an SSH key that's registered with EC2 and is available to `ssh` (for example, loaded into `ssh-agent`) + * have a few other common tools installed, like `jq`, `du`, and `rsync` + +First, decompress the images. +(Note: these filenames assume an `x86_64` architecture and `aws-k8s` [variant](README.md).) + +``` +lz4 -d build/latest/bottlerocket-aws-k8s-x86_64.img.lz4 && \ +lz4 -d build/latest/bottlerocket-aws-k8s-x86_64-data.img.lz4 +``` + +Next, register an AMI: + +``` +bin/amiize.sh --name YOUR-AMI-NAME-HERE \ + --ssh-keypair YOUR-EC2-SSH-KEYPAIR-NAME-HERE \ + --root-image build/latest/bottlerocket-aws-k8s-x86_64.img \ + --data-image build/latest/bottlerocket-aws-k8s-x86_64-data.img \ + --region us-west-2 \ + --instance-type m3.xlarge \ + --arch x86_64 \ + --worker-ami ami-08d489468314a58df \ + --user-data 'I2Nsb3VkLWNvbmZpZwpyZXBvX3VwZ3JhZGU6IG5vbmUK' +``` + +Your new AMI ID will be printed at the end. + +The amiize script starts an EC2 instance, which it uses to write the image to a new EBS volume. +It then registers this EBS volume as an AMI and terminates the instance. +In the example command above, the `--worker-ami` is an Amazon Linux AMI, and the `--user-data` disables updates at boot to speed up registration. +Make sure you use an up-to-date worker AMI. + +## Use your image + +See the [setup guide](QUICKSTART.md) for information on running Bottlerocket images. diff --git a/README.md b/README.md index 7585c5e0..a3f496f7 100644 --- a/README.md +++ b/README.md @@ -42,10 +42,13 @@ Our first supported variant, `aws-k8s`, supports EKS as described above. :walking: :running: -To get started, please see [INSTALL](INSTALL.md). +To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). It describes: * how to build an image * how to register an EC2 AMI from an image + +To get started using Bottlerocket, please see [QUICKSTART](QUICKSTART.md). +It describes: * how to set up a Kubernetes cluster, so your Bottlerocket instance can run pods * how to launch a Bottlerocket instance in EC2 @@ -65,7 +68,7 @@ Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bott This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. (You can easily replace this control container with your own just by changing the URI; see [Settings](#settings). -You need to give your instance the SSM role for this to work; see the [setup guide](INSTALL.md#enabling-ssm). +You need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART.md#enabling-ssm). Once the instance is started, you can start a session: @@ -231,8 +234,8 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). -See the [setup guide](INSTALL.md) for *much* more detail on setting up Bottlerocket and Kubernetes. -* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](INSTALL.md) uses "bottlerocket". +See the [setup guide](QUICKSTART.md) for *much* more detail on setting up Bottlerocket and Kubernetes. 
+* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART.md) uses "bottlerocket". * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. diff --git a/tools/gen-docs.sh b/tools/gen-docs.sh index b8e8ed11..8c19daaf 100755 --- a/tools/gen-docs.sh +++ b/tools/gen-docs.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -DOCS=(START.md README.md INSTALL.md CHANGELOG.md extras/dogswatch/README.md) +DOCS=(START.md README.md BUILDING.md QUICKSTART.md CHANGELOG.md extras/dogswatch/README.md) EXTRAS=(extras/dogswatch/{dogswatch,dev/deployment}.yaml) if ! hash grip; then From 33ba2113a6f42451145c387bd368175896ae6e2c Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 26 Feb 2020 14:07:03 -0800 Subject: [PATCH 0259/1356] Rename aws-k8s variant to aws-k8s-1.15 This requires removing the variants workspace, which we had intended to do anyway because you wouldn't want to build the whole workspace. It needs to be removed now because "." isn't valid in a package name, and we were passing the variant name to build the variant's "package". --- BUILDING.md | 10 +++++----- README.md | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 72278c04..17276829 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -53,11 +53,11 @@ The script has some assumptions about your setup, in particular that you: * have a few other common tools installed, like `jq`, `du`, and `rsync` First, decompress the images. -(Note: these filenames assume an `x86_64` architecture and `aws-k8s` [variant](README.md).) +(Note: these filenames assume an `x86_64` architecture and `aws-k8s-1.15` [variant](README.md).) ``` -lz4 -d build/latest/bottlerocket-aws-k8s-x86_64.img.lz4 && \ -lz4 -d build/latest/bottlerocket-aws-k8s-x86_64-data.img.lz4 +lz4 -d build/latest/bottlerocket-aws-k8s-1.15-x86_64.img.lz4 && \ +lz4 -d build/latest/bottlerocket-aws-k8s-1.15-x86_64-data.img.lz4 ``` Next, register an AMI: @@ -65,8 +65,8 @@ Next, register an AMI: ``` bin/amiize.sh --name YOUR-AMI-NAME-HERE \ --ssh-keypair YOUR-EC2-SSH-KEYPAIR-NAME-HERE \ - --root-image build/latest/bottlerocket-aws-k8s-x86_64.img \ - --data-image build/latest/bottlerocket-aws-k8s-x86_64-data.img \ + --root-image build/latest/bottlerocket-aws-k8s-1.15-x86_64.img \ + --data-image build/latest/bottlerocket-aws-k8s-1.15-x86_64-data.img \ --region us-west-2 \ --instance-type m3.xlarge \ --arch x86_64 \ diff --git a/README.md b/README.md index a3f496f7..a3e4c63f 100644 --- a/README.md +++ b/README.md @@ -34,9 +34,9 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s` variant will produce an image named `bottlerocket-aws-k8s-x86_64--.img`. +For example, an `x86_64` build of the `aws-k8s-1.15` variant will produce an image named `bottlerocket-aws-k8s-1.15-x86_64--.img`. -Our first supported variant, `aws-k8s`, supports EKS as described above. +Our first supported variant, `aws-k8s-1.15`, supports EKS as described above. 
## Setup From 8b9f7f1d348d17187623417fd288c30ddae15343 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 26 Feb 2020 16:52:43 -0800 Subject: [PATCH 0260/1356] targets-base-url --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a3e4c63f..24035ac2 100644 --- a/README.md +++ b/README.md @@ -261,7 +261,7 @@ The following settings are set for you automatically by [pluto](sources/api/) ba #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. -* `settings.updates.target-base-url`: The common portion of all URIs used to download update files. +* `settings.updates.targets-base-url`: The common portion of all URIs used to download update files. * `settings.updates.seed`: A `u32` value that determines how far into in the update schedule this machine will accept an update. We recommending leaving this at its default generated value so that updates can be somewhat randomized in your cluster. #### Time settings From 27567280a849d963c7dddf2fc07459c655b9895d Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 27 Feb 2020 17:52:09 +0000 Subject: [PATCH 0261/1356] Use the SDK for fetching Go deps --- tools/docker-go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/docker-go b/tools/docker-go index 15516443..baae6beb 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -15,7 +15,7 @@ Runs Required: --module-path The path of the Go module to mount into the container - --go-version Version of Go to use + --sdk-image Name of the SDK image to use --go-mod-cache The Go module cache path to mount into the container --command The command to run in the golang container EOF @@ -35,7 +35,7 @@ parse_args() { case "${1}" in --help ) usage; exit 0 ;; --module-path ) shift; GO_MODULE_PATH="${1}" ;; - --go-version ) shift; GO_VERSION="${1}" ;; + --sdk-image ) shift; SDK_IMAGE="${1}" ;; --go-mod-cache ) shift; GO_MOD_CACHE="${1}" ;; --command ) shift; COMMAND="${@:1}" ;; *) ;; @@ -45,7 +45,7 @@ parse_args() { # Required arguments required_arg "--module-path" "${GO_MODULE_PATH}" - required_arg "--go-version" "${GO_VERSION}" + required_arg "--sdk-image" "${SDK_IMAGE}" required_arg "--go-mod-cache" "${GO_MOD_CACHE}" required_arg "--command" "${COMMAND}" } @@ -57,10 +57,11 @@ parse_args "${@}" docker run --rm \ -e GOPRIVATE='*' \ -e GOCACHE='/tmp/.cache' \ + -e GOPATH='/tmp/go' \ --user "$(id -u):$(id -g)" \ ${DOCKER_RUN_ARGS} \ - -v "${GO_MOD_CACHE}":/go/pkg/mod \ + -v "${GO_MOD_CACHE}":/tmp/go/pkg/mod \ -v "${GO_MODULE_PATH}":/usr/src/host-ctr \ -w /usr/src/host-ctr \ - golang:${GO_VERSION} \ + "${SDK_IMAGE}" \ bash -c "${COMMAND}" From b20d3013e6c396783cdbe518cd9573259a12feb9 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 27 Feb 2020 10:08:01 -0800 Subject: [PATCH 0262/1356] Apply suggestion from code review Co-Authored-By: Erikson Tung --- tools/docker-go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker-go b/tools/docker-go index baae6beb..548386e4 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -17,7 +17,7 @@ Required: --module-path The path of the Go module to mount into the container --sdk-image Name of the SDK image to use --go-mod-cache The Go module cache path to mount into the container - --command The command to run in the golang container + --command The command to run in the SDK container EOF } From ddda652fc51ae5aa0360157a6f36040968d66087 Mon Sep 17 00:00:00 2001 From: 
iliana destroyer of worlds Date: Thu, 27 Feb 2020 20:24:26 +0000 Subject: [PATCH 0263/1356] Update lookaside cache URL --- tools/buildsys/src/cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 4555a7f2..2375d2db 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -20,7 +20,7 @@ use std::fs::{self, File}; use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; -static LOOKASIDE_CACHE: &str = "https://thar-upstream-lookaside-cache.s3.us-west-2.amazonaws.com"; +static LOOKASIDE_CACHE: &str = "https://cache.bottlerocket.aws"; pub(crate) struct LookasideCache; From d0d76bf4c5732e2d4217b1672a76ebb3b047b21e Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 27 Feb 2020 12:33:46 -0800 Subject: [PATCH 0264/1356] buildsys: update dependencies This is a plain `cargo update`, followed by an update of reqwest to 0.10, which required a change to use the "blocking" module and the addition of the "url" crate because reqwest removed its `UrlError` type. --- tools/buildsys/Cargo.lock | 1240 ++++++++++------------------- tools/buildsys/Cargo.toml | 3 +- tools/buildsys/src/cache.rs | 2 +- tools/buildsys/src/cache/error.rs | 2 +- 4 files changed, 407 insertions(+), 840 deletions(-) diff --git a/tools/buildsys/Cargo.lock b/tools/buildsys/Cargo.lock index 8416d641..fe07b1e1 100644 --- a/tools/buildsys/Cargo.lock +++ b/tools/buildsys/Cargo.lock @@ -1,47 +1,19 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "adler32" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "anyhow" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "backtrace" -version = "0.3.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "backtrace-sys" -version = "0.1.32" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "base64" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "bitflags" @@ -55,7 +27,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -72,21 +44,22 @@ name = "buildsys" version = "0.1.0" dependencies = [ "duct 0.13.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "snafu 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "users 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bumpalo" -version = "2.6.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -96,18 +69,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bytes" -version = "0.4.12" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "c2-chacha" @@ -119,7 +87,7 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -128,95 +96,18 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cookie" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cookie_store" +name = "core-foundation" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 
(registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crc32fast" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "crossbeam-utils" +name = "core-foundation-sys" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "ct-logs" @@ -241,7 +132,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "dtoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -249,105 +140,97 @@ name = "duct" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "once_cell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "os_pipe 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "encoding_rs" -version = "0.8.21" +version = "0.8.22" 
source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "error-chain" -version = "0.12.1" +name = "fake-simd" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] -name = "failure" -version = "0.1.6" +name = "fnv" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] -name = "failure_derive" -version = "0.1.6" +name = "fuchsia-zircon" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "fake-simd" -version = "0.1.2" +name = "fuchsia-zircon-sys" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "flate2" -version = "1.0.13" +name = "futures-channel" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "fnv" -version = "1.0.6" +name = "futures-core" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "futures-io" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "fuchsia-zircon" -version = "0.3.3" +name = "futures-macro" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" +name = "futures-sink" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "futures" -version = "0.1.29" +name = "futures-task" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "futures-cpupool" -version = "0.1.8" +name = "futures-util" +version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -360,29 +243,30 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "h2" -version = "0.1.26" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -395,36 +279,34 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hex" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "http" -version = 
"0.1.21" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "http-body" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -434,57 +316,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hyper" -version = "0.12.35" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 
(registry+https://github.com/rust-lang/crates.io-index)", - "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hyper-rustls" -version = "0.17.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -494,15 +361,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "indexmap" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -510,20 +377,20 @@ name = "iovec" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "js-sys" -version = "0.3.32" +version = "0.3.35" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -542,17 +409,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.66" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "lock_api" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "log" version = "0.4.8" @@ -566,27 +425,14 @@ name = "matches" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "memchr" -version = "2.2.1" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "memoffset" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "mime" -version = "0.3.14" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -594,18 +440,10 @@ name = "mime_guess" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "miniz_oxide" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "mio" version = "0.6.21" @@ -616,7 +454,7 @@ dependencies = [ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", @@ -641,7 +479,7 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -650,22 +488,22 @@ name = "nom" version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num_cpus" -version = "1.11.1" 
+version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "once_cell" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -673,47 +511,51 @@ name = "opaque-debug" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "os_pipe" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "parking_lot" -version = "0.9.0" +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "pin-project" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "parking_lot_core" -version = "0.6.2" +name = "pin-project-internal" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "percent-encoding" -version = "1.0.1" +name = "pin-project-lite" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "percent-encoding" -version = "2.1.0" +name = "pin-utils" +version = "0.1.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -722,23 +564,26 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "proc-macro2" -version = "1.0.6" +name = "proc-macro-hack" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "publicsuffix" -version = "1.5.4" +name = "proc-macro-nested" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "proc-macro2" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -746,48 +591,21 @@ name = "quote" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "rand_chacha" version = "0.2.1" @@ -797,33 +615,12 @@ dependencies = [ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name 
= "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -834,152 +631,81 @@ dependencies = [ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "redox_syscall" version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "regex" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-syntax" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "reqwest" -version = "0.9.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cookie_store 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"encoding_rs 0.8.21 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 
(registry+https://github.com/rust-lang/crates.io-index)", + "webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ring" -version = "0.16.9" +version = "0.16.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rustc_version" -version = "0.2.3" +name = "rustls" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", + "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rustls" -version = "0.16.0" +name = "rustls-native-certs" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -989,38 +715,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "same-file" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "scopeguard" -version = "1.0.0" +name = "schannel" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "sct" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "semver" -version = "0.9.0" +name = "security-framework" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "semver-parser" -version = "0.7.0" +name = "security-framework-sys" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "serde" @@ -1035,17 +772,17 @@ name = "serde_derive" version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1060,18 +797,18 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "sha2" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1085,7 +822,7 @@ name = "shared_child" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1096,34 +833,26 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "smallvec" -version = "1.0.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "snafu" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "snafu-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "snafu-derive" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1136,201 +865,83 @@ name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "syn" -version = "1.0.11" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "synstructure" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "time" version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-executor" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-io" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-rustls" -version = "0.10.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "tokio-sync" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.17" +name = "tokio-util" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-timer" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "toml" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "try-lock" -version = "0.2.2" 
+name = "tower-service" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "try_from" -version = "0.3.2" +name = "try-lock" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "typenum" @@ -1355,10 +966,10 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1378,17 +989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "url" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1401,15 +1002,7 @@ name = "users" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "uuid" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1424,120 +1017,132 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "walkdir" -version = "2.2.9" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "want" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasi" -version = "0.7.0" +version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-bindgen" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + 
"wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bumpalo 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-bindgen-webidl" -version = "0.2.55" +version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 
(registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "web-sys" -version = "0.3.32" +version = "0.3.35" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)", + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-webidl 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "webpki" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "webpki-roots" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1574,7 +1179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1603,186 +1208,147 @@ dependencies = [ ] [metadata] -"checksum adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" -"checksum anyhow 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "9267dff192e68f3399525901e709a48c1d3982c9c072fa32f2127a0cb0babf14" -"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" -"checksum backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)" = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea" -"checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" -"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" +"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" "checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum bumpalo 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ad807f2fc2bf185eeb98ff3a901bd46dc5ad58163d0fa4577ba0d25674d71708" +"checksum bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742" "checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" -"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -"checksum cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "f52a465a666ca3d838ebbf08b241383421412fe7ebb463527bba275526d89f76" +"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5" -"checksum cookie_store 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46750b3f362965f197996c4448e4a0935e791bf7d6631bfce9ee0af3d24c919c" -"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" +"checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +"checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" "checksum ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" "checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" -"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" +"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" "checksum duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1607fa68d55be208e83bcfbcfffbc1ec65c9fbcf9eb1a5d548dc3ac0100743b0" -"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" -"checksum encoding_rs 0.8.21 (registry+https://github.com/rust-lang/crates.io-index)" = "f730a5068736d360795b76e8c53ef51d052a4090d281412da851f5fd6204f36d" -"checksum error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ab49e9dcb602294bc42f9a7dfc9bc6e936fca4418ea300dbfb84fe16de0b7d9" -"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" -"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" +"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" "checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = 
"1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" -"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" +"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" +"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" +"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" +"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" +"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" "checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -"checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" -"checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +"checksum h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" "checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f629dc602392d3ec14bfc8a09b5e644d7ffd725102b48b81e59f90f2633621d7" -"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" -"checksum http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -"checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +"checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" +"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" "checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)" = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" -"checksum hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "719d85c7df4a7f309a77d145340a063ea929dcb2e025bae46a80345cffec2952" -"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fa1c527bbc634be72aa7ba31e4e4def9bbb020f5416916279b7c705cd838893e" +"checksum hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d7b3ea5827fcb9d4fda14bf4da5f136f0db2ae9c8f4bd4e2d1c6fde4e6db2" +"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" "checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)" = "1c840fdb2167497b0bd0db43d6dfe61e91637fa72f9d061f8bd17ddc44ba6414" +"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e57b3997725d2b60dbec1297f6c2e2957cc383db1cebd6be812163f969c7d586" +"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum 
mime 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "dd1d63acd1b78403cc0c325605908475dd9b9a3acbf65ed8bcab97e27014afcf" +"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" "checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" -"checksum miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" "checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76dac5ed2a876980778b8b85f75a71b6cbf0db0b1232ee12f826bccb00d09d72" -"checksum once_cell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "891f486f630e5c5a4916c7e16c4b24a53e78c860b646e9f8e005e4f16847bfed" +"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +"checksum once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" "checksum os_pipe 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "db4d06355a7090ce852965b2d08e11426c315438462638c6d721448d0b47aa22" -"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" +"checksum pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" +"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" +"checksum pin-utils 0.1.0-alpha.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" -"checksum publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3bbaa49075179162b49acac1c6aa45fb4dafb5f13cf6794276d77bc7fd95757b" +"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" +"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412" -"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" "checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -"checksum rdrand 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" -"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -"checksum reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)" = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" -"checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" -"checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +"checksum reqwest 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f62f24514117d09a8fc74b803d3d65faa27cea1c7378fb12b0d002913f3831" +"checksum ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)" = "741ba1704ae21999c00942f9f5944f801e977f54302af346b596287599ad1862" +"checksum rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +"checksum rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" "checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" -"checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +"checksum schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "507a9e6e8ffe0a4e0ebb9a10293e62fdf7657c06f1b8bb07a8fcf697d2abf295" "checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum security-framework 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "97bbedbe81904398b6ebb054b3e912f99d55807125790f3198ac990d98def5b0" +"checksum security-framework-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "06fd2f23e31ef68dd2328cc383bd493142e46107a3a0e24f7d734e3f3b80fe4c" "checksum serde 1.0.104 
(registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" "checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)" = "48c575e0cc52bdd09b47f330f646cf59afc586e9c4e3ccd6fc1f625b8ea1dad7" +"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" "checksum serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" -"checksum serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" -"checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" +"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +"checksum sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" "checksum shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecf3b85f68e8abaa7555aa5abdb1153079387e60b718283d732f03897fcfc86" -"checksum snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "41207ca11f96a62cd34e6b7fdf73d322b25ae3848eb9d38302169724bb32cf27" -"checksum snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5e338c8b0577457c9dda8e794b6ad7231c96e25b1b0dd5842d52249020c1c0" +"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" +"checksum snafu 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "546db9181bce2aa22ed883c33d65603b76335b4c2533a98289f54265043de7a1" +"checksum snafu-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bdc75da2e0323f297402fd9c8fdba709bb04e4c627cbe31d19a2c91fc8d9f0e2" "checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" "checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -"checksum syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "dff0acdb207ae2fe6d5976617f887eb1e35a2ba52c13c7234c790960cdad9238" -"checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +"checksum syn 1.0.16 
(registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -"checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" -"checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" -"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" -"checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" -"checksum tokio-rustls 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1df2fa53ac211c136832f530ccb081af9af891af22d685a9493e232c7a359bc2" -"checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" -"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" -"checksum tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c32ffea4827978e9aa392d2f743d973c1dfa3730a2ed3f22ce1e6984da848c" -"checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" -"checksum toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "01d1404644c8b12b16bfcffa4322403a91a451584daaaa7c28d3152e6cbc98cf" +"checksum tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "b34bee1facdc352fba10c9c58b654e6ecb6a2250167772bf86071f7c5f2f5061" +"checksum tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" +"checksum tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" +"checksum toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -"checksum try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b" "checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" "checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" "checksum unicode-bidi 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b561e267b2326bb4cebfc0ef9e68355c7abe6c6f522aeac2f5bf95d56c59bdcf" +"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" "checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" +"checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" "checksum users 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c72f4267aea0c3ec6d07eaabea6ead7c5ddacfafc5e22bcf8d186706851fb4cf" -"checksum uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a" "checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" -"checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" -"checksum wasm-bindgen 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "29ae32af33bacd663a9a28241abecf01f2be64e6a185c6139b04f18b6385c5f2" -"checksum wasm-bindgen-backend 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "1845584bd3593442dc0de6e6d9f84454a59a057722f36f005e44665d6ab19d85" -"checksum wasm-bindgen-macro 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "87fcc747e6b73c93d22c947a6334644d22cfec5abd8b66238484dc2b0aeb9fe4" -"checksum wasm-bindgen-macro-support 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "3dc4b3f2c4078c8c4a5f363b92fcf62604c5913cbd16c6ff5aaf0f74ec03f570" -"checksum wasm-bindgen-shared 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "ca0b78d6d3be8589b95d1d49cdc0794728ca734adf36d7c9f07e6459508bb53d" -"checksum wasm-bindgen-webidl 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "3126356474ceb717c8fb5549ae387c9fbf4872818454f4d87708bee997214bb5" -"checksum web-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)" = 
"98405c0a2e722ed3db341b4c5b70eb9fe0021621f7350bab76df93b09b649bbf" -"checksum webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e664e770ac0110e2384769bcc59ed19e329d81f555916a6e072714957b81b4" -"checksum webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" +"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" +"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" +"checksum wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" +"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" +"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" +"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" +"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" +"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +"checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +"checksum webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" "checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" +"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" "checksum 
winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index c2b66231..4e8a53ab 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -10,11 +10,12 @@ publish = false duct = "0.13.0" hex = "0.4.0" rand = { version = "0.7", default-features = false, features = ["std"] } -reqwest = { version = "0.9", default-features = false, features = ["rustls-tls"] } +reqwest = { version = "0.10", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" sha2 = "0.8" snafu = "0.6" toml = "0.5" +url = "2.1" users = { version = "0.9", default-features = false } walkdir = "2" diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 2375d2db..50ebe04e 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -76,7 +76,7 @@ impl LookasideCache { /// then verifies the contents against the SHA-512 hash provided. fn fetch_file>(url: &str, path: P, hash: &str) -> Result<()> { let path = path.as_ref(); - let mut resp = reqwest::get(url).context(error::ExternalFileRequest { url })?; + let mut resp = reqwest::blocking::get(url).context(error::ExternalFileRequest { url })?; let status = resp.status(); ensure!( status.is_success(), diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs index 052713f2..55fab980 100644 --- a/tools/buildsys/src/cache/error.rs +++ b/tools/buildsys/src/cache/error.rs @@ -11,7 +11,7 @@ pub(crate) enum Error { #[snafu(display("Bad file url '{}': {}", url, source))] ExternalFileUrl { url: String, - source: reqwest::UrlError, + source: url::ParseError, }, #[snafu(display("Failed to request '{}': {}", url, source))] From 9150f64168f8b6bf885114682555e6963fdc2c67 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 27 Feb 2020 13:54:18 -0800 Subject: [PATCH 0265/1356] Add further description to README header --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 24035ac2..4a0bfc3c 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,17 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. + +If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART.md) to try Bottlerocket in an Amazon EKS cluster. + +Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. +This is a reflection of what we've learned building operating systems and services at Amazon. You can read more about what drives us in [our charter](CHARTER.md). +The base operating system has just what you need to run containers reliably, and is built with standard open-source components. +Bottlerocket-specific additions focus on reliable updates and on the API. +Instead of making configuration changes manually, you can change settings with an API call, and these changes are automatically migrated through updates. 
+ Some notable features include: * [API access](#api) for configuring your system, with secure out-of-band [access methods](#exploration) when you need them. * [Updates](#updates) based on partition flips, for fast and reliable system updates. From ed2ad8ebc88f3c794af1aeae7ef1682a3cddb802 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 27 Feb 2020 21:12:56 +0000 Subject: [PATCH 0266/1356] Add GLOSSARY.md --- GLOSSARY.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 GLOSSARY.md diff --git a/GLOSSARY.md b/GLOSSARY.md new file mode 100644 index 00000000..546fe145 --- /dev/null +++ b/GLOSSARY.md @@ -0,0 +1,41 @@ +## Bottlerocket terms + +* [**block-party**](sources/updater/block-party): A library that helps retrieve information about Linux block devices. +* [**bork**](sources/api/bork): A setting generator called by sundog to generate the random seed for updog, determining where the host falls in the update order. +* [**buildsys**](tools/buildsys): A build tool that runs package and image builds inside containers. + cargo-make starts the build of each package, each of which calls buildsys, which in turn starts a Docker-based build using the SDK image. +* [**early-boot-config**](sources/api/early-boot-config): A program run at boot to read platform-specific data, such as EC2 user data, and send requested configuration to the API. +* **gptprio:** A structure of bits in GPT partition headers that specifies priority, tries remaining, and whether the partition booted successfully before. + signpost sets these and GRUB uses them to determine which partition set to boot. +* [**growpart**](sources/growpart): A program used to expand disk partitions upon boot. +* **host containers**: Containers that run in a separate instance of containerd than "user" containers spawned by an orchestrator (e.g. Kubernetes). + Used for system maintenance and connectivity. +* [**host-ctr**](sources/host-ctr): The program started by `host-containers@.service` for each host container. + Its job is to start the specified host container on the “host” instance of containerd, which is separate from the “user” instance of containerd used for Kubernetes pods. +* [**laika**](sources/preinit/laika): A crate that builds a binary (`/sbin/preinit`) that's used to mount filesystems before starting init (`systemd`). +* [**model**](sources/models): The API system has a data model defined for each variant, and this model is used by other programs to serialize and deserialize requests while maintaining safety around data types. +* [**netdog**](sources/api/netdog): A program called by wicked to retrieve and write out network configuration from DHCP. +* [**pluto**](sources/api/pluto): A setting generator called by sundog to find networking settings required by Kubernetes. +* [**schnauzer**](sources/api/schnauzer): A setting generator called by sundog to build setting values that contain template variables referencing other settings. +* **setting generator**: A binary that generates the default value of a setting. +* [**signpost**](sources/updater/signpost): A program used to manipulate the GPT header of the OS disk; fields in the header are used by GRUB to determine the partition set we should boot from. +* [**storewolf**](sources/api/storewolf): A program that sets up the data store for the API upon boot. +* [**sundog**](sources/api/sundog): A program run during boot that generates any settings that depend on runtime system information. 
+ It finds settings that need generation by way of metadata in the API, and calls helper programs specified by that metadata.
+* [**thar-be-settings**](sources/api/thar-be-settings): A program that writes out system configuration files, replacing template variables with settings from the API.
+* [**updog**](sources/updater/updog): An update client that interfaces with a specified TUF updates repository to upgrade or downgrade Bottlerocket hosts to different image versions.
+
+## Non-Bottlerocket terms
+
+* **k8s**: [Kubernetes](https://kubernetes.io/), a container orchestration system.
+* [**CNI**](https://github.com/containernetworking/cni): Container Network Interface, a standard for writing plugins to configure network interfaces in containers.
+* **IMDS**: [Amazon EC2's Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
+ Used to retrieve user and platform configuration on an EC2 instance.
+* [**sonobuoy**](https://github.com/vmware-tanzu/sonobuoy): A diagnostic tool that runs Kubernetes conformance tests for Kubernetes clusters.
+* **SSM**: [AWS Systems Manager](https://aws.amazon.com/systems-manager/).
+ The [SSM agent](https://docs.aws.amazon.com/systems-manager/latest/userguide/prereqs-ssm-agent.html) can be used for secure remote management.
+* [**tough**](https://crates.io/crates/tough): A Rust implementation of The Update Framework (TUF).
+* [**tuftool**](https://crates.io/crates/tuftool): A command-line program for interacting with a TUF repository.
+* **TUF**: [The Update Framework](https://theupdateframework.io/).
+ A framework that helps developers maintain the security of software update systems.
+* [**wicked**](https://github.com/openSUSE/wicked): A network interface framework and management system.
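The glossary above describes setting generators (bork, pluto, schnauzer) as helper binaries that sundog invokes during boot to produce values for settings that depend on runtime information. The sketch below only illustrates that shape; the setting value, the JSON encoding, and the assumption that sundog simply captures the program's stdout are illustrative guesses, not the project's documented interface.

```rust
// Hypothetical setting generator, sketched for illustration only.
// Assumption: the caller (sundog) runs the binary named in a setting's
// metadata and stores whatever the program prints on stdout.
use std::process;

fn main() {
    // A real generator (for example, pluto) would query IMDS or another
    // runtime source instead of hard-coding a value.
    let generated_value = r#""203.0.113.10""#; // JSON-encoded string (assumed format)

    println!("{}", generated_value);

    // Exit zero on success; a non-zero exit signals that generation failed.
    process::exit(0);
}
```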
From eb95b72ef6cf36ded81c317d6f3e227fe56a4831 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 28 Feb 2020 11:49:12 -0800 Subject: [PATCH 0267/1356] Moved tools/refresh-timestamp-lambda to the release automation repository tools/refresh-timestamp-lambda now lives in the bottlerocket release automation repository --- tools/refresh-timestamp-lambda/Cargo.lock | 2647 ----------------- tools/refresh-timestamp-lambda/Cargo.toml | 34 - tools/refresh-timestamp-lambda/Makefile | 14 - tools/refresh-timestamp-lambda/README.md | 30 - tools/refresh-timestamp-lambda/README.tpl | 9 - .../TimestampRefreshLambda.yaml | 109 - tools/refresh-timestamp-lambda/build.rs | 32 - tools/refresh-timestamp-lambda/src/main.rs | 254 -- .../timestamp-signer.yaml | 45 - .../tuf-repo-bucket-access-role.yaml | 42 - 10 files changed, 3216 deletions(-) delete mode 100644 tools/refresh-timestamp-lambda/Cargo.lock delete mode 100644 tools/refresh-timestamp-lambda/Cargo.toml delete mode 100644 tools/refresh-timestamp-lambda/Makefile delete mode 100644 tools/refresh-timestamp-lambda/README.md delete mode 100644 tools/refresh-timestamp-lambda/README.tpl delete mode 100644 tools/refresh-timestamp-lambda/TimestampRefreshLambda.yaml delete mode 100644 tools/refresh-timestamp-lambda/build.rs delete mode 100644 tools/refresh-timestamp-lambda/src/main.rs delete mode 100644 tools/refresh-timestamp-lambda/timestamp-signer.yaml delete mode 100644 tools/refresh-timestamp-lambda/tuf-repo-bucket-access-role.yaml diff --git a/tools/refresh-timestamp-lambda/Cargo.lock b/tools/refresh-timestamp-lambda/Cargo.lock deleted file mode 100644 index 60c45d56..00000000 --- a/tools/refresh-timestamp-lambda/Cargo.lock +++ /dev/null @@ -1,2647 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-[[package]] -name = "aho-corasick" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "anyhow" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arc-swap" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arrayref" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "arrayvec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "atty" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "backtrace" -version = "0.3.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "backtrace-sys" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "blake2b_simd" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byte-tools 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bumpalo" -version = "3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byteorder" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bytes" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cargo-readme" -version = "3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cc" -version = "1.0.48" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "chrono" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap" -version = "2.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "colored" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "atty 0.2.13 
(registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "core-foundation" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "core-foundation-sys" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "crossbeam-deque" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", - "subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ct-logs" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "dirs" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - 
"redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "doc-comment" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "dtoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "encoding_rs" -version = "0.8.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "envy" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure_derive" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fnv" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures" -version = "0.1.29" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-channel" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "getrandom" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "h2" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "h2" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hermit-abi" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hex" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http-body" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "httparse" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "hyper" -version = "0.12.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 
(registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hyper" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hyper-rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls-native-certs 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - 
-[[package]] -name = "hyper-tls" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "idna" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "indexmap" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "js-sys" -version = "0.3.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lambda_runtime" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime_core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lambda_runtime_client" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime_errors 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lambda_runtime_core" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime_client 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime_errors 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lambda_runtime_errors" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime_errors_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lambda_runtime_errors_derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.66" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "lock_api" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "md5" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memchr" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memoffset" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "mime_guess" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio" -version = 
"0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio-uds" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miow" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miow" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "native-tls" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "net2" -version = "0.2.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "nom" -version = "4.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-integer" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-traits" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num_cpus" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "hermit-abi 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "olpc-cjson" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "openssl" -version = "0.10.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "openssl-sys" -version = "0.9.53" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pem" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-project" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-utils" -version = "0.1.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pkg-config" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ppv-lite86" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro-hack" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "proc-macro-nested" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "proc-macro2" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "1.0.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_chacha" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "redox_users" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "refresh-timestamp-lambda" -version = "0.1.0" -dependencies = [ - "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "envy 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lambda_runtime 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_s3 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_ssm 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_sts 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "simple-error 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "simple_logger 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tough 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-syntax" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "remove_dir_all" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "reqwest" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-rustls 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 
0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ring" -version = "0.16.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_core" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_signature 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_credential" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-process 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_s3" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_signature" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "md5 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_ssm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rusoto_sts" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rust-argon2" -version = "0.5.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustls" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustls-native-certs" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "schannel" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "scopeguard" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "sct" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "security-framework" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "security-framework-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde" -version 
= "1.0.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_derive" -version = "1.0.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_json" -version = "1.0.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_plain" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sha2" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "signal-hook" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "signal-hook-registry" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "simple-error" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "simple_logger" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "colored 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "maybe-uninit 2.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "smallvec" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "snafu" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "snafu-derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "socket2" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sourcefile" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syn" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "synstructure" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "synstructure" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tempfile" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "thread_local" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "time" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-codec" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-executor" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-fs" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-io" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-process" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-signal 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-rustls" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-signal" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-sync" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-timer" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-udp" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-uds" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-util" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "toml" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tough" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pem 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "try-lock" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "typenum" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-width" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-xid" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "untrusted" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "url" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "vcpkg" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "vec_map" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "version_check" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasi" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wasm-bindgen" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wasm-bindgen-webidl" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - 
"weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "web-sys" -version = "0.3.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "webpki" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "webpki-roots" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "weedle" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "xml-rs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" -"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" -"checksum arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d7b8a9123b8027467bce0099fe556c628a53c8d83df0507084c31e9ba2e39aff" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" -"checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" -"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" -"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" -"checksum backtrace 0.3.40 (registry+https://github.com/rust-lang/crates.io-index)" = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea" -"checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" -"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum blake2b_simd 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b83b7baab1e671718d78204225800d6b170e648188ac7dc992e9d6bddf87d0c0" -"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -"checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5fb8038c1ddc0a5f73787b130f4cc75151e96ed33e417fde765eb5a81e3532f4" -"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" -"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -"checksum cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f802a8fcc14bebdf651fd33323654451bdd168c884b22c270dfd8afb403a50" -"checksum cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "f52a465a666ca3d838ebbf08b241383421412fe7ebb463527bba275526d89f76" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" -"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum colored 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "433e7ac7d511768127ed85b0c4947f47a254131e37864b2dc13f52aa32cd37e5" -"checksum constant_time_eq 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "995a44c877f9212528ccc74b21a232f66ad69001e40ede5bcee2ac9ef2657120" -"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" -"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" -"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" -"checksum crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -"checksum ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" -"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" -"checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" -"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" -"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" -"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" -"checksum envy 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f938a4abd5b75fe3737902dbc2e79ca142cc1526827a9e40b829a086758531a9" -"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" -"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" -"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" -"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" -"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" -"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" -"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" -"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" -"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -"checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" -"checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -"checksum h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f629dc602392d3ec14bfc8a09b5e644d7ffd725102b48b81e59f90f2633621d7" -"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" -"checksum hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -"checksum http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" -"checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" -"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)" = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" -"checksum hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fa1c527bbc634be72aa7ba31e4e4def9bbb020f5416916279b7c705cd838893e" -"checksum hyper-rustls 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6ea6215c7314d450ee45970ab8b3851ab447a0e6bafdd19e31b20a42dbb7faf" -"checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d7b3ea5827fcb9d4fda14bf4da5f136f0db2ae9c8f4bd4e2d1c6fde4e6db2" -"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lambda_runtime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "077b8819fe6998266342efdc5154192551143f390ab758007cc7421992e61b07" -"checksum lambda_runtime_client 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e64758b587f1d07b6ca4814a3bc0f3db48c89cc3c895db43a23c690855b370" -"checksum lambda_runtime_core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "da1cb59e060c1068cfa386b58c202a4381c509ace85a1eadab505164f0563ca5" -"checksum lambda_runtime_errors 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93b63ea3688df164aaa5643c5f3291cc481366d624729deb3c4dc85ee65ac20a" -"checksum lambda_runtime_errors_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "38fa619ec6f1ee2371a109683d251035c83f01b8ffc10f4aa46de821836a7baf" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e57b3997725d2b60dbec1297f6c2e2957cc383db1cebd6be812163f969c7d586" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum md5 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" -"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" -"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" -"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226" -"checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" -"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" -"checksum num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c81ffc11c212fa327657cb19dd85eb7419e163b5b076bede2bdb5c974c07e4" -"checksum num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76dac5ed2a876980778b8b85f75a71b6cbf0db0b1232ee12f826bccb00d09d72" -"checksum olpc-cjson 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9409e2493366c8f19387c98c5189ab9c937541b5bf48f11390d038a59fdfd9c1" -"checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -"checksum openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3cc5799d98e1088141b8e01ff760112bbd9f19d850c124500566ca6901a585" -"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)" = "465d16ae7fc0e313318f7de5cecf57b2fbe7511fd213978b457e1c96ff46736f" -"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -"checksum pem 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a1581760c757a756a41f0ee3ff01256227bdf64cb752839779b95ffb01c59793" -"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" -"checksum pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" -"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" -"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" -"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" -"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum rand_os 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d" -"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" -"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0e798e19e258bf6c30a304622e3e9ac820e483b06a1857a026e1f109b113fe4" -"checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" -"checksum rusoto_core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1d1ecfe8dac29878a713fbc4c36b0a84a48f7a6883541841cdff9fdd2ba7dfb" -"checksum rusoto_credential 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8632e41d289db90dd40d0389c71a23c5489e3afd448424226529113102e2a002" -"checksum rusoto_s3 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fedcadf3d73c2925b05d547b66787f2219c5e727a98c893fff5cf2197dbd678" -"checksum rusoto_signature 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7063a70614eb4b36f49bcf4f6f6bb30cc765e3072b317d6afdfe51e7a9f482d1" -"checksum rusoto_ssm 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "655261941f71bedd6b7a1b5cfaff8c2fa1909ea8f8505fad72edf53f27516232" -"checksum rusoto_sts 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "48f912128a23aded0499bec1734ca35d8a33e9dc5bf86b649a5564900efb5464" -"checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf" -"checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -"checksum rustls-native-certs 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51ffebdbb48c14f84eba0b715197d673aff1dd22cc1007ca647e28483bbcc307" -"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" -"checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" -"checksum sct 
0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" -"checksum security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" -"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)" = "48c575e0cc52bdd09b47f330f646cf59afc586e9c4e3ccd6fc1f625b8ea1dad7" -"checksum serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" -"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -"checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" -"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" -"checksum signal-hook 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "7a9c17dd3ba2d36023a5c9472ecddeda07e27fd0b05436e8c1e0c8f178185652" -"checksum signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" -"checksum simple-error 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "339844c9af2d844b9230bb28e8f819a7790cbf20a29b5cbd2b59916a03a1ef51" -"checksum simple_logger 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3a4756ecc75607ba957820ac0a2413a6c27e6c61191cda0c62c6dcea4da88870" -"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44e59e0c9fa00817912ae6e4e6e3c4fe04455e75699d06eedc7d85917ed8e8f4" -"checksum snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "41207ca11f96a62cd34e6b7fdf73d322b25ae3848eb9d38302169724bb32cf27" -"checksum snafu-derive 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5e338c8b0577457c9dda8e794b6ad7231c96e25b1b0dd5842d52249020c1c0" -"checksum socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85" -"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" -"checksum 
spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -"checksum subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" -"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -"checksum syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "dff0acdb207ae2fe6d5976617f887eb1e35a2ba52c13c7234c790960cdad9238" -"checksum synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -"checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" -"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -"checksum tokio 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8fdd17989496f49cdc57978c96f0c9fe5e4a58a8bddc6813c449a4624f6a030b" -"checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" -"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" -"checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" -"checksum tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" -"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" -"checksum tokio-process 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afbd6ef1b8cc2bd2c2b580d882774d443ebb1c6ceefe35ba9ea4ab586c89dbe8" -"checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" -"checksum tokio-rustls 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "141afec0978abae6573065a48882c6bae44c5cc61db9b511ac4abf6a09bfd9cc" -"checksum tokio-signal 0.2.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "dd6dc5276ea05ce379a16de90083ec80836440d5ef8a6a39545a3207373b8296" -"checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" -"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" -"checksum tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c32ffea4827978e9aa392d2f743d973c1dfa3730a2ed3f22ce1e6984da848c" -"checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" -"checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" -"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" -"checksum tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" -"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" -"checksum tough 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "592ee41113b853fa04cc37ecf779bcee856b9f41087779333a0f22480ff30602" -"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" -"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" -"checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b561e267b2326bb4cebfc0ef9e68355c7abe6c6f522aeac2f5bf95d56c59bdcf" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" -"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" -"checksum vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" -"checksum vec_map 0.8.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" -"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" -"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" -"checksum wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" -"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" -"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" -"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" -"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" -"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" -"checksum webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e664e770ac0110e2384769bcc59ed19e329d81f555916a6e072714957b81b4" -"checksum webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" -"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -"checksum xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "541b12c998c5b56aa2b4e6f18f03664eef9a4fd0a246a55594efae6cc2d964b5" diff --git a/tools/refresh-timestamp-lambda/Cargo.toml b/tools/refresh-timestamp-lambda/Cargo.toml deleted file mode 100644 index 4a034a44..00000000 --- a/tools/refresh-timestamp-lambda/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "refresh-timestamp-lambda" -version = "0.1.0" -authors = ["Erikson Tung "] -edition = "2018" -autobins = false -build = "build.rs" - -[dependencies] -lambda_runtime = "0.2.1" -serde = { version = "1", features = ["derive"] } -serde_json = "1.0.44" -log = "0.4.8" -simple_logger = "1.3.0" -simple-error = "0.2.1" -rusoto_s3 = "0.42.0" -rusoto_ssm = "0.42.0" -rusoto_sts = "0.42.0" -rusoto_core = "0.42.0" -chrono = "0.4" -failure = "0.1.6" -reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls"] } -tough = { version = "0.4.0", features = ["http"] } -ring = "0.16.9" -olpc-cjson = "0.1.0" -tempfile = "3.1.0" -envy = "0.4" - -[build-dependencies] -cargo-readme = "3.1" - -[[bin]] -name = "bootstrap" -path = "src/main.rs" \ No newline at end of file diff --git a/tools/refresh-timestamp-lambda/Makefile b/tools/refresh-timestamp-lambda/Makefile deleted file mode 100644 index 81cbc72e..00000000 --- a/tools/refresh-timestamp-lambda/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# TODO: Should use our own SDK container to build the lambda. Replace with SDK container when it's easier to pull. -BUILD_CONTAINER ?= clux/muslrust - -build: - docker run --network=host \ - --user "$(id -u):$(id -g)" \ - -v ${PWD}:/volume:ro \ - -v ${PWD}/target:/volume/target \ - -e SKIP_README=1 \ - --rm -t $(BUILD_CONTAINER) \ - cargo build --locked --release --target x86_64-unknown-linux-musl - -zip: build ./target/x86_64-unknown-linux-musl/release/bootstrap - zip -j refresh_timestamp_lambda.zip ./target/x86_64-unknown-linux-musl/release/bootstrap diff --git a/tools/refresh-timestamp-lambda/README.md b/tools/refresh-timestamp-lambda/README.md deleted file mode 100644 index 697e6a76..00000000 --- a/tools/refresh-timestamp-lambda/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# refresh-timestamp-lambda - -Current version: 0.1.0 - -## Introduction - -This is a lambda function that periodically refreshes a TUF repository's `timestamp.json` metadata file's expiration date and version. - -Every time this lambda runs, the expiration date is pushed out by a custom number of days from the current date (defined by the lambda event). - -## Compiling & Building - -This rust lambda needs to be statically compiled and linked against [musl-libc](https://www.musl-libc.org/). -Currently building with [clux/muslrust](https://github.com/clux/muslrust). - -To build, run `make build`. -Then, to zip the lambda bootstrap binary, run `make zip`. - -## Setting up the Lambda with CloudFormation - -Use `timestamp-signer.yaml` to create an assumable role in the account where the signing key resides. This lets the lambda have access to the signing key. - -Use `tuf-repo-access-role.yaml` to create an assumable role in the account where the TUF repository bucket resides. This lets the lambda have access to update `timestamp.json`. - -Use `TimestampRefreshLambda.yaml` to create the CFN stack for this lambda. 
- - -## Colophon - -This text was generated from `README.tpl` using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/main.rs`. \ No newline at end of file diff --git a/tools/refresh-timestamp-lambda/README.tpl b/tools/refresh-timestamp-lambda/README.tpl deleted file mode 100644 index 7b992c50..00000000 --- a/tools/refresh-timestamp-lambda/README.tpl +++ /dev/null @@ -1,9 +0,0 @@ -# {{crate}} - -Current version: {{version}} - -{{readme}} - -## Colophon - -This text was generated from `README.tpl` using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/main.rs`. diff --git a/tools/refresh-timestamp-lambda/TimestampRefreshLambda.yaml b/tools/refresh-timestamp-lambda/TimestampRefreshLambda.yaml deleted file mode 100644 index 22c79285..00000000 --- a/tools/refresh-timestamp-lambda/TimestampRefreshLambda.yaml +++ /dev/null @@ -1,109 +0,0 @@ -AWSTemplateFormatVersion: '2010-09-09' -Description: 'Lambda Function for refreshing TUF repository timestamp metadata' -Parameters: - SigningRole: - Description: 'The ARN of the role that allows access to the signing keys' - Type: String - SigningKeyParameterName: - Description: 'Name of the SSM parameter for the signing key' - Type: String - LambdaBucketName: - Description: 'The name of the S3 bucket where the lambda code lives' - Type: String - LambdaZipKey: - Description: 'The key of the lambda zip file within the lambda bucket' - Type: String - Default: 'refresh_timestamp_lambda.zip' - BucketAccessRole: - Description: 'The ARN of the role that allows access to the TUF repository S3 bucket' - Type: String - TUFRepoBucketName: - Description: 'The name of the TUF repository S3 bucket' - AllowedPattern: ^[0-9a-zA-Z]+([0-9a-zA-Z-]*[0-9a-zA-Z])*$ - Type: String - MetadataPath: - Description: 'The path where to find metadata files in the TUF repo S3 bucket (e.g. "0af132ea221/metadata")' - Type: String - RefreshFrequencyDays: - Description: 'Defines how often to run the refresh lambda (in days).' - MinValue: 1 - MaxValue: 10 - Type: Number - RefreshValidityDays: - Description: 'How many days to refresh the timestamp metadata for.' - MinValue: 1 - MaxValue: 10 - Default: 7 - Type: Number - MetadataBaseUrl: - Type: String - Description: 'Metadata base url that specifies the TUF repository metadata files source.' - TargetBaseUrl: - Type: String - Description: 'Target base url that specifies where the listed targets in the TUF repository can be retrieved.' 
-Resources: - LambdaRole: - Type: AWS::IAM::Role - Properties: - AssumeRolePolicyDocument: - Version: '2012-10-17' - Statement: - - Effect: Allow - Principal: - Service: lambda.amazonaws.com - Action: sts:AssumeRole - ManagedPolicyArns: - - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole - Policies: - - PolicyName: 'AssumeSigningRole' - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: sts:AssumeRole - Resource: !Sub '${SigningRole}' - - PolicyName: 'AssumeBucketAccessRole' - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: sts:AssumeRole - Resource: !Sub '${BucketAccessRole}' - LambdaFunction: - Type: AWS::Lambda::Function - Properties: - Description: Refreshes TUF repository timestamp.json - Handler: hello.handler - Runtime: provided - Role: !GetAtt 'LambdaRole.Arn' - Timeout: 300 - Code: - S3Bucket: !Sub '${LambdaBucketName}' - S3Key: !Sub '${LambdaZipKey}' - Environment: - Variables: - BUCKET_NAME: !Sub '${TUFRepoBucketName}' - SIGNING_ROLE_ARN: !Sub '${SigningRole}' - BUCKET_ACCESS_ROLE_ARN: !Sub '${BucketAccessRole}' - KEY_PARAMETER_NAME: !Sub '${SigningKeyParameterName}' - METADATA_PATH: !Sub '${MetadataPath}' - REFRESH_VALIDITY_DAYS: !Sub '${RefreshValidityDays}' - METADATA_URL: !Sub '${MetadataBaseUrl}' - TARGETS_URL: !Sub '${TargetBaseUrl}' - LambdaFunctionSchedule: - Type: AWS::Events::Rule - Properties: - Description: Schedules the refresh lambda - Name: refresh-timestamp-lambda-schedule - ScheduleExpression: !Sub 'cron(0 0 */${RefreshFrequencyDays} * ? *)' - State: ENABLED - Targets: - - Arn: !GetAtt 'LambdaFunction.Arn' - Id: refresh-timestamp-lambda - LambdaFunctionPermission: - Type: AWS::Lambda::Permission - Properties: - Action: lambda:InvokeFunction - FunctionName: !GetAtt 'LambdaFunction.Arn' - Principal: events.amazonaws.com - SourceArn: !GetAtt 'LambdaFunctionSchedule.Arn' diff --git a/tools/refresh-timestamp-lambda/build.rs b/tools/refresh-timestamp-lambda/build.rs deleted file mode 100644 index 49828a1c..00000000 --- a/tools/refresh-timestamp-lambda/build.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Automatically generate README.md from rustdoc. - -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; - -fn main() { - // Check for environment variable "SKIP_README". If it is set, - // skip README generation - if env::var_os("SKIP_README").is_some() { - return; - } - - let mut source = File::open("src/main.rs").unwrap(); - let mut template = File::open("README.tpl").unwrap(); - - let content = cargo_readme::generate_readme( - &PathBuf::from("."), // root - &mut source, // source - Some(&mut template), // template - // The "add x" arguments don't apply when using a template. - true, // add title - false, // add badges - false, // add license - true, // indent headings - ) - .unwrap(); - - let mut readme = File::create("README.md").unwrap(); - readme.write_all(content.as_bytes()).unwrap(); -} diff --git a/tools/refresh-timestamp-lambda/src/main.rs b/tools/refresh-timestamp-lambda/src/main.rs deleted file mode 100644 index d6ceeb5b..00000000 --- a/tools/refresh-timestamp-lambda/src/main.rs +++ /dev/null @@ -1,254 +0,0 @@ -/*! -# Introduction - -This is a lambda function that periodically refreshes a TUF repository's `timestamp.json` metadata file's expiration date and version. - -Every time this lambda runs, the expiration date is pushed out by a custom number of days from the current date (defined by the lambda event). 
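A quick way to confirm a refresh from the outside is to read the signed version and expiration straight out of the published metadata. This is only a sketch: the URL is a placeholder for the repository's metadata base URL, and it assumes `curl` and `jq` are available.

```
# Illustration only: replace the URL with your metadata base URL.
# After a successful run, .signed.version is the Unix timestamp of the refresh
# and .signed.expires has moved out by REFRESH_VALIDITY_DAYS.
curl -s "https://example.com/metadata/timestamp.json" | jq '.signed | {version, expires}'
```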
- -# Compiling & Building - -This rust lambda needs to be statically compiled and linked against [musl-libc](https://www.musl-libc.org/). -Currently building with [clux/muslrust](https://github.com/clux/muslrust). - -To build, run `make build`. -Then, to zip the lambda bootstrap binary, run `make zip`. - -# Setting up the Lambda with CloudFormation - -Use `timestamp-signer.yaml` to create an assumable role in the account where the signing key resides. This lets the lambda have access to the signing key. - -Use `tuf-repo-access-role.yaml` to create an assumable role in the account where the TUF repository bucket resides. This lets the lambda have access to update `timestamp.json`. - -Use `TimestampRefreshLambda.yaml` to create the CFN stack for this lambda. - -*/ - -#![deny(rust_2018_idioms)] -#![warn(clippy::pedantic)] - -use chrono::{Duration, Utc}; -use failure::format_err; -use lambda_runtime::{error::HandlerError, lambda, Context}; -use log::{self, error, info}; -use olpc_cjson::CanonicalFormatter; -use ring::rand::SystemRandom; -use rusoto_core::request::HttpClient; -use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3Client, S3}; -use rusoto_ssm::{GetParameterRequest, Ssm, SsmClient}; -use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient}; -use serde::export::from_utf8_lossy; -use serde::{Deserialize, Serialize}; -use simple_error::bail; -use simple_logger; -use std::error::Error; -use std::io::Read; -use std::num::NonZeroU64; -use tempfile::tempdir; -use tough::schema::{RoleType, Signature}; -use tough::sign::Sign; -use tough::{HttpTransport, Limits, Repository, Settings}; - -// Contains the environment variables we need to execute the program -#[derive(Deserialize, Debug)] -#[serde(rename_all = "lowercase")] -struct EnvVars { - bucket_access_role_arn: String, - signing_role_arn: String, - key_parameter_name: String, - bucket_name: String, - metadata_path: String, - metadata_url: String, - targets_url: String, - refresh_validity_days: String, -} - -#[derive(Deserialize, Copy, Clone)] -struct CustomEvent {} - -#[derive(Serialize)] -struct CustomOutput { - message: String, -} - -fn main() -> Result<(), Box> { - simple_logger::init_with_level(log::Level::Info)?; - lambda!(handler); - Ok(()) -} - -fn get_signing_key( - ssm_client: &SsmClient, - key_parameter_name: String, -) -> Result { - let get_signing_key_req = GetParameterRequest { - name: key_parameter_name, - with_decryption: Some(true), - }; - match ssm_client.get_parameter(get_signing_key_req).sync() { - Ok(signing_key) => { - if let Some(signing_key) = signing_key.parameter { - if let Some(key) = signing_key.value { - return Ok(key); - } - } - bail!("Parameter unable to be read") - } - Err(error) => { - error!("failed to retrieve signing key parameter"); - Err(HandlerError::from(failure::Error::from(error))) - } - } -} - -fn handler(_e: CustomEvent, _c: Context) -> Result { - refresh_timestamp().map_err(HandlerError::from) -} - -fn refresh_timestamp() -> failure::Fallible { - info!("Parsing environment variables"); - // Get the configured environment variables - let env_vars: EnvVars = - envy::from_env().map_err(|e| HandlerError::from(failure::Error::from(e)))?; - - let s3_http_client = HttpClient::new()?; - let s3_sts_client = StsClient::new(Default::default()); - let s3_session_cred_provider = StsAssumeRoleSessionCredentialsProvider::new( - s3_sts_client, - env_vars.bucket_access_role_arn, - "sign-timestamp-access-bucket".to_owned(), - None, - None, - None, - None, - ); - let s3_client = - 
S3Client::new_with(s3_http_client, s3_session_cred_provider, Default::default()); - - let ssm_http_client = HttpClient::new()?; - let ssm_sts_client = StsClient::new(Default::default()); - let ssm_session_cred_provider = StsAssumeRoleSessionCredentialsProvider::new( - ssm_sts_client, - env_vars.signing_role_arn, - "sign-timestamp-get-key".to_owned(), - None, - None, - None, - None, - ); - let ssm_client = SsmClient::new_with( - ssm_http_client, - ssm_session_cred_provider, - Default::default(), - ); - - // Retrieves signing key from SSM parameter - let signing_key = get_signing_key(&ssm_client, env_vars.key_parameter_name)?; - let keypair: Box = Box::new(tough::sign::parse_keypair( - &signing_key.as_bytes().to_vec(), - )?); - - // Create the datastore path for storing the metadata files - let datastore = tempdir()?; - - // Read root.json from the TUF repo for the root keys - // Note: We're retrieving a root.json directly from the TUF repository. We're not actually updating to anything, just refreshing - // the timestamp metadata file of the TUF repository itself. - let get_root_request = GetObjectRequest { - bucket: env_vars.bucket_name.to_owned(), - key: (env_vars.metadata_path.to_owned() + "/1.root.json"), - ..GetObjectRequest::default() - }; - let mut buffer = Vec::new(); - let root_json = match s3_client.get_object(get_root_request).sync()?.body { - Some(body) => { - body.into_blocking_read().read_to_end(&mut buffer)?; - from_utf8_lossy(&buffer) - } - None => return Err(format_err!("Empty timestamp.json file")), - }; - - info!("Loading TUF repo"); - let transport = HttpTransport::new(); - let repo = Repository::load( - &transport, - Settings { - root: root_json.as_bytes(), - datastore: datastore.path(), - metadata_base_url: &env_vars.metadata_url, - target_base_url: &env_vars.targets_url, - limits: Limits { - ..tough::Limits::default() - }, - }, - )?; - - let mut timestamp = repo.timestamp().clone(); - let now = Utc::now(); - let new_version = if let Some(version) = NonZeroU64::new(now.timestamp() as u64) { - version - } else { - return Err(format_err!("Couldn't retrieve current UTC timestamp")); - }; - - info!( - "Updating version from {} to {}", - timestamp.signed.version, new_version - ); - timestamp.signed.version = new_version; - - let new_expiration = now + Duration::days(env_vars.refresh_validity_days.parse::()?); - info!( - "Updating expiration date from {} to {}", - timestamp.signed.expires.to_rfc3339(), - new_expiration.to_rfc3339() - ); - timestamp.signed.expires = new_expiration; - - let signed_root = repo.root(); - let key_id = if let Some(key) = signed_root - .signed - .keys - .iter() - .find(|(_, key)| keypair.tuf_key() == **key) - { - key.0 - } else { - error!("Couldn't find key pair"); - return Err(format_err!("Couldn't find key")); - }; - - let mut data = Vec::new(); - let role_key = match signed_root.signed.roles.get(&RoleType::Timestamp) { - Some(key) => key, - None => return Err(format_err!("Unable to find role keys")), - }; - if role_key.keyids.contains(key_id) { - let mut ser = serde_json::Serializer::with_formatter(&mut data, CanonicalFormatter::new()); - timestamp.signed.serialize(&mut ser)?; - - let sig = keypair.sign(&data, &SystemRandom::new())?; - timestamp.signatures.clear(); - timestamp.signatures.push(Signature { - keyid: key_id.clone(), - sig: sig.into(), - }); - } - - let body = serde_json::to_vec_pretty(×tamp)?; - let put_request = PutObjectRequest { - bucket: env_vars.bucket_name, - key: (env_vars.metadata_path + "/timestamp.json"), - body: 
Some(body.into()), - ..PutObjectRequest::default() - }; - s3_client.put_object(put_request).sync()?; - - Ok(CustomOutput { - message: format!( - "new version = {}, new expiration date = {}, signed data: {}", - timestamp.signed.version, - timestamp.signed.expires, - from_utf8_lossy(&data) - ), - }) -} diff --git a/tools/refresh-timestamp-lambda/timestamp-signer.yaml b/tools/refresh-timestamp-lambda/timestamp-signer.yaml deleted file mode 100644 index 0685735e..00000000 --- a/tools/refresh-timestamp-lambda/timestamp-signer.yaml +++ /dev/null @@ -1,45 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: 'Creates an IAM role that allows access to the TUF timestamp metadata signing key' -Parameters: - LambdaAccountId: - Description: 'The AWS account where the lambda that needs access to the signing key resides' - Type: String - SigningKeyName: - Description: 'The parameter name of the signing key to grant access to' - Type: String - AllowedPattern: ^/.*$ - Default: '/bottlerocket/root-key' - KMSKeyId: - Description: 'The key id of the KMS key used to encrypt the signing key parameter into a secure string' - Type: String -Resources: - TimestampSignerAccessRole: - Type: AWS::IAM::Role - Properties: - Description: 'Role allowing access to the signing key' - Path: !Sub '/${AWS::StackName}/' - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - AWS: !Sub '${LambdaAccountId}' - Action: - - 'sts:AssumeRole' - Policies: - - PolicyName: 'SigningKeyAccess' - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - ssm:GetParameter - Resource: !Sub 'arn:${AWS::Partition}:ssm:${AWS::Region}:${AWS::AccountId}:parameter${SigningKeyName}' - - Effect: Allow - Action: - - kms:Decrypt - Resource: !Sub 'arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}' -Outputs: - Role: - Description: 'The ARN of the timestamp signing role' - Value: !GetAtt TimestampSignerAccessRole.Arn diff --git a/tools/refresh-timestamp-lambda/tuf-repo-bucket-access-role.yaml b/tools/refresh-timestamp-lambda/tuf-repo-bucket-access-role.yaml deleted file mode 100644 index 965484ef..00000000 --- a/tools/refresh-timestamp-lambda/tuf-repo-bucket-access-role.yaml +++ /dev/null @@ -1,42 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Description: '' -Parameters: - LambdaAccountId: - Description: 'The AWS account where the lambda that needs access to the TUF repository bucket' - Type: String - TUFBucketRepoName: - Description: 'Name of the TUF repository S3 bucket' - Default: 'thar-be-updates-in-us-west-2' - Type: String -Resources: - TufRepositoryBucketAccessRole: - Type: AWS::IAM::Role - Properties: - Description: 'Role allowing access to the TUF repository S3 bucket' - Path: !Sub '/${AWS::StackName}/' - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - AWS: !Sub '${LambdaAccountId}' - Action: - - 'sts:AssumeRole' - Policies: - - PolicyName: 'TufRepositoryAccess' - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - s3:GetObject - - s3:PutObject - Resource: !Sub 'arn:${AWS::Partition}:s3:::${TUFBucketRepoName}/*/timestamp.json' - - Effect: Allow - Action: - - s3:GetObject - Resource: !Sub 'arn:${AWS::Partition}:s3:::${TUFBucketRepoName}/*/*.root.json' -Outputs: - Role: - Description: 'The ARN of the TUF repository bucket access role' - Value: !GetAtt TufRepositoryBucketAccessRole.Arn From 37ccceafed27cad60eb3662400ae8b4677dd61f2 Mon Sep 17 00:00:00 2001 From: Ben Cressey 
Date: Thu, 27 Feb 2020 20:59:33 +0000 Subject: [PATCH 0268/1356] kernel: enable BTF debug info Signed-off-by: Ben Cressey --- packages/kernel/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index 2fd12244..a539b86c 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -40,3 +40,6 @@ CONFIG_IKCONFIG_PROC=y # kernel headers at /sys/kernel/kheaders.tar.xz CONFIG_IKHEADERS=y + +# BTF debug info at /sys/kernel/btf/vmlinux +CONFIG_DEBUG_INFO_BTF=y From 539b6fbb640ae73a2843bb952f860c595999e9a2 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 2 Mar 2020 10:34:55 -0800 Subject: [PATCH 0269/1356] Moved conformance-test scripts related commits Kubernetes Conformance Test scripts related commits have been moved to the release automation repository. --- tools/conformance-test/README.md | 29 --- .../conformance-test/clean-up-test-cluster.sh | 98 -------- .../conformance-test/run-conformance-test.sh | 190 -------------- tools/conformance-test/setup-test-cluster.sh | 231 ------------------ 4 files changed, 548 deletions(-) delete mode 100644 tools/conformance-test/README.md delete mode 100755 tools/conformance-test/clean-up-test-cluster.sh delete mode 100755 tools/conformance-test/run-conformance-test.sh delete mode 100755 tools/conformance-test/setup-test-cluster.sh diff --git a/tools/conformance-test/README.md b/tools/conformance-test/README.md deleted file mode 100644 index 574e5e53..00000000 --- a/tools/conformance-test/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Bottlerocket Kubernetes Conformance Testing -`setup-test-cluster.sh` sets up an EKS cluster and generates an env file containing cluster information and the user data used to launch Bottlerocket worker nodes. - -`run-conformance-test.sh` uses the generated env file to launch specified number of worker nodes in the described EKS cluster and runs Kubernetes conformance tests. -Once the tests are done, the script outputs the test results in the current directory and cleans up the worker nodes. - -`clean-up-test-cluster` deletes the EKS test cluster specified by the env file and any files generated by `setup-test-cluster.sh`. - -## Running conformance tests against Bottlerocket nodes using these scripts: -1. Run `setup-test-cluster.sh` to set up a test cluster: - ``` - setup-test-cluster.sh --region us-west-2 --cluster-name my-test-cluster - ``` - Once the setup completes successfully, there should be a user data file and an env file in the current directory. -2. Run `run-conformance-test.sh` to launch Bottlerocket worker nodes and run the Kubernetes conformance test: - ``` - run-conformance-test.sh --node-ami ami-07245e9300b9290c1 \ - --cluster-env-file my-test-cluster.env - --instance-type m5.large \ - --num-nodes 3 - ``` -3. The Sonobuoy Kubernetes conformance test results can be examined with: - ``` - sonobuoy results my-test-cluster-conformance-test-results/TIMESTAMP.tar.gz - ``` -4. To clean up the test cluster and other associated resources, run `clean-up-test-cluster.sh`: - ``` - clean-up-test-cluster.sh --cluster-env-file my-test-cluster.env - ``` diff --git a/tools/conformance-test/clean-up-test-cluster.sh b/tools/conformance-test/clean-up-test-cluster.sh deleted file mode 100755 index e872c98c..00000000 --- a/tools/conformance-test/clean-up-test-cluster.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash - -# Check for required tools -if ! 
command -v eksctl >/dev/null; then - echo "* Can't find executable 'eksctl'" >&2 - exit 2 -fi - -# Helper functions - -usage() { - cat >&2 <&2 - exit 2 - fi -} - -parse_args() { - while [ ${#} -gt 0 ] ; do - case "${1}" in - --cluster-env-file ) shift; ENV_FILE="${1}" ;; - - --help ) usage; exit 0 ;; - *) - echo "ERROR: Unknown argument: ${1}" >&2 - usage - exit 2 - ;; - esac - shift - done - - # Required arguments - required_arg "--cluster-env-file" "${ENV_FILE}" -} - -exit_on_error() { - local rc="${1:?}" - local msg="${2:?}" - - if [ "${rc}" -ne 0 ]; then - echo "${msg}" >&2 - exit 1 - fi -} - -# Initial setup and checks -parse_args "${@}" - -# Load the env file created by the 'setup-test-cluster' script, if it doesn't exist, exit -if [ -f "${ENV_FILE}" ]; then - . "${ENV_FILE}" -else - echo "* Failed to open env file at ${ENV_FILE}." >&2 - exit 1 -fi - -echo "Removing security group dependencies." -aws ec2 revoke-security-group-ingress \ - --region "${REGION}" \ - --group-id "${NODEGROUP_SG}" \ - --protocol tcp \ - --port 1-1024 \ - --source-group "${CONTROLPLANE_SG}" -exit_on_error ${?} "* Failed to remove nodegroup sg ingress rules" - -aws ec2 revoke-security-group-egress \ - --region "${REGION}" \ - --group-id "${CONTROLPLANE_SG}" \ - --protocol tcp \ - --port 1-1024 \ - --source-group "${NODEGROUP_SG}" -exit_on_error ${?} "* Failed to remove control plane sg egress rules" - -echo "Deleting the test cluster." -eksctl delete cluster -r "${REGION}" -n "${CLUSTER_NAME}" -exit_on_error ${?} "* Failed to delete ${CLUSTER_NAME} with eksctl, there might be leftover CloudFormation stacks that needs to be deleted. Look for eksctl-${CLUSTER_NAME}-*" >&2 - -echo "Deleting env file, userdata file, and kubeconfig file" -rm -f "${ENV_FILE}" -rm -f "${USERDATA_FILE}" -rm -f "${KUBECONFIG_FILE}" - -echo "Clean up done." diff --git a/tools/conformance-test/run-conformance-test.sh b/tools/conformance-test/run-conformance-test.sh deleted file mode 100755 index 2e92da6b..00000000 --- a/tools/conformance-test/run-conformance-test.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env bash - -# Check for required tools -for tool in jq sonobuoy aws kubectl; do - if ! command -v ${tool} > /dev/null; then - echo "* Can't find executable '${tool}'" >&2 - exit 2 - fi -done - -DEFAULT_KUBE_CONFORMANCE_VERSION=v1.14.7 -DEFAULT_NUM_NODES=3 - -# Helper functions - -usage() { - cat >&2 < - --instance-type m5.large - --cluster-env-file my-test-cluster.env - [ --kube-conformance-version v1.14.6 ] - -Spins up worker nodes to join the EKS cluster described in the env file and then runs Sonobuoy Kubernetes conformance tests against said cluster. -Once the tests are done, retrieves the results and cleans up the created worker node instances. - -Required: - --node-ami The AMI ID of the AMI to use for the worker nodes - --instance-type Instance type launched for worker nodes - --cluster-env-file Path to the env file containing cluster information for setting up worker nodes. Typically generated by 'setup-test-cluster.sh'. 
- -Optional: - --kube-conformance-version The version of the conformance image to use for conformance testing (default "${DEFAULT_KUBE_CONFORMANCE_VERSION}") - --num-nodes The number of Bottlerocket worker nodes to launch (default ${DEFAULT_NUM_NODES}) -EOF -} - -required_arg() { - local arg="${1:?}" - local value="${2}" - if [ -z "${value}" ]; then - echo "ERROR: ${arg} is required" >&2 - exit 2 - fi -} - -parse_args() { - while [ ${#} -gt 0 ] ; do - case "${1}" in - --node-ami ) shift; NODE_AMI="${1}" ;; - --instance-type ) shift; INSTANCE_TYPE="${1}" ;; - --cluster-env-file ) shift; ENV_FILE="${1}" ;; - --kube-conformance-version ) shift; KUBE_CONFORMANCE_VERSION="${1}" ;; - --num-nodes ) shift; NUM_NODES="${1}" ;; - - --help ) usage; exit 0 ;; - *) - echo "ERROR: Unknown argument: ${1}" >&2 - usage - exit 2 - ;; - esac - shift - done - - KUBE_CONFORMANCE_VERSION="${KUBE_CONFORMANCE_VERSION:-${DEFAULT_KUBE_CONFORMANCE_VERSION}}" - NUM_NODES="${NUM_NODES:-${DEFAULT_NUM_NODES}}" - - # Required arguments - required_arg "--node-ami" "${NODE_AMI}" - required_arg "--instance-type" "${INSTANCE_TYPE}" - required_arg "--cluster-env-file" "${ENV_FILE}" -} - -cleanup() { - if [ ${#instance_ids[@]} -ne 0 ]; then - echo "Cleaning up Bottlerocket worker node instances" - for instance_id in "${instance_ids[@]}"; do - aws ec2 terminate-instances \ - --output text \ - --region "${REGION}" \ - --instance-ids "${instance_id}" - done - unset instance_ids - fi - - # Wait at most 20 minutes for Sonobuoy to delete its namespace - if [ -n "${sonobuoy_run_attempted}" ]; then - echo "Cleaning up Sonobuoy namespace, may take up to 20 minutes" - ${SONOBUOY} delete --wait=20 - exit_on_error ${?} "* Failed to delete Sonobuoy namespace." - unset sonobuoy_run_attempted - fi -} - -trap 'cleanup' EXIT SIGINT SIGTERM - -exit_on_error() { - local rc="${1:?}" - local msg="${2:?}" - - if [ "${rc}" -ne 0 ]; then - echo "${msg}" >&2 - exit 1 - fi -} - -# Initial setup and checks -parse_args "${@}" - -# Load the env file created by the `setup-test-cluster` script, if it doesn't exist, exit -if [ -f "${ENV_FILE}" ]; then - . "${ENV_FILE}" -else - echo "* Failed to open env file at ${ENV_FILE}." >&2 - exit 1 -fi - -echo "Launching ${NUM_NODES} Bottlerocket worker nodes" -unset instance_ids -counter=0 -while [ ${counter} -lt "${NUM_NODES}" ]; do - instance_id=$(aws ec2 run-instances \ - --subnet-id "${SUBNET_ID}" \ - --security-group-ids "${NODEGROUP_SG}" "${CLUSTERSHARED_SG}" \ - --image-id "${NODE_AMI}" \ - --instance-type "${INSTANCE_TYPE}" \ - --region "${REGION}" \ - --tag-specifications "ResourceType=instance,Tags=[{Key=kubernetes.io/cluster/${CLUSTER_NAME},Value=owned}]" \ - --user-data file://"${USERDATA_FILE}" \ - --iam-instance-profile Name="${INSTANCE_PROFILE}" \ - --query "Instances[*].InstanceId" \ - --output text) - if [ -n "${instance_id}" ]; then - instance_ids=("${instance_ids[@]}" "${instance_id}") - fi - sleep 1 - ((counter+=1)) -done -actual_num_nodes=${#instance_ids[@]} -if [ "${actual_num_nodes}" -ne "${NUM_NODES}" ]; then - echo "* Failed to launch requested number of Bottlerocket instances: launched ${actual_num_nodes} out of ${NUM_NODES}." >&2 - exit 1 -fi - -echo "Waiting for all Bottlerocket worker nodes to become 'Ready' in ${CLUSTER_NAME} cluster" -KUBECTL="kubectl --kubeconfig ${KUBECONFIG_FILE}" -MAX_ATTEMPTS=30 -attempts=0 -sleep 30 -while true; do - ((attempts+=1)) - if [ "${attempts}" -gt ${MAX_ATTEMPTS} ]; then - echo "* Retry limit (${MAX_ATTEMPTS}) reached! 
Worker nodes are not becoming ready in cluster ${CLUSTER_NAME}" >&2 - exit 1 - fi - sleep 5 - nodes=$(${KUBECTL} get nodes --no-headers) - exit_on_error ${?} "* Failed to get node information for ${CLUSTER_NAME} cluster" - - found=$(${KUBECTL} get nodes --no-headers -o name | wc -l) - ready=$(echo -n "${nodes}" | grep -c -w "Ready") - echo "ready: ${ready}" - - if [ "${found}" -eq "${actual_num_nodes}" ] && [ "${ready}" -eq "${actual_num_nodes}" ]; then - break - fi -done - -echo "Starting Sonobuoy Kubernetes conformance test! Test may take up to 60 minutes to finish" -sonobuoy_run_attempted=true -SONOBUOY="sonobuoy --kubeconfig ${KUBECONFIG_FILE}" -${SONOBUOY} run \ - --mode certified-conformance \ - --kube-conformance-image-version "${KUBE_CONFORMANCE_VERSION}" \ - --wait -exit_on_error ${?} "* Failed to run Sonobuoy Kubernetes conformance tests" -${SONOBUOY} status -exit_on_error ${?} "* Failed to retrieve conformance test status" -results_file=$(${SONOBUOY} retrieve "${CLUSTER_NAME}"-conformance-test-results) -exit_on_error ${?} "* Failed to retrieve Sonobuoy Kubernetes conformance test results" -sonobuoy results "${results_file}" -exit_on_error ${?} "* Failed to examine sonobuoy results in ${results_file}" -echo "Sonobuoy test results available at ${results_file}" - -# Exit non-zero if any of the Kubernetes conformance tests fail -(set -o pipefail; \ - sonobuoy results "${results_file}" \ - | awk '$2 == "failed" || $2 == "unknown" {exit 1}') -exit_on_error ${?} "* Found conformance test failures or conformance tests failed to run (unknown status)." diff --git a/tools/conformance-test/setup-test-cluster.sh b/tools/conformance-test/setup-test-cluster.sh deleted file mode 100755 index 9122653f..00000000 --- a/tools/conformance-test/setup-test-cluster.sh +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env bash - -# Spins up an EKS test cluster with no initial worker nodes using 'eksctl'. Outputs an env file containing information -# used for setting up cluster worker nodes for Kubernetes conformance testing. - -# If the script execution is interrupted or terminated halfway, it automatically tries to clean up allocated resources. - -# Process flow: -# * Use 'eksctl' to set up cluster with no initial nodes and outputs the kubeconfig file in the current directory with name 'cluster-name-config' -# * Modify security groups to allow traffic needed for Kubernetes conformance testing. -# * Generate a 'cluster-name.env' file containing cluster information in current directory - -# Environment assumptions: -# * aws-cli is set up (via environment or config) to operate EC2 in the given region. -# * Some required tools are available locally; look just below these comments. -# * AWS account has space for an additional VPC (maximum is 5) in specified region. - -# Caveats: -# * Certain us-east-1 AZs (e.g. us-east-1e) do not support Amazon EKS. If that happens, 'eksctl' will prompt an error. -# See: https://github.com/weaveworks/eksctl/issues/817. Use '--zones' to specify AZ to ensure that doesn't happen - -# Check for required tools -for tool in jq aws kubectl eksctl; do - if ! 
command -v ${tool} > /dev/null; then - echo "* Can't find executable '${tool}'" >&2 - exit 2 - fi -done - -DEFAULT_CLUSTER_NAME=sonobuoy-test -CNI_PLUGIN_CONFIG=https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/release-1.6/config/v1.6/aws-k8s-cni.yaml - -# Helper functions - -usage() { - cat >&2 < - [ --zones us-west-2a,us-west-2b ] - [ --cluster-name my-test-cluster ] -Spins up EKS test cluster with no initial worker nodes with 'eksctl' and outputs an env file containing information used -for setting up cluster worker nodes for Kubernetes conformance testing. - -Required: - --region The AWS region - -Optional: - --zones The availablility zones. Two required if specified. (e.g us-west-2a,us-west-2b) - --cluster-name Name of the cluster to create with 'eksctl'. (default ${DEFAULT_CLUSTER_NAME}) -EOF -} - -required_arg() { - local arg="${1:?}" - local value="${2}" - if [ -z "${value}" ]; then - echo "ERROR: ${arg} is required" >&2 - exit 2 - fi -} - -parse_args() { - while [ ${#} -gt 0 ] ; do - case "${1}" in - --region ) shift; REGION="${1}" ;; - - --cluster-name ) shift; CLUSTER_NAME="${1}" ;; - --zones ) shift; ZONES="${1}" ;; - - --help ) usage; exit 0 ;; - *) - echo "ERROR: Unknown argument: ${1}" >&2 - usage - exit 2 - ;; - esac - shift - done - - CLUSTER_NAME="${CLUSTER_NAME:-${DEFAULT_CLUSTER_NAME}}" - - # Required arguments - required_arg "--region" "${REGION}" -} - -cleanup() { - if [ -n "${eks_cluster_creation_attempted}" ]; then - echo "Deleting the test cluster, whole process can take up to 15 minutes" - eksctl delete cluster -r "${REGION}" -n "${CLUSTER_NAME}" -w - exit_on_error ${?} "* Failed to delete ${CLUSTER_NAME} with eksctl; there might be leftover CloudFormation stacks that needs to be deleted. Look for eksctl-${CLUSTER_NAME}-*" >&2 - fi -} - -trap 'cleanup' EXIT SIGINT SIGTERM - -exit_on_error() { - local rc="${1:?}" - local msg="${2:?}" - - if [ "${rc}" -ne 0 ]; then - echo "${msg}" >&2 - exit 1 - fi -} - -# Initial setup and checks -parse_args "${@}" - -echo "Setting up fresh EKS cluster with eksctl" -eksctl get cluster -r "${REGION}" -n "${CLUSTER_NAME}" > /dev/null 2>&1 -if [ "${?}" -eq 0 ]; then - echo "* An EKS cluster already exists with name ${CLUSTER_NAME}" >&2 - exit 1 -fi - -eks_cluster_creation_attempted=true -eksctl create cluster -r "${REGION}" --zones "${ZONES}" -n "${CLUSTER_NAME}" --nodes 0 -exit_on_error ${?} "* Failed to set up EKS cluster with eksctl" - -kubeconfig_file="${CLUSTER_NAME}"-config -echo "Writing kubeconfig for ${CLUSTER_NAME} to ${kubeconfig_file}" -eksctl utils write-kubeconfig -r "${REGION}" -c "${CLUSTER_NAME}" --kubeconfig "${kubeconfig_file}" -exit_on_error ${?} "* Failed to write kube config" - -KUBECTL="kubectl --kubeconfig ${kubeconfig_file}" -echo "Apply configuration for AWS CNI plugin" -${KUBECTL} apply -f "${CNI_PLUGIN_CONFIG}" -exit_on_error ${?} "* Failed to apply configuration for AWS CNI plugin" - -echo "Generating userdata file for launching Bottlerocket worker nodes" -endpoint=$(set -o pipefail; \ - eksctl get cluster -r "${REGION}" -n "${CLUSTER_NAME}" -o json \ - | jq --raw-output '.[].Endpoint') -exit_on_error ${?} "* Failed to get cluster endpoint" - -certificate_authority=$(set -o pipefail; \ - eksctl get cluster -r "${REGION}" -n "${CLUSTER_NAME}" -o json \ - | jq --raw-output '.[].CertificateAuthority.Data') -exit_on_error ${?} "* Failed to get cluster certificate authority" - -userdata_file="${CLUSTER_NAME}"-user-data.toml -cat > "${userdata_file}" <&2 - exit 1 -fi - -echo "Setting up security groups" 
-eks_subnet_ids="$(set -o pipefail; \ - eksctl get cluster -r "${REGION}" -n "${CLUSTER_NAME}" -o json \ - | jq --raw-output '.[].ResourcesVpcConfig.SubnetIds[]')" -exit_on_error ${?} "* Failed to get subnet IDs of the EKS cluster" - -subnet_ids=($(set -o pipefail; \ - aws ec2 describe-subnets --subnet-ids \ - ${eks_subnet_ids[@]} \ - --region "${REGION}" \ - --filters "Name=tag:Name,Values=eksctl-${CLUSTER_NAME}-cluster/SubnetPrivate*" \ - --output json | jq --raw-output '.Subnets[].SubnetId')) -exit_on_error ${?} "* Failed to get subnet ID for launching bottlerocket worker nodes" - - -# Allow TCP traffic over ports 1-1024 for Kubernetes conformance testing -nodegroup_sg=$(set -o pipefail; \ - aws ec2 describe-security-groups \ - --region "${REGION}" \ - --filters "Name=tag:Name,Values=*${CLUSTER_NAME}-nodegroup*" \ - --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" \ - --output json | jq --raw-output '.[].ID') -exit_on_error ${?} "* Failed to get nodegroup security group ID" - -clustershared_sg=$(set -o pipefail; \ - aws ec2 describe-security-groups \ - --region "${REGION}" \ - --filters "Name=tag:Name,Values=*${CLUSTER_NAME}*ClusterShared*" \ - --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" \ - --output json | jq --raw-output '.[].ID') -exit_on_error ${?} "* Failed to get cluster shared security group ID" - -controlplane_sg=$(set -o pipefail; \ - aws ec2 describe-security-groups \ - --region "${REGION}" \ - --filters "Name=tag:Name,Values=*${CLUSTER_NAME}*ControlPlane*" \ - --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" \ - --output json | jq --raw-output '.[].ID') -exit_on_error ${?} "* Failed to get control plane security group ID" - -aws ec2 authorize-security-group-ingress \ - --region "${REGION}" \ - --group-id "${nodegroup_sg}" \ - --protocol tcp \ - --port 1-1024 \ - --source-group "${controlplane_sg}" -exit_on_error ${?} "* Failed to authorize nodegroup sg ingress rules" - -aws ec2 authorize-security-group-egress \ - --region "${REGION}" \ - --group-id "${controlplane_sg}" \ - --protocol tcp \ - --port 1-1024 \ - --source-group "${nodegroup_sg}" -exit_on_error ${?} "* Failed to authorize control plane sg egress rules" - -echo "Generating env file for launching Bottlerocket worker nodes" -cat > "${CLUSTER_NAME}.env" < Date: Fri, 6 Mar 2020 09:35:41 -0800 Subject: [PATCH 0270/1356] Comment unused licenses in whitelist so we don't get warnings --- tools/buildsys/deny.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/deny.toml b/tools/buildsys/deny.toml index 8685fb59..970abf3a 100644 --- a/tools/buildsys/deny.toml +++ b/tools/buildsys/deny.toml @@ -11,14 +11,14 @@ confidence-threshold = 0.93 allow = [ "Apache-2.0", - "BSD-2-Clause", + #"BSD-2-Clause", # OK but currently unused; commenting to prevent warning "BSD-3-Clause", "BSL-1.0", "ISC", "MIT", "OpenSSL", "Unlicense", - "Zlib", + #"Zlib", # OK but currently unused; commenting to prevent warning ] exceptions = [ From f87e8c77d0625d2293eef1c24062fb71a3107ce6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 6 Mar 2020 10:16:50 -0800 Subject: [PATCH 0271/1356] Remove docs and generation tool used in private preview It served as an index for docs in an S3 bucket, but the GitHub repo itself is a better index. 
--- tools/gen-docs.sh | 45 --------------------------------------------- 1 file changed, 45 deletions(-) delete mode 100755 tools/gen-docs.sh diff --git a/tools/gen-docs.sh b/tools/gen-docs.sh deleted file mode 100755 index 8c19daaf..00000000 --- a/tools/gen-docs.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -DOCS=(START.md README.md BUILDING.md QUICKSTART.md CHANGELOG.md extras/dogswatch/README.md) -EXTRAS=(extras/dogswatch/{dogswatch,dev/deployment}.yaml) - -if ! hash grip; then - >&2 echo "grip is not installed, run 'pip3 install --user grip'" - exit 1 -fi - -top=$(git rev-parse --show-toplevel) -mkdir -p "${top}/html" -for doc in "${DOCS[@]}"; do - out="${top}/html/${doc%.md}.html" - mkdir -p "$(dirname "$out")" - grip --title="${doc}" --export \ - <( - cat <<'EOF' -@@BOTTLEROCKET-SENTINEL-START@@ -**The best way to get in touch with the Bottlerocket development team** during our preview -is via [thar-preview@amazon.com](mailto:thar-preview@amazon.com) -or #thar-preview on the [awsdevelopers Slack workspace](https://awsdevelopers.slack.com) (email us for an invite). -We'd love to talk with you and hear your feedback on Bottlerocket! -

-[← Documentation index](/START.md) - ---- - -EOF - cat "${top}/${doc}" - ) \ - "${out}" - sed -i \ - -e 's/.*.markdown-body .anchor span:before { font-size: 16px; content: "\\1f517"; }<\/style>/' \ - -e '/@@BOTTLEROCKET-SENTINEL-START@@/

/' \ - -e 's/]*\).md/]*">\(#[0-9]\+\)^\1^g' \ - "${out}" -done - -for extra in "${EXTRAS[@]}"; do - out="${top}/html/${extra}" - echo "Copying ${extra} to ${out}" - install -D "${extra}" "${out}" -done From 10e17a3c9ba6f57190aa6359868ec90363504822 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 7 Mar 2020 18:47:11 +0000 Subject: [PATCH 0272/1356] docs: fix minor issues in README.md Signed-off-by: Ben Cressey --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 4a0bfc3c..14a883ee 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ From there, you can [change settings](#settings), manually [update Bottlerocket] Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bottlerocket-control-container), enabled by default, that runs outside of the orchestrator in a separate instance of containerd. This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. -(You can easily replace this control container with your own just by changing the URI; see [Settings](#settings). +(You can easily replace this control container with your own just by changing the URI; see [Settings](#settings).) You need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART.md#enabling-ssm). @@ -212,7 +212,7 @@ For more details on using the client, see the [apiclient documentation](sources/ If you know what settings you want to change when you start your Bottlerocket instance, you can send them in the user data. In user data, we structure the settings in TOML form to make things a bit simpler. -Here's the user data to change the time zone setting, as we did in the last section: +Here's the user data to change the message of the day setting, as we did in the last section: ``` [settings] @@ -347,15 +347,13 @@ RPM itself is not in the image - it's just a common and convenient package defin We currently package the following major third-party components: * Linux kernel ([background](https://en.wikipedia.org/wiki/Linux), [packaging](packages/kernel/)) * glibc ([background](https://www.gnu.org/software/libc/), [packaging](packages/glibc/)) -* Buildroot as build toolchain ([background](https://buildroot.org/), [packaging](packages/sdk/)) +* Buildroot as build toolchain ([background](https://buildroot.org/), via the [SDK](https://github.com/bottlerocket-os/bottlerocket-sdk)) * GRUB, with patches for partition flip updates ([background](https://www.gnu.org/software/grub/), [packaging](packages/grub/)) * systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) * wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) * Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes/)) -* Some helpers to make usage in AWS easier: - * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) - * SSM agent ([background](https://github.com/aws/amazon-ssm-agent), [packaging](packages/ssm/)) +* aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) For further documentation or to see the rest of the packages, see the [packaging 
directory](packages/). From cbee8a36d6647e7ea0151cf00779d890455c9c4c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 7 Mar 2020 19:03:08 +0000 Subject: [PATCH 0273/1356] docs: refer to security guides in README.md Signed-off-by: Ben Cressey --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 14a883ee..09f729e4 100644 --- a/README.md +++ b/README.md @@ -327,15 +327,15 @@ Be careful, and make sure you have a similar low-level use case before reaching ### Security -We use [dm-verity](https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity) to load a verified read-only root filesystem, preventing some classes of persistent security threats. -Only a few locations are made writable: -* some through [tmpfs mounts](sources/preinit/laika), used for configuration, that don't persist over a restart. -* one [persistent location](packages/release/var-lib-bottlerocket.mount) for the data store. +:shield: :crab: -We enable [SELinux](https://selinuxproject.org/) in enforcing mode. -This protects the data store from tampering, and blocks modification of sensitive files such as container archives. +To learn more about security features in Bottlerocket, please see [SECURITY FEATURES](SECURITY_FEATURES.md). +It describes how we use features like [dm-verity](https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity) and [SELinux](https://selinuxproject.org/) to protect the system from security threats. -Almost all first-party components are written in [Rust](https://www.rust-lang.org/). +To learn more about security recommendations for Bottlerocket, please see [SECURITY GUIDANCE](SECURITY_GUIDANCE.md). +It documents additional steps you can take to secure the OS, and includes resources such as a [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for your reference. + +In addition, almost all first-party components are written in [Rust](https://www.rust-lang.org/). Rust eliminates some classes of memory safety issues, and encourages design patterns that help security. ### Packaging From 37ba1e6023b7b1279169d68b8524ecfcd4b93674 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 9 Mar 2020 09:26:27 -0700 Subject: [PATCH 0274/1356] README: add sentence about preview phase with feedback link --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 09f729e4..06d1caeb 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. +Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](/issues). If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART.md) to try Bottlerocket in an Amazon EKS cluster. From f0f6c795ad7fc71575cb1e206e18d7745cc97be3 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 9 Mar 2020 11:15:41 -0700 Subject: [PATCH 0275/1356] README: fix feedback link to point at existing section --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 06d1caeb..77e27b40 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. -Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](/issues). 
+Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](#contact-us). If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART.md) to try Bottlerocket in an Amazon EKS cluster. @@ -29,8 +29,9 @@ If you're interested in contributing, thank you! Please see our [contributor's guide](CONTRIBUTING.md). We use GitHub issues to track other bug reports and feature requests. -You can select from a few templates and get some guidance on the type of information that would be most helpful. +You can look at [existing issues](https://github.com/bottlerocket-os/bottlerocket/issues) to see whether your concern is already known. +If not, you can select from a few templates and get some guidance on the type of information that would be most helpful. [Contact us with a new issue here.](https://github.com/bottlerocket-os/bottlerocket/issues/new/choose) We don't have other communication channels set up quite yet, but don't worry about making an issue! From 6ffa395cbd88531e3d48b2299b2bfd812b9db2e3 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 9 Mar 2020 15:47:10 -0700 Subject: [PATCH 0276/1356] Add systemd.log_color=0 to remove ANSI color escapes from console log --- tools/rpm2img | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 72675a82..07ad392d 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -162,8 +162,8 @@ set timeout="0" menuentry "Bottlerocket OS ${VERSION_ID}" { linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro init=/sbin/preinit \\ console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ - systemd.log_target=journal-or-kmsg net.ifnames=0 biosdevname=0 \\ - dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ + systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ + biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" } From a0787c06bcc0e25b9c300df92c9f230631c1bde7 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 10 Mar 2020 00:16:33 +0000 Subject: [PATCH 0277/1356] Set up GitHub Actions --- .github/workflows/build.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/build.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..302e98a0 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,12 @@ +name: Build +on: + pull_request: + branches: [develop] +jobs: + build: + runs-on: [self-hosted, linux, x64] + steps: + - uses: actions/checkout@v1 + - run: cargo install --version 0.28.0 cargo-make + - run: cargo install --version 0.6.6 cargo-deny --no-default-features + - run: cargo make From 59cfaaab634292f474da31ca0d853e4c65dab1f9 Mon Sep 17 00:00:00 2001 From: inductor Date: Sun, 15 Mar 2020 18:33:21 +0900 Subject: [PATCH 0278/1356] Use github checkout v2 for faster fetch --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 302e98a0..2b55ae92 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -6,7 +6,7 @@ jobs: build: runs-on: [self-hosted, linux, x64] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - 
run: cargo install --version 0.28.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features - run: cargo make From 9da27bdd7de21c98749161b8f1229ef0d398e4cc Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 16 Mar 2020 16:44:30 +0000 Subject: [PATCH 0279/1356] Add appropriate ECR policy guidance to QUICKSTART and README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 77e27b40..ad2acc4e 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,8 @@ There are a couple out-of-band access methods you can use to explore Bottlerocke Either option will give you a shell within Bottlerocket. From there, you can [change settings](#settings), manually [update Bottlerocket](#updates), debug problems, and generally explore. +**Note:** These methods require that your instance has permission to access the ECR repository where these containers live; the appropriate policy to add to your instance's IAM role is `AmazonEC2ContainerRegistryReadOnly`. + ### Control container Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bottlerocket-control-container), enabled by default, that runs outside of the orchestrator in a separate instance of containerd. From c86550bbfabe475d816be271b1e6652ebcef2ae5 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 18 Mar 2020 14:21:44 +0000 Subject: [PATCH 0280/1356] Move built rpms to build/packages --- tools/buildsys/src/builder.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 86bf2873..3593efa2 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -20,6 +20,7 @@ impl PackageBuilder { /// Build RPMs for the specified package. pub(crate) fn build(package: &str) -> Result { let arch = getenv("BUILDSYS_ARCH")?; + let output = getenv("BUILDSYS_PACKAGES_DIR")?; // We do *not* want to rebuild most packages when the variant changes, becauses most aren't // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" @@ -42,7 +43,7 @@ impl PackageBuilder { arch = arch, ); - build(&target, &build_args, &tag)?; + build(&target, &build_args, &tag, &output)?; Ok(Self) } @@ -60,6 +61,7 @@ impl VariantBuilder { let variant = getenv("BUILDSYS_VARIANT")?; let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; let version_build = getenv("BUILDSYS_VERSION_BUILD")?; + let output = getenv("BUILDSYS_OUTPUT_DIR")?; // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. @@ -78,16 +80,20 @@ impl VariantBuilder { version_image = version_image, version_build = version_build, ); - let tag = format!("buildsys-var-{variant}-{arch}", variant = variant, arch = arch); + let tag = format!( + "buildsys-var-{variant}-{arch}", + variant = variant, + arch = arch + ); - build(&target, &build_args, &tag)?; + build(&target, &build_args, &tag, &output)?; Ok(Self) } } /// Invoke a series of `docker` commands to drive a package or variant build. -fn build(target: &str, build_args: &str, tag: &str) -> Result<()> { +fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> { // Our Dockerfile is in the top-level directory. 
let root = getenv("BUILDSYS_ROOT_DIR")?; std::env::set_current_dir(&root).context(error::DirectoryChange { path: &root })?; @@ -122,7 +128,6 @@ fn build(target: &str, build_args: &str, tag: &str) -> Result<()> { tag = tag, )); - let output = getenv("BUILDSYS_OUTPUT_DIR")?; let create = args(format!("create --name {tag} {tag} true", tag = tag)); let cp = args(format!("cp {}:/output/. {}", tag, output)); let rm = args(format!("rm --force {}", tag)); From 2009568047b1905fc43ab62d5bc1b8b03ae29814 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Lipt=C3=A1k?= Date: Thu, 26 Mar 2020 08:28:50 -0400 Subject: [PATCH 0281/1356] Bump cargo-make to 0.30.0 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b55ae92..fd56a943 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,6 +7,6 @@ jobs: runs-on: [self-hosted, linux, x64] steps: - uses: actions/checkout@v2 - - run: cargo install --version 0.28.0 cargo-make + - run: cargo install --version 0.30.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features - run: cargo make From 096b4ad5bafd769c886c3cc3aa911bd906e54051 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 26 Mar 2020 12:39:00 -0400 Subject: [PATCH 0282/1356] build: Pass proxy environment variables through to docker containers. When building behind a proxy, build via 'cargo make' worked up to the point of 'running Task: fetch-vendored' where it hung. The reason was because necessary proxy environment variables were not passed into the docker container's environment. The fix here is to add these variables to the environment of the docker container if they are set outside the container: http_proxy, https_proxy, no_proxy --- tools/docker-go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/docker-go b/tools/docker-go index 548386e4..bae49199 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -54,10 +54,19 @@ DOCKER_RUN_ARGS="--network=host" parse_args "${@}" +# Go accepts both lower and uppercase proxy variables, pass both through. +proxy_env=( ) +for i in http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY; do + if [ -n "${!i}" ]; then + proxy_env[${#proxy_env[@]}]="--env=$i=${!i}" + fi +done + docker run --rm \ -e GOPRIVATE='*' \ -e GOCACHE='/tmp/.cache' \ -e GOPATH='/tmp/go' \ + "${proxy_env[@]}" \ --user "$(id -u):$(id -g)" \ ${DOCKER_RUN_ARGS} \ -v "${GO_MOD_CACHE}":/tmp/go/pkg/mod \ From dfe02de764dba168d1e33e2f04c189fe9adb6a88 Mon Sep 17 00:00:00 2001 From: Samuel Mendoza-Jonas Date: Thu, 26 Mar 2020 13:13:05 -0700 Subject: [PATCH 0283/1356] kernel: Include objtool in kernel-devel objtool is a requirement for compiling external modules, particularly if stack validation is enabled at build time. 
Signed-off-by: Samuel Mendoza-Jonas --- packages/kernel/kernel.spec | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 9dc8d8d4..ab061072 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -107,6 +107,11 @@ mkdir src_squashfs for file in $(cat kernel_devel_files); do install -D ${file} src_squashfs/%{version}/${file} done +# if we have it, include objtool (not all arches support it yet) +if [ "%{_cross_karch}" == "x86" ]; then + install -D tools/objtool/objtool src_squashfs/%{version}/tools/objtool/objtool +fi + mksquashfs src_squashfs kernel-devel.squashfs install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs install -d %{buildroot}%{kernel_sourcedir} From 6afcb1ad520f3d38f44d537a195ecf3446ac02c5 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs <6260372+webern@users.noreply.github.com> Date: Fri, 27 Mar 2020 11:45:12 -0700 Subject: [PATCH 0284/1356] logdog: a tool for aggregating logs --- README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.md b/README.md index ad2acc4e..3469dd1f 100644 --- a/README.md +++ b/README.md @@ -327,6 +327,28 @@ We use it for the control container because it needs to be available early to gi Be careful, and make sure you have a similar low-level use case before reaching for host containers. +### Logs + +You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. +SSH to the Bottlerocket host, then run: + +```bash +sudo sheltie +logdog +``` + +This will write an archive of the logs to `/tmp/bottlerocket-logs.tar.gz`. +You can use SSH to retrieve the file. +Once you have exited from the Bottlerocket host, run a command like: + +```bash +ssh -i YOUR_KEY_FILE \ + ec2-user@YOUR_HOST \ + "cat /.bottlerocket/rootfs/tmp/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz +``` + +For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). 
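If you'd rather copy the file than stream it through `ssh`, an `scp` invocation like the one below should also work; this is a sketch assuming the same key, host, and default archive path shown above, and that `scp`/SFTP is available in the admin container:

```bash
# Copy the log archive out of the admin container's view of the host filesystem
# (YOUR_KEY_FILE and YOUR_HOST are placeholders, as in the ssh example above)
scp -i YOUR_KEY_FILE \
    ec2-user@YOUR_HOST:/.bottlerocket/rootfs/tmp/bottlerocket-logs.tar.gz \
    ./bottlerocket-logs.tar.gz
```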
+ ## Details ### Security From 81f612838cda3ed42552d0356444ae033d2c89f9 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 7 Apr 2020 16:08:23 -0700 Subject: [PATCH 0285/1356] Remove unused Rust dependencies --- tools/buildsys/Cargo.lock | 10 ---------- tools/buildsys/Cargo.toml | 1 - 2 files changed, 11 deletions(-) diff --git a/tools/buildsys/Cargo.lock b/tools/buildsys/Cargo.lock index fe07b1e1..8ac60a00 100644 --- a/tools/buildsys/Cargo.lock +++ b/tools/buildsys/Cargo.lock @@ -53,7 +53,6 @@ dependencies = [ "snafu 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "users 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -997,14 +996,6 @@ dependencies = [ "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "users" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "version_check" version = "0.1.5" @@ -1327,7 +1318,6 @@ dependencies = [ "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" "checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -"checksum users 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c72f4267aea0c3ec6d07eaabea6ead7c5ddacfafc5e22bcf8d186706851fb4cf" "checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" "checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 4e8a53ab..5097136e 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -17,5 +17,4 @@ sha2 = "0.8" snafu = "0.6" toml = "0.5" url = "2.1" -users = { version = "0.9", default-features = false } walkdir = "2" From 23db5c859f06a51e8a414e0d6050cbf23e98c09d Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 7 Apr 2020 16:37:16 -0700 Subject: [PATCH 0286/1356] Update Rust dependencies Only itertools and base64 had minor version updates, and their changelogs show nothing relevant to our basic usage of them. --- tools/buildsys/Cargo.lock | 423 +++++++++++++++----------------------- 1 file changed, 170 insertions(+), 253 deletions(-) diff --git a/tools/buildsys/Cargo.lock b/tools/buildsys/Cargo.lock index 8ac60a00..9d7a5053 100644 --- a/tools/buildsys/Cargo.lock +++ b/tools/buildsys/Cargo.lock @@ -1,10 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "anyhow" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "autocfg" version = "1.0.0" @@ -46,11 +41,11 @@ dependencies = [ "duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "snafu 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -58,7 +53,7 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -76,14 +71,6 @@ name = "bytes" version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "cc" version = "1.0.50" @@ -100,7 +87,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -126,7 +113,7 @@ dependencies = [ [[package]] name = "doc-comment" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -139,7 +126,7 @@ name = "duct" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "os_pipe 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -200,10 +187,10 @@ name = "futures-macro" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -227,8 +214,8 @@ dependencies = [ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -246,13 +233,13 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "h2" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -260,28 +247,20 @@ dependencies = [ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hermit-abi" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -291,7 +270,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "http" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -305,7 +284,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -315,15 +294,15 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hyper" -version = "0.13.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -331,7 +310,7 @@ dependencies = [ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -344,11 +323,11 @@ dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -376,7 +355,7 @@ name = "iovec" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -386,10 +365,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "js-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -408,7 +387,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -436,7 +415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] 
name = "mime_guess" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -453,7 +432,7 @@ dependencies = [ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", @@ -478,26 +457,17 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "nom" -version = "4.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "num_cpus" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -520,7 +490,7 @@ name = "os_pipe" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -542,9 +512,9 @@ name = "pin-project-internal" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -564,22 +534,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro-hack" -version = "0.5.11" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "proc-macro-nested" -version = "0.1.3" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro2" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -587,10 +552,10 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -599,18 +564,18 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -637,7 +602,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "reqwest" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -645,42 +610,42 @@ dependencies = [ "encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)", "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)", "hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 
1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ring" -version = "0.16.11" +version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -691,7 +656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -703,13 +668,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ryu" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -717,12 +682,12 @@ name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = 
"schannel" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -734,56 +699,57 @@ name = "sct" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "security-framework" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "security-framework-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_derive" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.48" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -791,7 +757,7 @@ name = "serde_plain" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -801,7 +767,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", 
"itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -821,7 +787,7 @@ name = "shared_child" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -837,28 +803,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "snafu" -version = "0.6.2" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "snafu-derive 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "snafu-derive" -version = "0.6.2" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "sourcefile" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "spin" version = "0.5.2" @@ -866,11 +827,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "syn" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -879,18 +840,19 @@ name = "time" version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio" -version = "0.2.12" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -907,13 +869,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-util" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -921,7 +883,7 @@ dependencies = [ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -929,7 +891,7 @@ name = "toml" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -971,11 +933,6 @@ dependencies = [ "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "unicode-xid" version = "0.2.0" @@ -996,11 +953,6 @@ dependencies = [ "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "version_check" version = "0.9.1" @@ -1013,7 +965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1032,91 +984,73 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-bindgen" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bumpalo 3.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro-support 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "wasm-bindgen-webidl" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "web-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1124,7 +1058,7 @@ name = "webpki" version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1136,14 +1070,6 @@ dependencies = [ "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "weedle" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "winapi" version = "0.2.8" @@ -1170,7 +1096,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-util" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1199,24 +1125,22 @@ dependencies = [ ] [metadata] -"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" "checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742" +"checksum bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" "checksum byte-tools 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" "checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" "checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" "checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" "checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" "checksum ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -"checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" +"checksum doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" "checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" "checksum duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1607fa68d55be208e83bcfbcfffbc1ec65c9fbcf9eb1a5d548dc3ac0100743b0" "checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" @@ -1233,32 +1157,30 @@ dependencies = [ "checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" "checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +"checksum h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" "checksum hex 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" -"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" +"checksum http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" "checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" "checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum hyper 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fa1c527bbc634be72aa7ba31e4e4def9bbb020f5416916279b7c705cd838893e" +"checksum hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" "checksum hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" "checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" "checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" "checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" -"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" +"checksum js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" +"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" "checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" "checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" +"checksum mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" "checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" "checksum once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" @@ -1270,43 +1192,42 @@ dependencies = [ "checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" -"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" -"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" +"checksum proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" +"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" +"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" "checksum rand_hc 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum reqwest 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f62f24514117d09a8fc74b803d3d65faa27cea1c7378fb12b0d002913f3831" -"checksum ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)" = "741ba1704ae21999c00942f9f5944f801e977f54302af346b596287599ad1862" +"checksum reqwest 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" +"checksum ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" "checksum rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" "checksum rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" -"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" "checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -"checksum schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "507a9e6e8ffe0a4e0ebb9a10293e62fdf7657c06f1b8bb07a8fcf697d2abf295" +"checksum schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" "checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum security-framework 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "97bbedbe81904398b6ebb054b3e912f99d55807125790f3198ac990d98def5b0" -"checksum security-framework-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "06fd2f23e31ef68dd2328cc383bd493142e46107a3a0e24f7d734e3f3b80fe4c" -"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" -"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" +"checksum security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" +"checksum security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" +"checksum serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" +"checksum serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" +"checksum serde_json 1.0.51 
(registry+https://github.com/rust-lang/crates.io-index)" = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" "checksum serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" "checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" "checksum sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" "checksum shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" "checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" -"checksum snafu 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "546db9181bce2aa22ed883c33d65603b76335b4c2533a98289f54265043de7a1" -"checksum snafu-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bdc75da2e0323f297402fd9c8fdba709bb04e4c627cbe31d19a2c91fc8d9f0e2" -"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" +"checksum snafu 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ec0ae2ed980f26e1ad62e717feb01df90731df56887b5391a2c79f9f6805be" +"checksum snafu-derive 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0ec32ba84a7a86aeb0bc32fd0c46d31b0285599f68ea72e87eff6127889d99e1" "checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" +"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "b34bee1facdc352fba10c9c58b654e6ecb6a2250167772bf86071f7c5f2f5061" +"checksum tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)" = "ee5a0dd887e37d37390c13ff8ac830f992307fe30a1fff0ab8427af67211ba28" "checksum tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" -"checksum tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" +"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" "checksum toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" "checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" @@ -1314,31 +1235,27 @@ dependencies = [ "checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" "checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" "checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" "checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" "checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" -"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" -"checksum wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" -"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" -"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" -"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" -"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" -"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +"checksum wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" +"checksum wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" +"checksum wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +"checksum wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" +"checksum wasm-bindgen-macro-support 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" +"checksum wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" +"checksum web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" "checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" "checksum webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" -"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +"checksum winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" From 5b19baeba3ec01e4b5cee170389dcea31b1fb3ae Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Fri, 10 Apr 2020 11:29:28 -0700 Subject: [PATCH 0287/1356] kernel: update to 5.4.20 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index 5353a32f..1d10534d 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/8a2ede840234ebc72d835fab144162ba8184b8aa7df163a5d36aad2d3c85529b/kernel-5.4.16-8.72.amzn2.src.rpm" -sha512 = "0f9e04c94fe89b710bd9ad9bdb6ed075aaea30e4fd64c5ab9d42d2a052acd6abf6b91cd6aa561fd13559eed6cc27acb883e8cb27e113db34de29f6f905dd2bfc" +url = "https://cdn.amazonlinux.com/blobstore/2e1a86879ed805e227d81f815fcc9f7575ae98a2fd6573ca71c9be2776c0637d/kernel-5.4.20-12.75.amzn2.src.rpm" +sha512 = "b698803700a05dbd21d761e3efdde20b212374869220b4e509858cdffb8566d946576991ac46351f56235882e57513a5958bc71751c43a6b4f7fe5cc4c8268ac" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index ab061072..cff93c94 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.16 +Version: 5.4.20 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/8a2ede840234ebc72d835fab144162ba8184b8aa7df163a5d36aad2d3c85529b/kernel-5.4.16-8.72.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/2e1a86879ed805e227d81f815fcc9f7575ae98a2fd6573ca71c9be2776c0637d/kernel-5.4.20-12.75.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc From ac09bd907123d49f551ac64d61c63c77d572cfe1 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 16 Apr 2020 15:41:03 +0000 Subject: [PATCH 0288/1356] build: add partition type UUID for local data image This allows tools to find the partition by UUID as well as by label. Signed-off-by: Ben Cressey --- tools/rpm2img | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index 07ad392d..4e391c2b 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -52,6 +52,7 @@ BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" +BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" if [[ "${ARCH}" == "x86_64" ]]; then FIRM_NAME="BIOS-BOOT" @@ -187,7 +188,7 @@ dd if="${PRIVATE_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=2005 # BOTTLEROCKET-DATA truncate -s 1G "${DATA_IMAGE}" sgdisk --clear \ - -n 0:1M:1023M -c 0:"BOTTLEROCKET-DATA" -t 0:8300 \ + -n 0:1M:1023M -c 0:"BOTTLEROCKET-DATA" -t 0:"${BOTTLEROCKET_DATA_TYPECODE}" \ --sort --print "${DATA_IMAGE}" # If we build on a host with SELinux enabled, we could end up with labels that # do not match our policy. Since we allow replacing the data volume at runtime, From e9377c686c849ceef4c75d7469b08d78ae59d0d6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 29 Apr 2020 16:38:08 -0700 Subject: [PATCH 0289/1356] Allow overriding package name in packages/*/Cargo.toml Cargo doesn't allow some common characters like "." in a package name, which would normally prevent you from naming package artifacts with those characters. With this change, if you specify `package.metadata.build-package.package-name` in Cargo.toml, you can override the package name and tell the build system to look at your preferred name for the package directory, spec file, etc. 
--- tools/buildsys/src/main.rs | 8 +++++++- tools/buildsys/src/manifest.rs | 32 +++++++++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 77ff8759..529cd95b 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -121,7 +121,13 @@ fn build_package() -> Result<()> { } } - let package = getenv("CARGO_PKG_NAME")?; + // Package developer can override name of package if desired, e.g. to name package with + // characters invalid in Cargo crate names + let package = if let Some(name_override) = manifest.package_name() { + name_override.clone() + } else { + getenv("CARGO_PKG_NAME")? + }; let spec = format!("{}.spec", package); println!("cargo:rerun-if-changed={}", spec); diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 55d8f34b..2e726696 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -1,10 +1,14 @@ /*! +# Build system metadata + This module provides deserialization and convenience methods for build system metadata located in `Cargo.toml`. Cargo ignores the `package.metadata` table in its manifest, so it can be used to store configuration for other tools. We recognize the following keys. +## Metadata for packages + `source-groups` is a list of directories in the top-level `sources` directory, each of which contains a set of related Rust projects. Changes to files in these groups should trigger a rebuild. @@ -29,6 +33,26 @@ url = "https://bar" sha512 = "123456" ``` +`package-name` lets you override the package name in Cargo.toml; this is useful +if you have a package with "." in its name, for example, which Cargo doesn't +allow. This means the directory name and spec file name can use your preferred +naming. +``` +[package.metadata.build-package] +package-name = "better.name" +``` + +`variant-sensitive` lets you specify whether the package should be rebuilt when +building a new variant, and defaults to false; set it to true if a package is +using the variant to affect its build process. (Typically this means that it +reads BUILDSYS_VARIANT.) +``` +[package.metadata.build-package] +variant-sensitive = true +``` + +## Metadata for variants + `included-packages` is a list of packages that should be included in a variant. ``` [package.metadata.build-variant] @@ -70,6 +94,11 @@ impl ManifestInfo { self.build_package().and_then(|b| b.external_files.as_ref()) } + /// Convenience method to return the package name override, if any. + pub(crate) fn package_name(&self) -> Option<&String> { + self.build_package().and_then(|b| b.package_name.as_ref()) + } + /// Convenience method to find whether the package is sensitive to variant changes. pub(crate) fn variant_sensitive(&self) -> Option { self.build_package().and_then(|b| b.variant_sensitive) @@ -113,8 +142,9 @@ struct Metadata { #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] pub(crate) struct BuildPackage { - pub(crate) source_groups: Option>, pub(crate) external_files: Option>, + pub(crate) package_name: Option, + pub(crate) source_groups: Option>, pub(crate) variant_sensitive: Option, } From 06de59f610d4cf8162fca5b3d1595a745520438f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 29 Apr 2020 16:41:26 -0700 Subject: [PATCH 0290/1356] Rename Kubernetes package to 1.15 to make way for new versions The main change is adding a version to the name of the kubelet package so that the variant can pick the relevant Kubernetes version. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3469dd1f..ad5cf76f 100644 --- a/README.md +++ b/README.md @@ -378,7 +378,7 @@ We currently package the following major third-party components: * systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) * wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) -* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes/)) +* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.15/)) * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) For further documentation or to see the rest of the packages, see the [packaging directory](packages/). From 8dd83ce5a663e7e3fe1ea70722393d4525c227dc Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 12 May 2020 10:07:10 -0700 Subject: [PATCH 0291/1356] Update to kernel 5.4.38-17.76 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index 1d10534d..54176cf9 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/2e1a86879ed805e227d81f815fcc9f7575ae98a2fd6573ca71c9be2776c0637d/kernel-5.4.20-12.75.amzn2.src.rpm" -sha512 = "b698803700a05dbd21d761e3efdde20b212374869220b4e509858cdffb8566d946576991ac46351f56235882e57513a5958bc71751c43a6b4f7fe5cc4c8268ac" +url = "https://cdn.amazonlinux.com/blobstore/e59a3280f4c5fd5c4ad8686c1854327e3d177cc647c19b6a554f0c4b75df8c96/kernel-5.4.38-17.76.amzn2.src.rpm" +sha512 = "542891b79c355930daca09c4e54a61bf2d0b711c66d6d3d8b35ee8e7df2459898ca295a60c1ca3361177129a3c63f70cf034d87412610f77506756fbc3ba76c1" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index cff93c94..2a18335e 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.20 +Version: 5.4.38 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/2e1a86879ed805e227d81f815fcc9f7575ae98a2fd6573ca71c9be2776c0637d/kernel-5.4.20-12.75.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/e59a3280f4c5fd5c4ad8686c1854327e3d177cc647c19b6a554f0c4b75df8c96/kernel-5.4.38-17.76.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc From c260e877c9f8137a6edde2e52a4eff04c82cff25 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 13 May 2020 18:04:13 +0000 Subject: [PATCH 0292/1356] build: disable SELinux when fetching Go modules This fixes a build failure when the Docker daemon is running with the SELinux feature enabled. An alternative would be to relabel the directories on the host, but if we do that then the user can no longer write to or remove them. 
Signed-off-by: Ben Cressey --- tools/docker-go | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/docker-go b/tools/docker-go index bae49199..d89e90a4 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -68,6 +68,7 @@ docker run --rm \ -e GOPATH='/tmp/go' \ "${proxy_env[@]}" \ --user "$(id -u):$(id -g)" \ + --security-opt label:disable \ ${DOCKER_RUN_ARGS} \ -v "${GO_MOD_CACHE}":/tmp/go/pkg/mod \ -v "${GO_MODULE_PATH}":/usr/src/host-ctr \ From 55687d923930bc69c1cf2469f9430eb830b256d7 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 2 Jun 2020 18:10:45 +0000 Subject: [PATCH 0293/1356] ci: expand build matrix Adds coverage for aarch64 and both Kubernetes variants. Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fd56a943..f6889937 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,8 +5,19 @@ on: jobs: build: runs-on: [self-hosted, linux, x64] + continue-on-error: ${{ matrix.supported }} + strategy: + matrix: + variant: [aws-k8s-1.15, aws-k8s-1.16] + arch: [x86_64, aarch64] + supported: [true] + include: + - variant: aws-dev + arch: x86_64 + supported: false + fail-fast: false steps: - uses: actions/checkout@v2 - run: cargo install --version 0.30.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features - - run: cargo make + - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From f797282809da464844f2767170c47608e948344d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 5 Jun 2020 14:28:51 +0000 Subject: [PATCH 0294/1356] buildsys: retry builds on frontend error Work around a transient failure that makes CI builds unreliable. 
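The full buildsys change follows in the diff below. As a simplified, standalone illustration of the idea (not the buildsys code itself), the retry amounts to capturing a command's combined output, checking it for the known transient error signature, and trying again a bounded number of times; anything else still fails immediately.

```
// Sketch only: retry a command when its output matches a known transient error.
use std::process::Command;

const FRONTEND_ERROR: &str = "frontend grpc server closed unexpectedly";
const MAX_ATTEMPTS: u32 = 10;

fn run_with_retry(program: &str, args: &[&str]) -> Result<(), String> {
    for attempt in 1..=MAX_ATTEMPTS {
        let output = Command::new(program)
            .args(args)
            .output()
            .map_err(|e| format!("failed to start {}: {}", program, e))?;

        // Combine stdout and stderr so the error signature can be found either way.
        let combined = format!(
            "{}{}",
            String::from_utf8_lossy(&output.stdout),
            String::from_utf8_lossy(&output.stderr)
        );
        print!("{}", combined);

        if output.status.success() {
            return Ok(());
        }

        // Only retry when the failure looks like the known transient error.
        if !combined.contains(FRONTEND_ERROR) || attempt == MAX_ATTEMPTS {
            return Err(format!("{} failed after {} attempt(s)", program, attempt));
        }
    }
    unreachable!()
}

fn main() {
    if let Err(e) = run_with_retry("docker", &["build", "."]) {
        eprintln!("{}", e);
    }
}
```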
Signed-off-by: Ben Cressey --- tools/buildsys/Cargo.lock | 7 +++ tools/buildsys/Cargo.toml | 1 + tools/buildsys/src/builder.rs | 91 ++++++++++++++++++++++++----- tools/buildsys/src/builder/error.rs | 7 ++- tools/buildsys/src/main.rs | 15 ++++- 5 files changed, 104 insertions(+), 17 deletions(-) diff --git a/tools/buildsys/Cargo.lock b/tools/buildsys/Cargo.lock index 9d7a5053..5fbb062c 100644 --- a/tools/buildsys/Cargo.lock +++ b/tools/buildsys/Cargo.lock @@ -40,6 +40,7 @@ version = "0.1.0" dependencies = [ "duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "nonzero_ext 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", @@ -461,6 +462,11 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "nonzero_ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "num_cpus" version = "1.12.0" @@ -1181,6 +1187,7 @@ dependencies = [ "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +"checksum nonzero_ext 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" "checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" "checksum once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 5097136e..d389f24c 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -18,3 +18,4 @@ snafu = "0.6" toml = "0.5" url = "2.1" walkdir = "2" +nonzero_ext = "0.2.0" diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 3593efa2..4d5976cc 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -8,12 +8,33 @@ pub(crate) mod error; use error::Result; use duct::cmd; +use nonzero_ext::nonzero; use rand::Rng; use sha2::{Digest, Sha512}; -use snafu::ResultExt; +use snafu::{ensure, ResultExt}; use std::env; +use std::num::NonZeroU16; use std::process::Output; +/* +There's a bug in BuildKit that can lead to a build failure during parallel +`docker build` executions: + https://github.com/moby/buildkit/issues/1090 + +Unfortunately we can't do much to control the concurrency here, and even when +the bug is fixed there will be many older versions of Docker in the wild. + +The failure has an exit code of 1, which is too generic to be helpful. All we +can do is check the output for the error's signature, and retry if we find it. 
+*/ +static DOCKER_BUILD_FRONTEND_ERROR: &str = concat!( + r#"failed to solve with frontend dockerfile.v0: "#, + r#"failed to solve with frontend gateway.v0: "#, + r#"frontend grpc server closed unexpectedly"# +); + +static DOCKER_BUILD_MAX_ATTEMPTS: NonZeroU16 = nonzero!(10u16); + pub(crate) struct PackageBuilder; impl PackageBuilder { @@ -134,35 +155,79 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> let rmi = args(format!("rmi --force {}", tag)); // Clean up the stopped container if it exists. - let _ = docker(&rm); + let _ = docker(&rm, Retry::No); // Clean up the previous image if it exists. - let _ = docker(&rmi); + let _ = docker(&rmi, Retry::No); // Build the image, which builds the artifacts we want. - docker(&build)?; + // Work around a transient, known failure case with Docker. + docker( + &build, + Retry::Yes { + attempts: DOCKER_BUILD_MAX_ATTEMPTS, + messages: &[DOCKER_BUILD_FRONTEND_ERROR], + }, + )?; // Create a stopped container so we can copy artifacts out. - docker(&create)?; + docker(&create, Retry::No)?; // Copy artifacts into our output directory. - docker(&cp)?; + docker(&cp, Retry::No)?; // Clean up our stopped container after copying artifacts out. - docker(&rm)?; + docker(&rm, Retry::No)?; // Clean up our image now that we're done. - docker(&rmi)?; + docker(&rmi, Retry::No)?; Ok(()) } /// Run `docker` with the specified arguments. -fn docker(args: &[String]) -> Result { - cmd("docker", args) - .stderr_to_stdout() - .run() - .context(error::CommandExecution) +fn docker(args: &[String], retry: Retry) -> Result { + let mut max_attempts: u16 = 1; + let mut retry_messages: &[&str] = &[]; + if let Retry::Yes { attempts, messages } = retry { + max_attempts = attempts.into(); + retry_messages = messages; + } + + let mut attempt = 1; + loop { + let output = cmd("docker", args) + .stderr_to_stdout() + .stdout_capture() + .unchecked() + .run() + .context(error::CommandStart)?; + + let stdout = String::from_utf8_lossy(&output.stdout); + println!("{}", &stdout); + if output.status.success() { + return Ok(output); + } + + ensure!( + retry_messages.iter().any(|&m| stdout.contains(m)) && attempt < max_attempts, + error::DockerExecution { + args: &args.join(" ") + } + ); + + attempt += 1; + } +} + +/// Allow the caller to configure retry behavior, since the command may fail +/// for spurious reasons that should not be treated as an error. +enum Retry<'a> { + No, + Yes { + attempts: NonZeroU16, + messages: &'a [&'a str], + }, } /// Convert an argument string into a collection of positional arguments. 
diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs index 5bc78db6..0a20a91e 100644 --- a/tools/buildsys/src/builder/error.rs +++ b/tools/buildsys/src/builder/error.rs @@ -4,8 +4,11 @@ use std::path::PathBuf; #[derive(Debug, Snafu)] #[snafu(visibility = "pub(super)")] pub(crate) enum Error { - #[snafu(display("Failed to execute command: {}", source))] - CommandExecution { source: std::io::Error }, + #[snafu(display("Failed to start command: {}", source))] + CommandStart { source: std::io::Error }, + + #[snafu(display("Failed to execute command: 'docker {}'", args))] + DockerExecution { args: String }, #[snafu(display("Failed to change directory to '{}': {}", path.display(), source))] DirectoryChange { diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 529cd95b..a7794872 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -23,6 +23,7 @@ use snafu::ResultExt; use spec::SpecInfo; use std::env; use std::path::PathBuf; +use std::process; mod error { use snafu::Snafu; @@ -77,10 +78,20 @@ SUBCOMMANDS: build-package Build RPMs from a spec file and sources. build-variant Build filesystem and disk images from RPMs." ); - std::process::exit(1) + process::exit(1) } -fn main() -> Result<()> { +// Returning a Result from main makes it print a Debug representation of the error, but with Snafu +// we have nice Display representations of the error, so we wrap "main" (run) and print any error. +// https://github.com/shepmaster/snafu/issues/110 +fn main() { + if let Err(e) = run() { + eprintln!("{}", e); + process::exit(1); + } +} + +fn run() -> Result<()> { let command_str = std::env::args().nth(1).unwrap_or_else(|| usage()); let command = serde_plain::from_str::(&command_str).unwrap_or_else(|_| usage()); match command { From b00cf55b0e9652d9e6c295f2f541218aae45dbdd Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 8 Jun 2020 19:25:08 +0000 Subject: [PATCH 0295/1356] ci: run unit tests Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f6889937..60e7b0a2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,4 +20,5 @@ jobs: - uses: actions/checkout@v2 - run: cargo install --version 0.30.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features + - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From a420dc168c4e132b8fcd53f08f343da40bb8c4ff Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 10 Jun 2020 10:20:25 -0700 Subject: [PATCH 0296/1356] Document platform-specific settings in README --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index ad5cf76f..9bd2e4ea 100644 --- a/README.md +++ b/README.md @@ -327,6 +327,17 @@ We use it for the control container because it needs to be available early to gi Be careful, and make sure you have a similar low-level use case before reaching for host containers. +#### Platform-specific settings + +Platform-specific settings are automatically set at boot time by [early-boot-config](sources/api/early-boot-config) based on metadata available on the running platform. +They can be overridden for testing purposes in [the same way as other settings](#interacting-with-settings). 
+ +##### AWS-specific settings + +AWS-specific settings are automatically set based on calls to the Instance MetaData Service (IMDS). + +* `settings.aws.region`: This is set to the AWS region in which the instance is running, for example `us-west-2`. + ### Logs You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. From e4d088295470e13cca4d7e2c0d83ea41d4c014df Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Mon, 22 Jun 2020 23:15:24 +0000 Subject: [PATCH 0297/1356] Update to kernel 5.4.46-19.75 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index 54176cf9..35fc14a2 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/e59a3280f4c5fd5c4ad8686c1854327e3d177cc647c19b6a554f0c4b75df8c96/kernel-5.4.38-17.76.amzn2.src.rpm" -sha512 = "542891b79c355930daca09c4e54a61bf2d0b711c66d6d3d8b35ee8e7df2459898ca295a60c1ca3361177129a3c63f70cf034d87412610f77506756fbc3ba76c1" +url = "https://cdn.amazonlinux.com/blobstore/92ec64be321f67c52afa1eb77b3c301b082c2850ae3d45609bf4802cd6a20400/kernel-5.4.46-19.75.amzn2.src.rpm" +sha512 = "0ebc6e27448513ffc40d295f8a844d367a345ed242e19c862b1290bec895100dd220526c9f3b274bc75399b35c03c69356ce508872669c1d155dd47a91981cbe" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 2a18335e..3c2e9173 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.38 +Version: 5.4.46 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/e59a3280f4c5fd5c4ad8686c1854327e3d177cc647c19b6a554f0c4b75df8c96/kernel-5.4.38-17.76.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/92ec64be321f67c52afa1eb77b3c301b082c2850ae3d45609bf4802cd6a20400/kernel-5.4.46-19.75.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc From 8cb44f67047b07d8371b54e9c37025eb47fcabf0 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 8 Jun 2020 14:33:16 -0700 Subject: [PATCH 0298/1356] settings: add new 'version-lock' and 'ignore-waves' settings Adds a 'version-lock' setting for specifying the version to update to when updating. Adds a 'ignore-waves' setting for specifying whether to respect update waves when updating. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 9bd2e4ea..c07a60e0 100644 --- a/README.md +++ b/README.md @@ -276,6 +276,8 @@ The following settings are set for you automatically by [pluto](sources/api/) ba * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. * `settings.updates.targets-base-url`: The common portion of all URIs used to download update files. * `settings.updates.seed`: A `u32` value that determines how far into in the update schedule this machine will accept an update. We recommending leaving this at its default generated value so that updates can be somewhat randomized in your cluster. 
+* `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. +* `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. For testing purposes, you can set this to `true` to ignore those waves and update immediately. #### Time settings From aa2c4dbe4f218d042f966e0c7f68b7b3f918b969 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 30 Jun 2020 13:12:55 -0700 Subject: [PATCH 0299/1356] README: document more update methods Adds information about the update API and brupop to the "Updates" section of the README. --- README.md | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 89 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c07a60e0..fb3736dd 100644 --- a/README.md +++ b/README.md @@ -96,7 +96,7 @@ Then you'd be able to start a session using only your instance ID, like this: aws ssm start-session --target INSTANCE_ID ``` -With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make API calls to change settings in your Bottlerocket host. +With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make [API calls](#api) to configure and manage your Bottlerocket host. To do even more, read the next section about the [admin container](#admin-container). ### Admin container @@ -133,7 +133,88 @@ Be careful; while you can inspect and change even more as root, Bottlerocket's f Rather than a package manager that updates individual pieces of software, Bottlerocket downloads a full filesystem image and reboots into it. It can automatically roll back if boot failures occur, and workload failures can trigger manual rollbacks. -Currently, you can update using a CLI tool, updog. +The update process uses images secured by [TUF](https://theupdateframework.github.io/). +For more details, see the [update system documentation](sources/updater/). + +### Update methods + +There are several ways of updating your Bottlerocket hosts: + +#### Update API + +The [Bottlerocket API](#api) allows you to update and reboot your host with simple API calls. You can change [settings](#updates-settings) to control which updates are selected. + +In general, the process of using the update API looks like this. You refresh the list of known updates, then apply one to the system. Calls to `/updates/status` will tell you the current state and give more details on any errors. + +![Update API overview](sources/api/update_api.png) + +First, refresh the list of available updates: +``` +apiclient -u /actions/refresh-updates -m POST +``` + +Now you can see the list of available updates, along with the chosen update, according to your `version-lock` [setting](#updates-settings): +``` +apiclient -u /updates/status +``` + +This will return the current update status in JSON format. The status should look something like the following (pretty-printed): +``` +{ + "update_state": "Available", + "available_updates": [ + "0.4.0", + "0.3.4", + ... 
+ ], + "chosen_update": { + "arch": "x86_64", + "version": "0.4.0", + "variant": "aws-k8s-1.15" + }, + "active_partition": { + "image": { + "arch": "x86_64", + "version": "0.3.2", + "variant": "aws-k8s-1.15" + }, + "next_to_boot": true + }, + "staging_partition": null, + "most_recent_command": { + "cmd_type": "refresh", + "cmd_status": "Success", + ... + } +} +``` + +You can see that the we're running `v0.3.2` in the active partition, and that `v0.4.0` is available. +If you're happy with that selection, you can request that the update be downloaded and applied to disk. (The update will remain inactive until you make the `activate-update` call below.) +``` +apiclient -u /actions/prepare-update -m POST +``` + +After you request that the update be prepared, you can check the update status again until it reflects the new version in the staging partition. +``` +apiclient -u /updates/status +``` + +If the staging partition shows the new version, you can proceed to "activate" the update. +This means that as soon as the host is rebooted it will try to run the new version. (If the new version can't boot, we automatically flip back to the old version.) +``` +apiclient -u /actions/activate-update -m POST +``` + +You can reboot the host with: +``` +apiclient -u /actions/reboot -m POST +``` + +#### Updog + +You can also update using a CLI tool, `updog`, if you [connect through a host container](#exploration). + Here's how you can see whether there's an update: ``` @@ -149,6 +230,12 @@ reboot (If you know what you're doing and want to update *now*, you can run `updog update --reboot --now`) +#### Bottlerocket Update Operator + +If you are running the Kubernetes variant of Bottlerocket, you can use the [Bottlerocket update operator](https://github.com/bottlerocket-os/bottlerocket-update-operator) to automate Bottlerocket updates. + +### Update rollback + The system will automatically roll back if it's unable to boot. If the update is not functional for a given container workload, you can do a manual rollback: @@ -157,11 +244,6 @@ signpost rollback-to-inactive reboot ``` -We're working on more automated update methods. - -The update process uses images secured by [TUF](https://theupdateframework.github.io/). -For more details, see the [update system documentation](sources/updater/). - ## Settings Here we'll describe the settings you can configure on your Bottlerocket instance, and how to do it. From 77c11d99e57831305bf384a9d677b14e97566057 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 4 Jul 2020 15:37:13 +0000 Subject: [PATCH 0300/1356] build: exclude more paths in .dockerignore Otherwise many unnecessary build artifacts are sent as context. 
Signed-off-by: Ben Cressey --- .dockerignore | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.dockerignore b/.dockerignore index 556e536f..8a235523 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,8 @@ /.git /.gomodcache -/build/*.img -/build/*.lz4 -/build/*.tar -/build/*-debuginfo-*.rpm -/build/*-debugsource-*.rpm +/build/**/*.img +/build/**/*.lz4 +/build/**/*.tar +/build/**/*-debuginfo-*.rpm +/build/**/*-debugsource-*.rpm **/target/* From 6ce09339add20002b0737a0da53f187e3fcbaa0f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 30 Jun 2020 17:42:30 -0700 Subject: [PATCH 0301/1356] Add aws-k8s-1.17 variant with Kubernetes 1.17 There are only minor changes from the aws-k8s-1.16 variant: * Rebase the aws-sdk-go update patch * No longer need the license clarification for github.com/munnerz/goautoneg because the project added a license file * Disable new CSIMigration in kubelet-config until further supported: https://kubernetes.io/blog/2019/12/09/kubernetes-1-17-feature-csi-migration-beta/ --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 60e7b0a2..c8bc5680 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.15, aws-k8s-1.16] + variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17] arch: [x86_64, aarch64] supported: [true] include: From af3cab98f0d5d6f1c2e7535d148d8513a70fef44 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 9 Jul 2020 21:39:42 +0000 Subject: [PATCH 0302/1356] remove laika Now that systemd handles the /etc mount, we no longer need it. Signed-off-by: Ben Cressey --- GLOSSARY.md | 1 - tools/rpm2img | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/GLOSSARY.md b/GLOSSARY.md index 546fe145..060c75f1 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -12,7 +12,6 @@ Used for system maintenance and connectivity. * [**host-ctr**](sources/host-ctr): The program started by `host-containers@.service` for each host container. Its job is to start the specified host container on the “host” instance of containerd, which is separate from the “user” instance of containerd used for Kubernetes pods. -* [**laika**](sources/preinit/laika): A crate that builds a binary (`/sbin/preinit`) that's used to mount filesystems before starting init (`systemd`). * [**model**](sources/models): The API system has a data model defined for each variant, and this model is used by other programs to serialize and deserialize requests while maintaining safety around data types. * [**netdog**](sources/api/netdog): A program called by wicked to retrieve and write out network configuration from DHCP. * [**pluto**](sources/api/pluto): A setting generator called by sundog to find networking settings required by Kubernetes. 
diff --git a/tools/rpm2img b/tools/rpm2img index 4e391c2b..d02795bc 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -161,7 +161,7 @@ set default="0" set timeout="0" menuentry "Bottlerocket OS ${VERSION_ID}" { - linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro init=/sbin/preinit \\ + linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro \\ console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ From cda8088cd20bb6c9405501713d9070ad908d53da Mon Sep 17 00:00:00 2001 From: spoonofpower Date: Wed, 15 Jul 2020 07:58:40 -0700 Subject: [PATCH 0303/1356] Configurable cluster domain (#988) Support setting the kubelet's clusterDomain config value --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index fb3736dd..0988c11d 100644 --- a/README.md +++ b/README.md @@ -347,6 +347,9 @@ The following settings can be optionally set to customize the node labels and ta special = "true:NoSchedule" ``` +The following settings are optional and allow you to further configure your cluster. +* `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. + The following settings are set for you automatically by [pluto](sources/api/) based on runtime instance information, but you can override them if you know what you're doing! * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) * `settings.kubernetes.cluster-dns-ip`: The CIDR block of the primary network interface. From e0250bfe4102b79b0cd4bcbe5af66e4f7e0daa8b Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 17 Jul 2020 14:18:14 -0700 Subject: [PATCH 0304/1356] Exclude README.md from cargo change tracking In packages using cargo-readme, README.md files are touched by build.rs, which triggers cargo's change tracking in the next build. This means cargo is rebuilding the package every time. This change adds README.md to the exclude list in Cargo.toml files so that cargo doesn't think a README change requires a rebuild. Most of our packages use cargo-readme to generate README.md, and for packages that don't, this still isn't harmful; manual updates to README.md don't need a Rust build. (Special cases can just remove this exclude line.) The downside is that accidental changes to README.md, like removing it, won't be automatically corrected with a rebuild if you haven't touched anything else. This should be rare, and will correct itself upon any other change. (And a developer is unlikely to commit such an accidental change.) --- tools/buildsys/Cargo.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index d389f24c..f658c2b7 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Ben Cressey "] license = "Apache-2.0 OR MIT" edition = "2018" publish = false +# Don't rebuild crate just because of changes to README. +exclude = ["README.md"] [dependencies] duct = "0.13.0" From 77df87edbb105b727b670c2ebbc87f364df536b7 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 18 Jul 2020 16:38:13 +0000 Subject: [PATCH 0305/1356] buildsys: exclude README.md from change tracking Follow-up to cd7e66db. 
We don't use or generate README.md docs during our package builds, so they don't need to trigger a rebuild. Signed-off-by: Ben Cressey --- tools/buildsys/src/project.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/buildsys/src/project.rs b/tools/buildsys/src/project.rs index 434da15c..0ab8df24 100644 --- a/tools/buildsys/src/project.rs +++ b/tools/buildsys/src/project.rs @@ -45,7 +45,7 @@ impl ProjectInfo { entry .file_name() .to_str() - .map(|s| s.starts_with('.') || s == "target" || s == "vendor") + .map(|s| s.starts_with('.') || s == "target" || s == "vendor" || s == "README.md") .unwrap_or(false) } } From 8bc17a9df38133fd7261c6749575d9a0bfd68d31 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 23 Jun 2020 20:26:19 +0000 Subject: [PATCH 0306/1356] Add pubsys: make Bottlerocket repos using `cargo make repo` Simplify repo creation and tie it into the existing `cargo make` build system using a new `repo` target. This is intended to replace the common pattern of calls to tuftool and updata - either creating a repo or extending an existing one, adding the latest built artifacts, then updating the manifest to match. **Usage:** `cargo make repo` depends on the `build` target, so if your goal is building an update for a repo you don't have to separately build it first. It uses the same `BUILDSYS_VARIANT` and `BUILDSYS_ARCH` variables to determine the metadata for the update added to the repo. **Requirements:** * An Infra.toml file, based on Infra.toml.example, listing paths to keys, existing repos, etc. * Release.toml updated with a new version **Optional further configuration:** * Repo expiration policy - by default, uses a policy file with 2 week target and snapshot expiration and 1 week timestamp expiration. * Wave policy - same policy files you give to updata today; defaults to "default" wave policy. * Release start time - when waves start and when expiration starts counting down; defaults to now. * Can select which named repo and signing key to use from Infra.toml. **Design decisions:** * Built repo metadata is written to a directory like /build/repos/bottlerocket-0.4.1-5880e5d/aws-k8s-1.15/x86_64 so that you can prepare repos for multiple releases in parallel. Targets are written to a shared directory like /build/repos/bottlerocket-0.4.1-5880e5d/targets - they're unique across variants and arches so there's no conflict. The directory structure as a whole can be synced to your final repo location; it's the structure expected by Bottlerocket and updog. * buildsys uses environment variables set by cargo-make; we opted instead for more standard arg parsing. It seems more likely that someone would use pubsys separately from cargo-make, and pubsys has more input information, so arg parsing was clearer. * cargo-make environment variable expansion is done in phases, and you can't refer to a variable defined in the same section if you intend to let the user override the earlier variable on the command line. If you do, the variable won't expand, as seen in https://github.com/bottlerocket-os/bottlerocket/pull/963. Because of this, until we figure out a better strategy, a couple of variables can't be overridden - the path to Release.toml (which we made a variable in this change) and the repo output directory. 
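The design note above mentions that pubsys uses standard argument parsing rather than the cargo-make environment variables buildsys relies on; pubsys's dependency list includes structopt. As a rough sketch of that choice (the flag names here are illustrative assumptions, not pubsys's actual interface):

```
// Sketch only: structopt-style argument parsing of the kind described above.
use std::path::PathBuf;
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
#[structopt(about = "Example repo-building arguments")]
struct Args {
    /// Path to Infra.toml
    #[structopt(long, parse(from_os_str))]
    infra_config_path: PathBuf,

    /// Variant to publish, e.g. aws-k8s-1.15
    #[structopt(long)]
    variant: String,

    /// Architecture to publish, e.g. x86_64
    #[structopt(long)]
    arch: String,

    /// Where to write repo metadata and targets
    #[structopt(long, parse(from_os_str))]
    outdir: PathBuf,
}

fn main() {
    let args = Args::from_args();
    println!(
        "publishing {} / {} using {}",
        args.variant,
        args.arch,
        args.infra_config_path.display()
    );
    println!("writing repo to {}", args.outdir.display());
}
```

In practice the variant and architecture values would simply be passed through from the same BUILDSYS_VARIANT and BUILDSYS_ARCH variables that cargo-make already sets, as the commit message notes.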
Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- .gitignore | 2 + tools/.gitignore | 1 + tools/Cargo.lock | 2272 +++++++++++++++++ tools/Cargo.toml | 5 + tools/buildsys/Cargo.lock | 1268 --------- tools/buildsys/Cargo.toml | 4 +- tools/buildsys/deny.toml | 2 +- tools/pubsys/Cargo.toml | 28 + tools/pubsys/Infra.toml.example | 26 + tools/pubsys/deny.toml | 41 + .../policies/repo-expiration/2w-2w-1w.toml | 3 + tools/pubsys/src/config.rs | 91 + tools/pubsys/src/main.rs | 107 + tools/pubsys/src/repo.rs | 644 +++++ tools/pubsys/src/repo/transport.rs | 63 + 15 files changed, 3286 insertions(+), 1271 deletions(-) create mode 100644 tools/Cargo.lock create mode 100644 tools/Cargo.toml delete mode 100644 tools/buildsys/Cargo.lock create mode 100644 tools/pubsys/Cargo.toml create mode 100644 tools/pubsys/Infra.toml.example create mode 100644 tools/pubsys/deny.toml create mode 100644 tools/pubsys/policies/repo-expiration/2w-2w-1w.toml create mode 100644 tools/pubsys/src/config.rs create mode 100644 tools/pubsys/src/main.rs create mode 100644 tools/pubsys/src/repo.rs create mode 100644 tools/pubsys/src/repo/transport.rs diff --git a/.gitignore b/.gitignore index 68ff48e1..f152b238 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,5 @@ /.cargo /.gomodcache /html +/Infra.toml +/*.pem diff --git a/tools/.gitignore b/tools/.gitignore index 89ef2cb9..9f76dddb 100644 --- a/tools/.gitignore +++ b/tools/.gitignore @@ -1,3 +1,4 @@ /bin/buildsys +/bin/pubsys /.crates.toml /.crates2.json diff --git a/tools/Cargo.lock b/tools/Cargo.lock new file mode 100644 index 00000000..be1ac497 --- /dev/null +++ b/tools/Cargo.lock @@ -0,0 +1,2272 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "addr2line" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccc9a9dd069569f212bc4330af9f17c4afb5e8ce185e83dbb14f1349dda18b10" + +[[package]] +name = "aho-corasick" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "arc-swap" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "async-trait" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atty" +version = "0.2.14" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base-x" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" + +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "blake2b_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bstr" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +dependencies = [ + "memchr", +] + +[[package]] +name = "buildsys" +version = "0.1.0" +dependencies = [ + "duct", + "hex", + "nonzero_ext", + "rand", + "reqwest", + "serde", + "serde_plain", + "sha2", + "snafu", + "toml", + "url", + "walkdir", +] + +[[package]] +name = "bumpalo" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "bytes" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" +dependencies 
= [ + "loom", +] + +[[package]] +name = "cargo-readme" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66dbfc9307f5b2429656e07533613cd3f26803fd2857fc33be22aa2711181d58" +dependencies = [ + "clap", + "lazy_static", + "percent-encoding", + "regex", + "serde", + "serde_derive", + "toml", +] + +[[package]] +name = "cc" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "chrono" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +dependencies = [ + "num-integer", + "num-traits", + "serde", + "time 0.1.43", +] + +[[package]] +name = "clap" +version = "2.33.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "core-foundation" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if", + "lazy_static", +] + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "ct-logs" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" +dependencies = [ + "sct", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "dirs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +dependencies = [ + "cfg-if", + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + +[[package]] +name = "discard" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "dtoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" + +[[package]] +name = "duct" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90a9c3a25aafbd538c7d40a53f83c4487ee8216c12d1c8ef2c01eb2f6ea1553" +dependencies = [ + "libc", + "once_cell", + "os_pipe", + "shared_child", +] + +[[package]] +name = "encoding_rs" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" + +[[package]] +name = "futures" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" + +[[package]] +name = "futures-executor" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" + +[[package]] +name = "futures-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" + +[[package]] +name = "futures-task" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] + +[[package]] +name = "futures-util" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "generator" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" + +[[package]] +name = "globset" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" +dependencies = [ + "aho-corasick", + "bstr", + "fnv", + "log", + "regex", +] + +[[package]] +name = "h2" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "log", + "slab", + "tokio", + "tokio-util", +] + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac", + "digest", +] + +[[package]] +name = "http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +dependencies = [ + "bytes", + "fnv", + 
"itoa", +] + +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "httparse" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" + +[[package]] +name = "hyper" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "log", + "pin-project", + "socket2", + "time 0.1.43", + "tokio", + "tower-service", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" +dependencies = [ + "bytes", + "ct-logs", + "futures-util", + "hyper", + "log", + "rustls", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "webpki", +] + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +dependencies = [ + "autocfg", +] + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" + +[[package]] +name = "js-sys" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4b9172132a62451e56142bff9afc91c8e4a4500aa5b847da36815b63bfda916" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "loom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", +] + 
+[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "mime_guess" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +dependencies = [ + "cfg-if", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow 0.2.1", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio-named-pipes" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" +dependencies = [ + "log", + "mio", + "miow 0.3.5", + "winapi 0.3.9", +] + +[[package]] +name = "mio-uds" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +dependencies = [ + "iovec", + "libc", + "mio", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "miow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + +[[package]] +name = "net2" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +dependencies = [ + "cfg-if", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "nonzero_ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" + +[[package]] +name = "num-integer" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + +[[package]] +name = "olpc-cjson" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9409e2493366c8f19387c98c5189ab9c937541b5bf48f11390d038a59fdfd9c1" +dependencies = [ + "serde", + "serde_json", + "unicode-normalization", +] + +[[package]] +name = "once_cell" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "os_pipe" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb233f06c2307e1f5ce2ecad9f8121cffbbee2c95428f44ea85222e460d0d213" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "parse-datetime" +version = "0.1.0" +dependencies = [ + "cargo-readme", + "chrono", + "snafu", +] + +[[package]] +name = "pem" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1581760c757a756a41f0ee3ff01256227bdf64cb752839779b95ffb01c59793" +dependencies = [ + "base64 0.11.0", + "lazy_static", + "regex", +] + +[[package]] +name = "pem" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59698ea79df9bf77104aefd39cc3ec990cb9693fb59c3b0a70ddf2646fdffb4b" +dependencies = [ + "base64 0.12.3", + "once_cell", + "regex", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pin-project" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" + +[[package]] +name = "proc-macro-error" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "syn-mid", + "version_check", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" + +[[package]] +name = "proc-macro-nested" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" + +[[package]] +name = "proc-macro2" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "pubsys" +version = "0.1.0" +dependencies = [ + "chrono", + "clap", + "lazy_static", + "log", + "parse-datetime", + "reqwest", + "semver", + "serde", + "serde_json", + "simplelog", + "snafu", + "structopt", + "tempfile", + "toml", + "tough 0.8.0", + "tough-ssm", + "update_metadata", + "url", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" + +[[package]] +name = "redox_users" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +dependencies = [ + "getrandom", + "redox_syscall", + "rust-argon2", +] + +[[package]] +name = "regex" +version = "1.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-syntax" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "reqwest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +dependencies = [ + "base64 0.12.3", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "hyper-rustls", + "js-sys", + "lazy_static", + "log", + "mime", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "rustls", + "serde", + "serde_urlencoded", + "tokio", + "tokio-rustls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.16.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.9", +] + +[[package]] +name = "rusoto_core" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841ca8f73e7498ba39146ab43acea906bbbb807d92ec0b7ea4b6293d2621f80d" +dependencies = [ + "async-trait", + "base64 0.12.3", + "bytes", + "futures", + "hmac", + "http", + "hyper", + "hyper-rustls", + "lazy_static", + "log", + "md5", + "percent-encoding", + "pin-project", + "rusoto_credential", + "rusoto_signature", + "rustc_version", + "serde", + "serde_json", + "sha2", + "tokio", + "xml-rs", +] + +[[package]] +name = "rusoto_credential" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60669ddc1bdbb83ce225593649d36b4c5f6bf9db47cc1ab3e81281abffc853f4" +dependencies = [ + "async-trait", + "chrono", + "dirs", + "futures", + "hyper", + "pin-project", + "regex", + "serde", + "serde_json", + "shlex", + "tokio", + "zeroize", +] + +[[package]] +name = "rusoto_signature" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eddff187ac18c5a91d9ccda9353f30cf531620dce437c4db661dfe2e23b2029" +dependencies = [ + "base64 0.12.3", + "bytes", + "futures", + "hex", + "hmac", + "http", + "hyper", + "log", + "md5", + "percent-encoding", + "pin-project", + "rusoto_credential", + "rustc_version", + "serde", + "sha2", + "time 0.2.16", + "tokio", +] + +[[package]] +name = "rusoto_ssm" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e9224ad97be05dae1a0f6745252f3fa1430d6bea97c93f59e99edaeb7d70f5d" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core", + "serde", + "serde_json", +] + +[[package]] +name = "rust-argon2" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +dependencies = [ + "base64 0.11.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + +[[package]] +name = 
"rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +dependencies = [ + "base64 0.11.0", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-native-certs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" +dependencies = [ + "openssl-probe", + "rustls", + "schannel", + "security-framework", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi 0.3.9", +] + +[[package]] +name = "scoped-tls" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" + +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_plain" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +dependencies = [ + "dtoa", + "itoa", + "serde", + "url", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + +[[package]] +name = "shared_child" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "shlex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" + +[[package]] +name = "signal-hook-registry" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +dependencies = [ + "arc-swap", + "libc", +] + +[[package]] +name = "simplelog" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cf9a002ccce717d066b3ccdb8a28829436249867229291e91b25d99bd723f0d" +dependencies = [ + "chrono", + "log", + "term", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "snafu" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f5aed652511f5c9123cf2afbe9c244c29db6effa2abb05c866e965c82405ce" +dependencies = [ + "backtrace", + "doc-comment", + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebf8f7d5720104a9df0f7076a8682024e958bba0fe9848767bb44f251f3648e9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "socket2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi 0.3.9", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "standback" +version = "0.2.9" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +dependencies = [ + "version_check", +] + +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "structopt" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "syn" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "syn-mid" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi 0.3.9", +] + +[[package]] +name = "term" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" +dependencies = [ + "dirs", + "winapi 0.3.9", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "time" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +dependencies = [ + "cfg-if", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi 0.3.9", +] + +[[package]] +name = "time-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + +[[package]] +name = "tokio" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "iovec", + "lazy_static", + "libc", + "memchr", + "mio", + "mio-named-pipes", + "mio-uds", + "num_cpus", + "pin-project-lite", + "signal-hook-registry", + "slab", + "tokio-macros", + "winapi 0.3.9", +] + +[[package]] +name = "tokio-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15cb62a0d2770787abc96e99c1cd98fcf17f94959f3af63ca85bdfb203f051b4" +dependencies = [ + "futures-core", + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +dependencies = [ + "serde", +] + +[[package]] +name = "tough" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb6b9c013f32b9fc52a70268c17b5ffb0562323b9213921f22f1b755cf2c4ad" +dependencies = [ + "chrono", + "hex", + "olpc-cjson", + "pem 0.7.0", + "ring", + "serde", + "serde_json", + "serde_plain", + "snafu", + "untrusted", + "url", 
+ "walkdir", +] + +[[package]] +name = "tough" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71b8d86994e9da2233fc30c54223bc448a15bdb782f8060c66107fc6b88619ba" +dependencies = [ + "chrono", + "globset", + "hex", + "log", + "olpc-cjson", + "pem 0.8.1", + "reqwest", + "ring", + "serde", + "serde_json", + "serde_plain", + "snafu", + "untrusted", + "url", + "walkdir", +] + +[[package]] +name = "tough-ssm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54e670640f67e719671a87fac948eabba0fd33633aa8be7804b38a1a1d2da32b" +dependencies = [ + "rusoto_core", + "rusoto_credential", + "rusoto_ssm", + "serde", + "serde_json", + "snafu", + "tokio", + "tough 0.8.0", +] + +[[package]] +name = "tower-service" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" + +[[package]] +name = "try-lock" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "update_metadata" +version = "0.1.0" +dependencies = [ + "chrono", + "parse-datetime", + "rand", + "regex", + "semver", + "serde", + "serde_json", + "serde_plain", + "snafu", + "toml", + "tough 0.7.1", +] + +[[package]] +name = "url" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +dependencies = [ + "idna", + "matches", + "percent-encoding", + "serde", +] + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "walkdir" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +dependencies = [ + "same-file", + "winapi 0.3.9", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasm-bindgen" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a634620115e4a229108b71bde263bb4220c483b3f07f5ba514ee8d15064c4c2" +dependencies = [ + "cfg-if", + "serde", + "serde_json", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e53963b583d18a5aa3aaae4b4c1cb535218246131ba22a71f05b518098571df" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba48d66049d2a6cc8488702e7259ab7afc9043ad0dc5448444f46f2a453b362" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fcfd5ef6eec85623b4c6e844293d4516470d8f19cd72d0d12246017eb9060b8" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9adff9ee0e94b926ca81b57f57f86d5545cdcb1d259e21ec9bdd95b901754c75" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7b90ea6c632dd06fd765d44542e234d5e63d9bb917ecd64d79778a13bd79ae" + +[[package]] +name = "web-sys" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "863539788676619aac1a23e2df3655e96b32b0e05eb72ca34ba045ad573c625d" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +dependencies = [ + "webpki", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winreg" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "xml-rs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" diff --git a/tools/Cargo.toml b/tools/Cargo.toml new file mode 100644 index 00000000..68d887d6 --- /dev/null +++ b/tools/Cargo.toml @@ -0,0 +1,5 @@ +[workspace] +members = [ + "buildsys", + "pubsys" +] diff --git a/tools/buildsys/Cargo.lock b/tools/buildsys/Cargo.lock deleted file mode 100644 index 5fbb062c..00000000 --- a/tools/buildsys/Cargo.lock +++ /dev/null @@ -1,1268 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "buildsys" -version = "0.1.0" -dependencies = [ - "duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "nonzero_ext 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bumpalo" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bytes" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cc" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "core-foundation" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "core-foundation-sys" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ct-logs" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "generic-array 0.12.3 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "dtoa" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "duct" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "os_pipe 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "encoding_rs" -version = "0.8.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fnv" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-channel" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-nested 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "getrandom" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "h2" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hermit-abi" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "http" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "httparse" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "hyper" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 
(registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "hyper-rustls" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "idna" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "indexmap" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itoa" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "js-sys" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.68" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memchr" -version = "2.3.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miow" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "net2" -version = "0.2.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "nonzero_ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "num_cpus" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "once_cell" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "os_pipe" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-project" -version = "0.4.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "pin-utils" -version = "0.1.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ppv-lite86" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro-hack" -version = "0.5.15" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro-nested" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro2" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "reqwest" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ring" -version = "0.16.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustls" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustls-native-certs" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "same-file" 
-version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "schannel" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sct" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "security-framework" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "security-framework-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde" -version = "1.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_derive" -version = "1.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_json" -version = "1.0.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_plain" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "sha2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fake-simd 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "shared_child" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "smallvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "snafu" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu-derive 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "snafu-derive" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "syn" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "time" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tokio-rustls" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = 
"tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "toml" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "try-lock" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "typenum" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-xid" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "untrusted" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "url" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "version_check" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "walkdir" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wasm-bindgen" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "web-sys" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "webpki" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "webpki-roots" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-i686-pc-windows-gnu 
0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-util" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -"checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" -"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" -"checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" -"checksum ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" -"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -"checksum doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" -"checksum duct 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1607fa68d55be208e83bcfbcfffbc1ec65c9fbcf9eb1a5d548dc3ac0100743b0" -"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" -"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" -"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" -"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" -"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" -"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" -"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" -"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -"checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" -"checksum http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" -"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum hyper 0.13.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" -"checksum hyper-rustls 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" -"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" -"checksum js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" -"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -"checksum mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum nonzero_ext 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" -"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum once_cell 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" -"checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" 
-"checksum os_pipe 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "db4d06355a7090ce852965b2d08e11426c315438462638c6d721448d0b47aa22" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" -"checksum pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" -"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" -"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" -"checksum proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" -"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" -"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum reqwest 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" -"checksum ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" -"checksum rustls 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" -"checksum rustls-native-certs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" -"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" -"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -"checksum schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = 
"039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" -"checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" -"checksum security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" -"checksum serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" -"checksum serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" -"checksum serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)" = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" -"checksum serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" -"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -"checksum sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" -"checksum shared_child 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" -"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" -"checksum snafu 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ec0ae2ed980f26e1ad62e717feb01df90731df56887b5391a2c79f9f6805be" -"checksum snafu-derive 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0ec32ba84a7a86aeb0bc32fd0c46d31b0285599f68ea72e87eff6127889d99e1" -"checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)" = "ee5a0dd887e37d37390c13ff8ac830f992307fe30a1fff0ab8427af67211ba28" -"checksum tokio-rustls 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" -"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -"checksum toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" -"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" -"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" -"checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -"checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" -"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" -"checksum wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" -"checksum wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" -"checksum wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" -"checksum wasm-bindgen-macro-support 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" -"checksum wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" -"checksum web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" -"checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" -"checksum webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-build 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index f658c2b7..aeb01853 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -12,12 +12,12 @@ exclude = ["README.md"] duct = "0.13.0" hex = "0.4.0" rand = { version = "0.7", default-features = false, features = ["std"] } -reqwest = { version = "0.10", default-features = false, features = ["rustls-tls", "blocking"] } +reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" sha2 = "0.8" snafu = "0.6" toml = "0.5" -url = "2.1" +url = { version = "2.1.0", features = ["serde"] } walkdir = "2" nonzero_ext = "0.2.0" diff --git a/tools/buildsys/deny.toml b/tools/buildsys/deny.toml index 970abf3a..91e56ffc 100644 --- a/tools/buildsys/deny.toml +++ b/tools/buildsys/deny.toml @@ -18,7 +18,7 @@ allow = [ "MIT", "OpenSSL", "Unlicense", - #"Zlib", # OK but currently unused; commenting to prevent warning + "Zlib", ] exceptions = [ diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml new file mode 100644 index 00000000..a3a0ca0f --- /dev/null +++ b/tools/pubsys/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "pubsys" +version = "0.1.0" +authors = ["Zac Mrowicki ", "Tom Kirchner "] +license = "Apache-2.0 OR MIT" +edition = "2018" +publish = false + +[dependencies] +chrono = "0.4" +clap = "2.33" +lazy_static = "1.4" +log = "0.4" +parse-datetime = { path = "../../sources/parse-datetime" } +# Need to bring in reqwest with a TLS feature so tough can support TLS repos. +reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } +simplelog = "0.7" +snafu = "0.6" +semver = "0.9" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +structopt = { version = "0.3", default-features = false } +toml = "0.5" +tough = { version = "0.8", features = ["http"] } +tough-ssm = "0.3" +update_metadata = { path = "../../sources/updater/update_metadata/" } +url = { version = "2.1.0", features = ["serde"] } +tempfile = "3.1" diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example new file mode 100644 index 00000000..2bb5276f --- /dev/null +++ b/tools/pubsys/Infra.toml.example @@ -0,0 +1,26 @@ +# This is an example infrastructure configuration for pubsys, the tool that +# creates repos when you call `cargo make repo`. Save a copy as `Infra.toml` +# at the root of the repo, then edit the settings below to match your use case. + +# Path to your root role JSON file. 
+root_role_path = "/home/user/root.json" + +# You would normally create repo signing keys using `tuftool root gen-rsa-key` +# as part of the initial setup of your TUF roles. pubsys assumes a single +# publication key that lives in the snapshot, targets, and timestamp roles. +# Here you specify where that key lives so we can sign the created repo. +# (Don't keep your keys in the repo!) +# You can keep your private key in a file or an SSM parameter; pick one: +# (Need inline table syntax until this is fixed: https://github.com/alexcrichton/toml-rs/issues/225) +signing_keys = { default = { file = { path = "/home/user/key.pem" } } } +# signing_keys = { default = { ssm = { parameter = "/my/parameter" } } } + +# You can have any number of repos defined and build a specific one by running like this: +# cargo make repo -e REPO=myrepo +# If the URLs are uncommented, they will be pulled and used as a starting +# point, and your images (and related files) will be added as a new update in +# the created repo. Otherwise, we build a new repo from scratch. + +[repo.default] +# metadata_base_url = "https://example.com/metadata/" +# targets_url = "https://example.com/targets/" diff --git a/tools/pubsys/deny.toml b/tools/pubsys/deny.toml new file mode 100644 index 00000000..e7afc76f --- /dev/null +++ b/tools/pubsys/deny.toml @@ -0,0 +1,41 @@ +[licenses] +unlicensed = "deny" + +# Deny licenses unless they are specifically listed here +copyleft = "deny" +allow-osi-fsf-free = "neither" +default = "deny" + +# We want really high confidence when inferring licenses from text +confidence-threshold = 0.93 + +allow = [ + "Apache-2.0", + "BSD-2-Clause", + "BSD-3-Clause", + "BSL-1.0", + "CC0-1.0", + "ISC", + "MIT", + "OpenSSL", + "Unlicense", + "Zlib", +] + +exceptions = [ + { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, +] + +[[licenses.clarify]] +name = "ring" +expression = "MIT AND ISC AND OpenSSL" +license-files = [ + { path = "LICENSE", hash = 0xbd0eed23 }, +] + +[[licenses.clarify]] +name = "webpki" +expression = "ISC" +license-files = [ + { path = "LICENSE", hash = 0x001c7e6c }, +] diff --git a/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml b/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml new file mode 100644 index 00000000..7a3a7b85 --- /dev/null +++ b/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml @@ -0,0 +1,3 @@ +snapshot_expiration = '2 weeks' +targets_expiration = '2 weeks' +timestamp_expiration = '1 week' diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs new file mode 100644 index 00000000..0963972c --- /dev/null +++ b/tools/pubsys/src/config.rs @@ -0,0 +1,91 @@ +//! The config module owns the definition and loading process for our configuration sources. + +use crate::deserialize_offset; +use chrono::Duration; +use serde::Deserialize; +use snafu::ResultExt; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; +use url::Url; + +/// Configuration needed to load and create repos +#[derive(Debug, Deserialize)] +pub(crate) struct InfraConfig { + pub(crate) root_role_path: PathBuf, + pub(crate) signing_keys: HashMap, + pub(crate) repo: HashMap, +} + +impl InfraConfig { + /// Deserializes an InfraConfig from a given path + pub(crate) fn from_path

(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let infra_config_str = fs::read_to_string(path).context(error::File { path })?; + toml::from_str(&infra_config_str).context(error::InvalidToml { path }) + } +} + +/// Location of signing keys +// These variant names are lowercase because they have to match the text in Infra.toml, and it's +// more common for TOML config to be lowercase. +#[allow(non_camel_case_types)] +#[derive(Debug, Deserialize)] +pub(crate) enum SigningKeyConfig { + file { path: PathBuf }, + ssm { parameter: String }, +} + +/// Location of existing published repo +#[derive(Debug, Deserialize)] +pub(crate) struct RepoConfig { + pub(crate) metadata_base_url: Option, + pub(crate) targets_url: Option, +} + +/// How long it takes for each metadata type to expire +#[derive(Debug, Deserialize)] +pub(crate) struct RepoExpirationPolicy { + #[serde(deserialize_with = "deserialize_offset")] + pub(crate) snapshot_expiration: Duration, + #[serde(deserialize_with = "deserialize_offset")] + pub(crate) targets_expiration: Duration, + #[serde(deserialize_with = "deserialize_offset")] + pub(crate) timestamp_expiration: Duration, +} + +impl RepoExpirationPolicy { + /// Deserializes a RepoExpirationPolicy from a given path + pub(crate) fn from_path

(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let expiration_str = fs::read_to_string(path).context(error::File { path })?; + toml::from_str(&expiration_str).context(error::InvalidToml { path }) + } +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to read '{}': {}", path.display(), source))] + File { path: PathBuf, source: io::Error }, + + #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] + InvalidToml { + path: PathBuf, + source: toml::de::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs new file mode 100644 index 00000000..c96ce684 --- /dev/null +++ b/tools/pubsys/src/main.rs @@ -0,0 +1,107 @@ +/*! +`pubsys` simplifies the process of publishing Bottlerocket updates. + +Currently implemented: +* building repos, whether starting from an existing repo or from scratch + +To be implemented: +* building AMIs +* updating SSM parameters + +Configuration comes from: +* command-line parameters, to specify basic options and paths to the below files +* Infra.toml, for repo configuration +* Release.toml, for migrations +* Policy files for repo metadata expiration and update wave timing +*/ + +#![deny(rust_2018_idioms)] + +mod config; +mod repo; + +use chrono::Duration; +use parse_datetime::parse_offset; +use semver::Version; +use serde::{Deserialize, Deserializer}; +use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; +use snafu::ResultExt; +use std::path::PathBuf; +use std::process; +use structopt::StructOpt; + +fn run() -> Result<()> { + // Parse and store the args passed to the program + let args = Args::from_args(); + + // TerminalMode::Mixed will send errors to stderr and anything less to stdout. 
+ TermLogger::init(args.log_level, LogConfig::default(), TerminalMode::Mixed) + .context(error::Logger)?; + + match args.subcommand { + SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), + } +} + +fn main() { + if let Err(e) = run() { + eprintln!("{}", e); + process::exit(1); + } +} + +/// Automates publishing of Bottlerocket updates +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +struct Args { + #[structopt(global = true, long, default_value = "INFO")] + /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE + log_level: LevelFilter, + + #[structopt(long, parse(from_os_str))] + /// Path to Infra.toml (NOTE: must be specified before subcommand) + infra_config_path: PathBuf, + + #[structopt(subcommand)] + subcommand: SubCommand, +} + +#[derive(Debug, StructOpt)] +enum SubCommand { + Repo(repo::RepoArgs), +} + +/// Parses a SemVer, stripping a leading 'v' if present +pub(crate) fn friendly_version( + mut version_str: &str, +) -> std::result::Result { + if version_str.starts_with('v') { + version_str = &version_str[1..]; + }; + + Version::parse(version_str) +} + +/// Deserializes a Duration in the form of "in X hours/days/weeks" +pub(crate) fn deserialize_offset<'de, D>(deserializer: D) -> std::result::Result +where + D: Deserializer<'de>, +{ + let s: &str = Deserialize::deserialize(deserializer)?; + parse_offset(s).map_err(serde::de::Error::custom) +} + +mod error { + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(super) enum Error { + #[snafu(display("Logger setup error: {}", source))] + Logger { source: simplelog::TermLogError }, + + #[snafu(display("Failed to build repo: {}", source))] + Repo { source: crate::repo::Error }, + } +} +type Result = std::result::Result; diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs new file mode 100644 index 00000000..5b62b98c --- /dev/null +++ b/tools/pubsys/src/repo.rs @@ -0,0 +1,644 @@ +//! The repo module owns the 'repo' subcommand and controls the process of building a repository. + +mod transport; + +use crate::config::{InfraConfig, RepoExpirationPolicy, SigningKeyConfig}; +use crate::{friendly_version, Args}; +use chrono::{DateTime, Utc}; +use lazy_static::lazy_static; +use log::{debug, info, trace}; +use parse_datetime::parse_datetime; +use semver::Version; +use snafu::{ensure, OptionExt, ResultExt}; +use std::convert::TryInto; +use std::fs::{self, File}; +use std::num::NonZeroU64; +use std::path::{Path, PathBuf}; +use structopt::StructOpt; +use tempfile::{tempdir, NamedTempFile}; +use tough::{ + editor::signed::PathExists, + editor::RepositoryEditor, + key_source::{KeySource, LocalKeySource}, + schema::Target, + ExpirationEnforcement, Limits, Repository, Settings, +}; +use tough_ssm::SsmKeySource; +use transport::RepoTransport; +use update_metadata::{Images, Manifest, Release, UpdateWaves}; +use url::Url; + +lazy_static! 
{ + static ref DEFAULT_START_TIME: DateTime = Utc::now(); +} + +/// Builds Bottlerocket repos using latest build artifacts +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct RepoArgs { + // Metadata about the update + #[structopt(long)] + /// Use this named repo from Infra.toml + repo: String, + #[structopt(long)] + /// The architecture of the repo and the update being added + arch: String, + #[structopt(long, parse(try_from_str=friendly_version))] + /// The version of the update being added + version: Version, + #[structopt(long)] + /// The variant of the update being added + variant: String, + + // The images to add in this update + #[structopt(long, parse(from_os_str))] + /// Path to the image containing the boot partition + boot_image: PathBuf, + #[structopt(long, parse(from_os_str))] + /// Path to the image containing the root partition + root_image: PathBuf, + #[structopt(long, parse(from_os_str))] + /// Path to the image containing the verity hashes + hash_image: PathBuf, + + // Optionally add other files to the repo + #[structopt(long = "link-target", parse(from_os_str))] + /// Optional paths to add as targets and symlink into repo + link_targets: Vec, + #[structopt(long = "copy-target", parse(from_os_str))] + /// Optional paths to add as targets and copy into repo + copy_targets: Vec, + + // Policies that pubsys interprets to set repo parameters + #[structopt(long, parse(from_os_str))] + /// Path to file that defines when repo metadata should expire + repo_expiration_policy_path: PathBuf, + + // Policies that pubsys passes on to other tools + #[structopt(long, parse(from_os_str))] + /// Path to Release.toml + release_config_path: PathBuf, + #[structopt(long, parse(from_os_str))] + /// Path to file that defines when this update will become available + wave_policy_path: PathBuf, + + #[structopt(long, parse(try_from_str = parse_datetime))] + /// When the waves and expiration timer will start; RFC3339 date or "in X hours/days/weeks" + release_start_time: Option>, + + #[structopt(long)] + /// Use this named key from Infra.toml + signing_key: String, + + #[structopt(long, parse(from_os_str))] + /// Where to store the created repo + outdir: PathBuf, +} + +/// Adds update, migrations, and waves to the Manifest +fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> { + // Add update =^..^= =^..^= =^..^= =^..^= + + let filename = |path: &PathBuf| -> Result { + Ok(path + .file_name() + .context(error::InvalidImagePath { path })? + .to_str() + .context(error::NonUtf8Path { path })? 
+ .to_string()) + }; + + let images = Images { + boot: filename(&repo_args.boot_image)?, + root: filename(&repo_args.root_image)?, + hash: filename(&repo_args.hash_image)?, + }; + + info!( + "Adding update to manifest for version: {}, arch: {}, variant: {}", + repo_args.version, repo_args.arch, repo_args.variant + ); + manifest + .add_update( + repo_args.version.clone(), + None, + repo_args.arch.clone(), + repo_args.variant.clone(), + images, + ) + .context(error::AddUpdate)?; + + // Add migrations =^..^= =^..^= =^..^= =^..^= + + info!( + "Using release config from path: {}", + repo_args.release_config_path.display() + ); + let release = + Release::from_path(&repo_args.release_config_path).context(error::UpdateMetadataRead { + path: &repo_args.release_config_path, + })?; + trace!( + "Adding migrations to manifest for versions: {:#?}", + release + .migrations + .keys() + .map(|(from, to)| format!("({}, {})", from, to)) + .collect::>() + ); + // Replace the manifest 'migrations' section with the new data + manifest.migrations = release.migrations; + + // Add update waves =^..^= =^..^= =^..^= =^..^= + + let wave_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME); + info!( + "Using wave policy from path: {}", + repo_args.wave_policy_path.display() + ); + info!( + "Offsets from that file will be added to the release start time of: {}", + wave_start_time + ); + let waves = + UpdateWaves::from_path(&repo_args.wave_policy_path).context(error::UpdateMetadataRead { + path: &repo_args.wave_policy_path, + })?; + manifest + .set_waves( + repo_args.variant.clone(), + repo_args.arch.clone(), + repo_args.version.clone(), + wave_start_time, + &waves, + ) + .context(error::SetWaves { + wave_policy_path: &repo_args.wave_policy_path, + })?; + + Ok(()) +} + +/// Adds targets, expirations, and version to the RepositoryEditor +fn update_editor<'a, P>( + repo_args: &'a RepoArgs, + editor: &mut RepositoryEditor, + targets: impl Iterator, + manifest_path: P, +) -> Result<()> +where + P: AsRef, +{ + // Add targets =^..^= =^..^= =^..^= =^..^= + + for target_path in targets { + debug!("Adding target from path: {}", target_path.display()); + editor + .add_target_path(&target_path) + .context(error::AddTarget { path: &target_path })?; + } + + let manifest_target = Target::from_path(&manifest_path).context(error::BuildTarget { + path: manifest_path.as_ref(), + })?; + debug!("Adding target for manifest.json"); + editor.add_target("manifest.json".to_string(), manifest_target); + + // Add expirations =^..^= =^..^= =^..^= =^..^= + + info!( + "Using repo expiration policy from path: {}", + repo_args.repo_expiration_policy_path.display() + ); + let expiration = RepoExpirationPolicy::from_path(&repo_args.repo_expiration_policy_path) + .context(error::Config)?; + + let expiration_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME); + let snapshot_expiration = expiration_start_time + expiration.snapshot_expiration; + let targets_expiration = expiration_start_time + expiration.targets_expiration; + let timestamp_expiration = expiration_start_time + expiration.timestamp_expiration; + info!( + "Repo expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}", + snapshot_expiration, targets_expiration, timestamp_expiration + ); + editor + .snapshot_expires(snapshot_expiration) + .targets_expires(targets_expiration) + .timestamp_expires(timestamp_expiration); + + // Add version =^..^= =^..^= =^..^= =^..^= + + let seconds = Utc::now().timestamp(); + let unsigned_seconds = 
seconds.try_into().expect("System clock before 1970??"); + let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??"); + debug!("Repo version: {}", version); + editor + .snapshot_version(version) + .targets_version(version) + .timestamp_version(version); + + Ok(()) +} + +/// If the infra config has a repo section defined for the given repo, and it has metadata base and +/// targets URLs defined, returns those URLs, otherwise None. +fn repo_urls<'a>( + repo_args: &RepoArgs, + infra_config: &'a InfraConfig, +) -> Result> { + let repo_config = infra_config + .repo + .get(&repo_args.repo) + .context(error::MissingRepoConfig { + repo: &repo_args.repo, + })?; + + // Check if both URLs are set + if let Some(metadata_base_url) = repo_config.metadata_base_url.as_ref() { + if let Some(targets_url) = repo_config.targets_url.as_ref() { + let base_slash = if metadata_base_url.as_str().ends_with('/') { + "" + } else { + "/" + }; + let metadata_url_str = format!( + "{}{}{}/{}", + metadata_base_url, base_slash, repo_args.variant, repo_args.arch + ); + let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrl { + input: &metadata_url_str, + })?; + + debug!("Using metadata url: {}", metadata_url); + return Ok(Some((metadata_url, targets_url))); + } + } + + Ok(None) +} + +/// Builds an editor and manifest; will start from an existing repo if one is specified in the +/// configuration. Returns Err if we fail to read from the repo. Returns Ok(None) if we detect +/// that the repo does not exist. +fn load_editor_and_manifest

( + root_role_path: P, + metadata_url: &Url, + targets_url: &Url, +) -> Result> +where + P: AsRef, +{ + let root_role_path = root_role_path.as_ref(); + + // Create a temporary directory where the TUF client can store metadata + let workdir = tempdir().context(error::TempDir)?; + let settings = Settings { + root: File::open(root_role_path).context(error::File { + path: root_role_path, + })?, + datastore: workdir.path(), + metadata_base_url: metadata_url.as_str(), + targets_base_url: targets_url.as_str(), + limits: Limits::default(), + expiration_enforcement: ExpirationEnforcement::Safe, + }; + + // Try to load the repo... + let transport = RepoTransport::default(); + match Repository::load(&transport, settings) { + // If we load it successfully, build an editor and manifest from it. + Ok(repo) => { + let reader = repo + .read_target("manifest.json") + .context(error::ReadTarget { + target: "manifest.json", + })? + .with_context(|| error::NoManifest { + metadata_url: metadata_url.clone(), + })?; + let manifest = serde_json::from_reader(reader).context(error::InvalidJson { + path: "manifest.json", + })?; + + let editor = + RepositoryEditor::from_repo(root_role_path, repo).context(error::EditorFromRepo)?; + + Ok(Some((editor, manifest))) + } + // If we fail to load, but we only failed because the repo doesn't exist yet, then start + // fresh by signalling that there is no known repo. Otherwise, fail hard. + Err(e) => { + if transport.repo_not_found.get() { + Ok(None) + } else { + Err(e).with_context(|| error::RepoLoad { + metadata_base_url: metadata_url.clone(), + })? + } + } + } +} + +/// Common entrypoint from main() +pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { + let metadata_out_dir = repo_args + .outdir + .join(&repo_args.variant) + .join(&repo_args.arch); + let targets_out_dir = repo_args.outdir.join("targets"); + + // If the given metadata directory exists, throw an error. We dont want to overwrite a user's + // existing repository. (The targets directory is shared, so it's fine if that exists.) + ensure!( + !Path::exists(&metadata_out_dir), + error::RepoExists { + path: metadata_out_dir + } + ); + + // Build repo =^..^= =^..^= =^..^= =^..^= + + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + + // Build a repo editor and manifest, from an existing repo if available, otherwise fresh + let maybe_urls = repo_urls(&repo_args, &infra_config)?; + let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls { + info!("Found metadata and target URLs, loading existing repository"); + match load_editor_and_manifest(&infra_config.root_role_path, &metadata_url, &targets_url)? 
{ + Some((editor, manifest)) => (editor, manifest), + None => { + info!( + "Did not find repo at '{}', starting a new one", + metadata_url + ); + ( + RepositoryEditor::new(&infra_config.root_role_path) + .context(error::NewEditor)?, + Manifest::default(), + ) + } + } + } else { + info!("Did not find metadata and target URLs in infra config, creating a new repository"); + ( + RepositoryEditor::new(&infra_config.root_role_path).context(error::NewEditor)?, + Manifest::default(), + ) + }; + + // Add update information to manifest + update_manifest(&repo_args, &mut manifest)?; + // Write manifest to tempfile so it can be copied in as target later + let manifest_path = NamedTempFile::new() + .context(error::TempFile)? + .into_temp_path(); + update_metadata::write_file(&manifest_path, &manifest).context(error::ManifestWrite { + path: &manifest_path, + })?; + + // Add manifest and targets to editor + let copy_targets = &repo_args.copy_targets; + let link_targets = repo_args.link_targets.iter().chain(vec![ + &repo_args.boot_image, + &repo_args.root_image, + &repo_args.hash_image, + ]); + let all_targets = copy_targets.iter().chain(link_targets.clone()); + + update_editor(&repo_args, &mut editor, all_targets, &manifest_path)?; + + // Sign repo =^..^= =^..^= =^..^= =^..^= + + let signing_key_config = infra_config + .signing_keys + .get(&repo_args.signing_key) + .context(error::MissingSigningKey { + profile: &repo_args.signing_key, + })?; + + let key_source: Box = match signing_key_config { + SigningKeyConfig::file { path } => Box::new(LocalKeySource { path: path.clone() }), + SigningKeyConfig::ssm { parameter } => Box::new(SsmKeySource { + profile: None, + parameter_name: parameter.clone(), + key_id: None, + }), + }; + + let signed_repo = editor.sign(&[key_source]).context(error::RepoSign)?; + + // Write repo =^..^= =^..^= =^..^= =^..^= + + // Write targets first so we don't have invalid metadata if targets fail + info!("Writing repo targets to: {}", targets_out_dir.display()); + fs::create_dir_all(&targets_out_dir).context(error::CreateDir { + path: &targets_out_dir, + })?; + + // Copy manifest with proper name instead of tempfile name + debug!("Copying manifest.json into {}", targets_out_dir.display()); + signed_repo + .copy_target( + &manifest_path, + &targets_out_dir, + // We should never have matching manifests from different repos + PathExists::Fail, + Some("manifest.json"), + ) + .context(error::CopyTarget { + target: &manifest_path, + path: &targets_out_dir, + })?; + + // Copy / link any other user requested targets + for copy_target in copy_targets { + debug!( + "Copying target '{}' into {}", + copy_target.display(), + targets_out_dir.display() + ); + signed_repo + .copy_target(copy_target, &targets_out_dir, PathExists::Skip, None) + .context(error::CopyTarget { + target: copy_target, + path: &targets_out_dir, + })?; + } + for link_target in link_targets { + debug!( + "Linking target '{}' into {}", + link_target.display(), + targets_out_dir.display() + ); + signed_repo + .link_target(link_target, &targets_out_dir, PathExists::Skip, None) + .context(error::LinkTarget { + target: link_target, + path: &targets_out_dir, + })?; + } + + info!("Writing repo metadata to: {}", metadata_out_dir.display()); + fs::create_dir_all(&metadata_out_dir).context(error::CreateDir { + path: &metadata_out_dir, + })?; + signed_repo + .write(&metadata_out_dir) + .context(error::RepoWrite { + path: &repo_args.outdir, + })?; + + Ok(()) +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + 
use url::Url; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to add new update to manifest: {}", source))] + AddUpdate { + source: update_metadata::error::Error, + }, + + #[snafu(display("Failed to add new target '{}' to repo: {}", path.display(), source))] + AddTarget { + path: PathBuf, + source: tough::error::Error, + }, + + #[snafu(display("Failed to build target metadata from path '{}': {}", path.display(), source))] + BuildTarget { + path: PathBuf, + source: tough::schema::Error, + }, + + #[snafu(display("Failed to copy target '{}' to '{}': {}", target.display(), path.display(), source))] + CopyTarget { + target: PathBuf, + path: PathBuf, + source: tough::error::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { source: crate::config::Error }, + + #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] + CreateDir { path: PathBuf, source: io::Error }, + + #[snafu(display("Failed to create repo editor from given repo: {}", source))] + EditorFromRepo { source: tough::error::Error }, + + #[snafu(display("Failed to read '{}': {}", path.display(), source))] + File { path: PathBuf, source: io::Error }, + + #[snafu(display("Invalid path given for image file: '{}'", path.display()))] + InvalidImagePath { path: PathBuf }, + + #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] + InvalidJson { + path: PathBuf, + source: serde_json::Error, + }, + + #[snafu(display("Failed to symlink target '{}' to '{}': {}", target.display(), path.display(), source))] + LinkTarget { + target: PathBuf, + path: PathBuf, + source: tough::error::Error, + }, + + #[snafu(display("Failed to write Manifest to '{}': {}", path.display(), source))] + ManifestWrite { + path: PathBuf, + source: update_metadata::error::Error, + }, + + #[snafu(display( + "Requested build of repo '{}' that isn't specified in Infra.toml", + repo + ))] + MissingRepoConfig { repo: String }, + + #[snafu(display("No profile '{}' for signing key in Infra.toml", profile))] + MissingSigningKey { profile: String }, + + #[snafu(display("Failed to create new repo editor: {}", source))] + NewEditor { source: tough::error::Error }, + + #[snafu(display("Repo does not have a manifest.json: {}", metadata_url))] + NoManifest { metadata_url: Url }, + + #[snafu(display("Non-UTF8 path '{}' not supported", path.display()))] + NonUtf8Path { path: PathBuf }, + + #[snafu(display("Invalid URL '{}': {}", input, source))] + ParseUrl { + input: String, + source: url::ParseError, + }, + + #[snafu(display("Failed to read target '{}' from repo: {}", target, source))] + ReadTarget { + target: String, + source: tough::error::Error, + }, + + #[snafu(display("Repo exists at '{}' - remove it and try again", path.display()))] + RepoExists { path: PathBuf }, + + #[snafu(display("Could not fetch repo at '{}': {}", url, msg))] + RepoFetch { url: Url, msg: String }, + + #[snafu(display( + "Failed to load repo from metadata URL '{}': {}", + metadata_base_url, + source + ))] + RepoLoad { + metadata_base_url: Url, + source: tough::error::Error, + }, + + #[snafu(display("Requested repository does not exist: '{}'", url))] + RepoNotFound { url: Url }, + + #[snafu(display("Failed to sign repository: {}", source))] + RepoSign { source: tough::error::Error }, + + #[snafu(display("Failed to write repository to {}: {}", path.display(), source))] + RepoWrite { + path: PathBuf, + source: tough::error::Error, + }, + + #[snafu(display("Failed to set 
waves from '{}': {}", wave_policy_path.display(), source))] + SetWaves { + wave_policy_path: PathBuf, + source: update_metadata::error::Error, + }, + + #[snafu(display("Failed to create tempdir: {}", source))] + TempDir { source: io::Error }, + + #[snafu(display("Failed to create temporary file: {}", source))] + TempFile { source: io::Error }, + + #[snafu(display("Failed to read update metadata '{}': {}", path.display(), source))] + UpdateMetadataRead { + path: PathBuf, + source: update_metadata::error::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/repo/transport.rs b/tools/pubsys/src/repo/transport.rs new file mode 100644 index 00000000..40b590e0 --- /dev/null +++ b/tools/pubsys/src/repo/transport.rs @@ -0,0 +1,63 @@ +use super::error; +use std::cell::Cell; +use std::io::Read; +use tough::{FilesystemTransport, HttpTransport, Transport}; +use url::Url; + +/// RepoTransport delegates to FilesystemTransport or HttpTransport based on the url scheme. If we +/// detect that the repo isn't found we return a special error so we can start a new repo. +#[derive(Debug, Default, Clone)] +pub(crate) struct RepoTransport { + // If we fail to fetch the repo, we need a way of conveying whether it happened because the + // repo doesn't exist or because we failed to fetch/load a repo that does exist. This + // information can be used to determine whether we want to start a new repo from scratch or to + // fail early, for example. + // + // tough uses a trait object to represent the source error inside its Error::Transport variant, + // so we can't check our own, inner error type to determine which of our variants is inside. + // Also, it defines the `fetch` method of `Transport` to take an immutable reference to self, + // so we can't use a struct field naively to communicate back. + // + // So, we use this Cell to safely convey the information outward in our single-threaded usage. + pub(crate) repo_not_found: Cell, +} + +impl Transport for RepoTransport { + type Stream = Box; + type Error = error::Error; + + fn fetch(&self, url: Url) -> std::result::Result { + if url.scheme() == "file" { + match FilesystemTransport.fetch(url.clone()) { + Ok(reader) => Ok(Box::new(reader)), + Err(e) => match e.kind() { + std::io::ErrorKind::NotFound => { + self.repo_not_found.set(true); + error::RepoNotFound { url }.fail() + } + _ => error::RepoFetch { + url, + msg: e.to_string(), + } + .fail(), + }, + } + } else { + let transport = HttpTransport::new(); + match transport.fetch(url.clone()) { + Ok(reader) => Ok(Box::new(reader)), + Err(e) => match e { + tough::error::Error::HttpFetch { .. } => { + self.repo_not_found.set(true); + error::RepoNotFound { url }.fail() + } + _ => error::RepoFetch { + url, + msg: e.to_string(), + } + .fail(), + }, + } + } + } +} From b9223753ad77498a91c4afeb51a5d53dd294744a Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 23 Jul 2020 13:36:58 -0700 Subject: [PATCH 0307/1356] Change default variant to aws-k8s-1.17 --- BUILDING.md | 10 +++++----- README.md | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 17276829..4e58c9c6 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -53,11 +53,11 @@ The script has some assumptions about your setup, in particular that you: * have a few other common tools installed, like `jq`, `du`, and `rsync` First, decompress the images. -(Note: these filenames assume an `x86_64` architecture and `aws-k8s-1.15` [variant](README.md).) 
+(Note: these filenames assume an `x86_64` architecture and `aws-k8s-1.17` [variant](README.md).) ``` -lz4 -d build/latest/bottlerocket-aws-k8s-1.15-x86_64.img.lz4 && \ -lz4 -d build/latest/bottlerocket-aws-k8s-1.15-x86_64-data.img.lz4 +lz4 -d build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img.lz4 && \ +lz4 -d build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img.lz4 ``` Next, register an AMI: @@ -65,8 +65,8 @@ Next, register an AMI: ``` bin/amiize.sh --name YOUR-AMI-NAME-HERE \ --ssh-keypair YOUR-EC2-SSH-KEYPAIR-NAME-HERE \ - --root-image build/latest/bottlerocket-aws-k8s-1.15-x86_64.img \ - --data-image build/latest/bottlerocket-aws-k8s-1.15-x86_64-data.img \ + --root-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img \ + --data-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img \ --region us-west-2 \ --instance-type m3.xlarge \ --arch x86_64 \ diff --git a/README.md b/README.md index 0988c11d..3df45bb8 100644 --- a/README.md +++ b/README.md @@ -45,9 +45,9 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s-1.15` variant will produce an image named `bottlerocket-aws-k8s-1.15-x86_64--.img`. +For example, an `x86_64` build of the `aws-k8s-1.17` variant will produce an image named `bottlerocket-aws-k8s-1.17-x86_64--.img`. -Our first supported variant, `aws-k8s-1.15`, supports EKS as described above. +Our first supported variants, `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17`, supports EKS as described above. ## Setup From 579b0b66a36db89f97e9989921dcc8bebc11715d Mon Sep 17 00:00:00 2001 From: Samuel Karp Date: Fri, 17 Jul 2020 14:36:42 -0700 Subject: [PATCH 0308/1356] .github: add aws-ecs-1 variant to Build workflow --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c8bc5680..aeb97fca 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17] + variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: From 24072b52dad99e328b0adda079c0e794599ff1bb Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 27 Jul 2020 18:16:56 -0700 Subject: [PATCH 0309/1356] pubsys: minor comment changes in Infra.toml * Corrects the env variable to override for changing the `repo` pubsys reads from. Should be `PUBLISH_REPO` and not `REPO`. * Adds a similar comment for changing the `signing-key` used by pubsys. --- tools/pubsys/Infra.toml.example | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 2bb5276f..b60dc11e 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -9,6 +9,8 @@ root_role_path = "/home/user/root.json" # as part of the initial setup of your TUF roles. pubsys assumes a single # publication key that lives in the snapshot, targets, and timestamp roles. 
# Here you specify where that key lives so we can sign the created repo. +# You can specify multiple keys, if you like, and select one by name: +# cargo make repo -e PUBLISH_KEY=mysigningkey # (Don't keep your keys in the repo!) # You can keep your private key in a file or an SSM parameter; pick one: # (Need inline table syntax until this is fixed: https://github.com/alexcrichton/toml-rs/issues/225) @@ -16,7 +18,7 @@ signing_keys = { default = { file = { path = "/home/user/key.pem" } } } # signing_keys = { default = { ssm = { parameter = "/my/parameter" } } } # You can have any number of repos defined and build a specific one by running like this: -# cargo make repo -e REPO=myrepo +# cargo make repo -e PUBLISH_REPO=myrepo # If the URLs are uncommented, they will be pulled and used as a starting # point, and your images (and related files) will be added as a new update in # the created repo. Otherwise, we build a new repo from scratch. From 830703804a4094c3646e703eae28582ba20c12b3 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sat, 8 Aug 2020 20:54:41 -0700 Subject: [PATCH 0310/1356] Update kernel to 5.4.50-25.83 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index 35fc14a2..b68bc635 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/92ec64be321f67c52afa1eb77b3c301b082c2850ae3d45609bf4802cd6a20400/kernel-5.4.46-19.75.amzn2.src.rpm" -sha512 = "0ebc6e27448513ffc40d295f8a844d367a345ed242e19c862b1290bec895100dd220526c9f3b274bc75399b35c03c69356ce508872669c1d155dd47a91981cbe" +url = "https://cdn.amazonlinux.com/blobstore/9e3beaecef0b030d83fb215be7ca67c01009cfec52fe9b12eb4b24fdb46eebce/kernel-5.4.50-25.83.amzn2.src.rpm" +sha512 = "edc81ee7acdb9f34da2ca1a9ecef42a8c8daab01b7bc0fb130c04d53278091856fa9c65b740f41839a5d65b374f6953d553e6076c94aea578469ef0181014a76" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 3c2e9173..fa97cc9a 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.46 +Version: 5.4.50 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/92ec64be321f67c52afa1eb77b3c301b082c2850ae3d45609bf4802cd6a20400/kernel-5.4.46-19.75.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9e3beaecef0b030d83fb215be7ca67c01009cfec52fe9b12eb4b24fdb46eebce/kernel-5.4.50-25.83.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc From 943c4fdc3220e9ac56b7dc6eb32110849c7e56d0 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 9 Aug 2020 17:05:59 -0700 Subject: [PATCH 0311/1356] Update Rust deps in tools/ There were only minor compatibility changes required in buildsys (from sha2). 
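As a rough sketch of that API difference (assuming sha2 0.9 and the `hex` crate; `checkout_digest` is an illustrative name, not a buildsys function), the streaming `Digest` calls change like this:

```
// Sketch only: sha2 0.8's `input`/`result` become `update`/`finalize` in 0.9.
use sha2::{Digest, Sha512};

fn checkout_digest(root: &str) -> String {
    let mut d = Sha512::new();
    d.update(root);             // sha2 0.8: d.input(root)
    hex::encode(d.finalize())   // sha2 0.8: hex::encode(d.result())
}
```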
--- tools/Cargo.lock | 404 ++++++++++++++++++++-------------- tools/buildsys/Cargo.toml | 2 +- tools/buildsys/src/builder.rs | 4 +- tools/buildsys/src/cache.rs | 2 +- tools/pubsys/Cargo.toml | 4 +- 5 files changed, 239 insertions(+), 177 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index be1ac497..5595483d 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -11,9 +11,9 @@ dependencies = [ [[package]] name = "adler" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc9a9dd069569f212bc4330af9f17c4afb5e8ce185e83dbb14f1349dda18b10" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "aho-corasick" @@ -137,7 +137,16 @@ dependencies = [ "block-padding", "byte-tools", "byteorder", - "generic-array", + "generic-array 0.12.3", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -169,7 +178,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2", + "sha2 0.9.1", "snafu", "toml", "url", @@ -196,12 +205,9 @@ checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b" -dependencies = [ - "loom", -] +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cargo-readme" @@ -244,9 +250,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.1" +version = "2.33.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" +checksum = "10040cdf04294b565d9e0319955430099ec3813a64c952b86a41200ad714ae48" dependencies = [ "ansi_term", "atty", @@ -279,6 +285,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + [[package]] name = "crossbeam-utils" version = "0.7.2" @@ -296,7 +308,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" dependencies = [ - "generic-array", + "generic-array 0.12.3", "subtle", ] @@ -315,7 +327,16 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array", + "generic-array 0.12.3", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -502,25 +523,22 @@ dependencies = [ ] [[package]] -name = "generator" -version = "0.6.21" +name = "generic-array" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" +checksum = 
"c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", + "typenum", ] [[package]] name = "generic-array" -version = "0.12.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", + "version_check", ] [[package]] @@ -555,9 +573,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" dependencies = [ "bytes", "fnv", @@ -566,10 +584,19 @@ dependencies = [ "futures-util", "http", "indexmap", - "log", "slab", "tokio", "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" +dependencies = [ + "autocfg", ] [[package]] @@ -603,7 +630,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" dependencies = [ "crypto-mac", - "digest", + "digest 0.8.1", ] [[package]] @@ -635,9 +662,9 @@ checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] name = "hyper" -version = "0.13.6" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" +checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" dependencies = [ "bytes", "futures-channel", @@ -648,12 +675,12 @@ dependencies = [ "http-body", "httparse", "itoa", - "log", "pin-project", "socket2", "time 0.1.43", "tokio", "tower-service", + "tracing", "want", ] @@ -668,10 +695,26 @@ dependencies = [ "futures-util", "hyper", "log", - "rustls", + "rustls 0.17.0", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.13.1", + "webpki", +] + +[[package]] +name = "hyper-rustls" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +dependencies = [ + "bytes", + "futures-util", + "hyper", + "log", + "rustls 0.18.0", + "tokio", + "tokio-rustls 0.14.0", "webpki", ] @@ -688,11 +731,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.4.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" dependencies = [ "autocfg", + "hashbrown", ] [[package]] @@ -704,6 +748,12 @@ dependencies = [ "libc", ] +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + [[package]] name = "itoa" version = "0.4.6" @@ -712,9 +762,9 @@ checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "js-sys" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4b9172132a62451e56142bff9afc91c8e4a4500aa5b847da36815b63bfda916" +checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" dependencies = [ "wasm-bindgen", ] @@ -737,28 +787,17 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.72" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" +checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" [[package]] name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "loom" -version = "0.3.4" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", - "generator", - "scoped-tls", ] [[package]] @@ -943,6 +982,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "openssl-probe" version = "0.1.2" @@ -968,17 +1013,6 @@ dependencies = [ "snafu", ] -[[package]] -name = "pem" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1581760c757a756a41f0ee3ff01256227bdf64cb752839779b95ffb01c59793" -dependencies = [ - "base64 0.11.0", - "lazy_static", - "regex", -] - [[package]] name = "pem" version = "0.8.1" @@ -998,18 +1032,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" dependencies = [ "proc-macro2", "quote", @@ -1036,9 +1070,9 @@ checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", @@ -1049,22 +1083,20 @@ dependencies = [ [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ 
"proc-macro2", "quote", - "syn", - "syn-mid", "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" [[package]] name = "proc-macro-nested" @@ -1074,9 +1106,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" dependencies = [ "unicode-xid", ] @@ -1091,7 +1123,7 @@ dependencies = [ "log", "parse-datetime", "reqwest", - "semver", + "semver 0.10.0", "serde", "serde_json", "simplelog", @@ -1099,7 +1131,7 @@ dependencies = [ "structopt", "tempfile", "toml", - "tough 0.8.0", + "tough", "tough-ssm", "update_metadata", "url", @@ -1157,9 +1189,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" @@ -1201,9 +1233,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" dependencies = [ "base64 0.12.3", "bytes", @@ -1213,7 +1245,8 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.21.0", + "ipnet", "js-sys", "lazy_static", "log", @@ -1221,11 +1254,11 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.18.0", "serde", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.14.0", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1262,7 +1295,7 @@ dependencies = [ "hmac", "http", "hyper", - "hyper-rustls", + "hyper-rustls 0.20.0", "lazy_static", "log", "md5", @@ -1273,7 +1306,7 @@ dependencies = [ "rustc_version", "serde", "serde_json", - "sha2", + "sha2 0.8.2", "tokio", "xml-rs", ] @@ -1318,7 +1351,7 @@ dependencies = [ "rusoto_credential", "rustc_version", "serde", - "sha2", + "sha2 0.8.2", "time 0.2.16", "tokio", ] @@ -1361,7 +1394,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver", + "semver 0.9.0", ] [[package]] @@ -1377,6 +1410,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +dependencies = [ + "base64 0.12.3", + "log", + "ring", + "sct", + "webpki", +] + [[package]] name = "rustls-native-certs" version = "0.3.0" @@ -1384,7 +1430,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.17.0", "schannel", 
"security-framework", ] @@ -1414,12 +1460,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" - [[package]] name = "sct" version = "0.6.0" @@ -1458,6 +1498,15 @@ name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" dependencies = [ "semver-parser", "serde", @@ -1491,9 +1540,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +checksum = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c" dependencies = [ "itoa", "ryu", @@ -1533,10 +1582,23 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.7.3", + "digest 0.8.1", "fake-simd", - "opaque-debug", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] @@ -1557,9 +1619,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" dependencies = [ "arc-swap", "libc", @@ -1567,13 +1629,13 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.7.6" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf9a002ccce717d066b3ccdb8a28829436249867229291e91b25d99bd723f0d" +checksum = "2b2736f58087298a448859961d3f4a0850b832e72619d75adc69da7993c2cd3c" dependencies = [ "chrono", "log", - "term", + "termcolor", ] [[package]] @@ -1688,9 +1750,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +checksum = "de5472fb24d7e80ae84a7801b7978f95a19ec32cb1876faea59ab711eb901976" dependencies = [ "clap", "lazy_static", @@ -1699,9 +1761,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +checksum = "1e0eb37335aeeebe51be42e2dc07f031163fbabfa6ac67d7ea68b5c2f68d5f99" dependencies = [ "heck", "proc-macro-error", @@ -1718,26 +1780,15 @@ checksum = 
"2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "syn" -version = "1.0.33" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] -[[package]] -name = "syn-mid" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tempfile" version = "3.1.0" @@ -1753,13 +1804,12 @@ dependencies = [ ] [[package]] -name = "term" -version = "0.6.1" +name = "termcolor" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" +checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" dependencies = [ - "dirs", - "winapi 0.3.9", + "winapi-util", ] [[package]] @@ -1836,9 +1886,9 @@ checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" [[package]] name = "tokio" -version = "0.2.21" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" dependencies = [ "bytes", "fnv", @@ -1876,7 +1926,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15cb62a0d2770787abc96e99c1cd98fcf17f94959f3af63ca85bdfb203f051b4" dependencies = [ "futures-core", - "rustls", + "rustls 0.17.0", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +dependencies = [ + "futures-core", + "rustls 0.18.0", "tokio", "webpki", ] @@ -1904,26 +1966,6 @@ dependencies = [ "serde", ] -[[package]] -name = "tough" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb6b9c013f32b9fc52a70268c17b5ffb0562323b9213921f22f1b755cf2c4ad" -dependencies = [ - "chrono", - "hex", - "olpc-cjson", - "pem 0.7.0", - "ring", - "serde", - "serde_json", - "serde_plain", - "snafu", - "untrusted", - "url", - "walkdir", -] - [[package]] name = "tough" version = "0.8.0" @@ -1935,7 +1977,7 @@ dependencies = [ "hex", "log", "olpc-cjson", - "pem 0.8.1", + "pem", "reqwest", "ring", "serde", @@ -1960,7 +2002,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough 0.8.0", + "tough", ] [[package]] @@ -1969,11 +2011,31 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +[[package]] +name = "tracing" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" +dependencies = [ + "cfg-if", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d593f98af59ebc017c0648f0117525db358745a8894a8d684e185ba3f45954f9" +dependencies = [ + "lazy_static", +] + [[package]] name = "try-lock" -version = "0.2.2" +version = "0.2.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" @@ -2040,13 +2102,13 @@ dependencies = [ "parse-datetime", "rand", "regex", - "semver", + "semver 0.10.0", "serde", "serde_json", "serde_plain", "snafu", "toml", - "tough 0.7.1", + "tough", ] [[package]] @@ -2102,9 +2164,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.64" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a634620115e4a229108b71bde263bb4220c483b3f07f5ba514ee8d15064c4c2" +checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" dependencies = [ "cfg-if", "serde", @@ -2114,9 +2176,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.64" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e53963b583d18a5aa3aaae4b4c1cb535218246131ba22a71f05b518098571df" +checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" dependencies = [ "bumpalo", "lazy_static", @@ -2129,9 +2191,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.14" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba48d66049d2a6cc8488702e7259ab7afc9043ad0dc5448444f46f2a453b362" +checksum = "95f8d235a77f880bcef268d379810ea6c0af2eacfa90b1ad5af731776e0c4699" dependencies = [ "cfg-if", "js-sys", @@ -2141,9 +2203,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.64" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcfd5ef6eec85623b4c6e844293d4516470d8f19cd72d0d12246017eb9060b8" +checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2151,9 +2213,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.64" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9adff9ee0e94b926ca81b57f57f86d5545cdcb1d259e21ec9bdd95b901754c75" +checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" dependencies = [ "proc-macro2", "quote", @@ -2164,15 +2226,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.64" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7b90ea6c632dd06fd765d44542e234d5e63d9bb917ecd64d79778a13bd79ae" +checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" [[package]] name = "web-sys" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "863539788676619aac1a23e2df3655e96b32b0e05eb72ca34ba045ad573c625d" +checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index aeb01853..5be0f61f 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -15,7 +15,7 @@ rand = { version = "0.7", default-features = false, features = ["std"] } reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" 
-sha2 = "0.8" +sha2 = "0.9" snafu = "0.6" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 4d5976cc..fdceec15 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -121,8 +121,8 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> // Compute a per-checkout prefix for the tag to avoid collisions. let mut d = Sha512::new(); - d.input(&root); - let digest = hex::encode(d.result()); + d.update(&root); + let digest = hex::encode(d.finalize()); let suffix = &digest[..12]; let tag = format!("{}-{}", tag, suffix); diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 50ebe04e..6d3e6a47 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -115,7 +115,7 @@ impl LookasideCache { let mut d = Sha512::new(); io::copy(&mut f, &mut d).context(error::ExternalFileLoad { path })?; - let digest = hex::encode(d.result()); + let digest = hex::encode(d.finalize()); ensure!(digest == hash, error::ExternalFileVerify { path, hash }); Ok(()) diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index a3a0ca0f..3ed616c4 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -14,9 +14,9 @@ log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } -simplelog = "0.7" +simplelog = "0.8" snafu = "0.6" -semver = "0.9" +semver = "0.10.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } From 93b4769950f168ef50698bbbc9d6951f2f9c95b3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 30 Jul 2020 18:59:34 +0000 Subject: [PATCH 0312/1356] kernel: set checkreqprot=0 by default The feature will eventually be deprecated so that only the secure form is available, per upstream discussion: https://patchwork.kernel.org/patch/11324099/ Signed-off-by: Ben Cressey --- packages/kernel/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index a539b86c..1494d44e 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -34,6 +34,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=n # Do not allow SELinux to use `enforcing=0` behavior. CONFIG_SECURITY_SELINUX_DEVELOP=n +# Check the protection applied by the kernel for mmap and mprotect, +# rather than the protection requested by userspace. +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 + # enable /proc/config.gz CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y From 9326b680a52a1cad5ef7f7265fc318b89389e3a2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 2 Aug 2020 21:47:32 +0000 Subject: [PATCH 0313/1356] selinux: move policy directory back to /etc To allow the policy to be extended or modified at runtime, we need to store the files in /etc rather than on the immutable root filesystem. 
Signed-off-by: Ben Cressey --- tools/rpm2img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index d02795bc..d74575ad 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -34,7 +34,7 @@ BOOT_MOUNT="$(mktemp -d)" DATA_MOUNT="$(mktemp -d)" EFI_MOUNT="$(mktemp -d)" -SELINUX_ROOT="/usr/lib/selinux" +SELINUX_ROOT="/etc/selinux" SELINUX_POLICY="fortified" SELINUX_FILE_CONTEXTS="${ROOT_MOUNT}/${SELINUX_ROOT}/${SELINUX_POLICY}/contexts/files/file_contexts" From 0688e8b29c4c140a98bf5854bebe3d1046ebe5a6 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 28 Jul 2020 23:25:43 +0000 Subject: [PATCH 0314/1356] Make all fields of Infra.toml optional Pubsys subcommands do not use the same parts of Infra.toml, so they have to be optional if we want to have a common config file. The subcommands will confirm that the necessary fields are present. --- tools/pubsys/src/config.rs | 6 +++--- tools/pubsys/src/repo.rs | 39 ++++++++++++++++++++++---------------- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs index 0963972c..9d9834f7 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys/src/config.rs @@ -12,9 +12,9 @@ use url::Url; /// Configuration needed to load and create repos #[derive(Debug, Deserialize)] pub(crate) struct InfraConfig { - pub(crate) root_role_path: PathBuf, - pub(crate) signing_keys: HashMap, - pub(crate) repo: HashMap, + pub(crate) root_role_path: Option, + pub(crate) signing_keys: Option>, + pub(crate) repo: Option>, } impl InfraConfig { diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 5b62b98c..5a4ada78 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -248,9 +248,13 @@ fn repo_urls<'a>( ) -> Result> { let repo_config = infra_config .repo + .as_ref() + .context(error::MissingConfig { + missing: "repo section", + })? .get(&repo_args.repo) - .context(error::MissingRepoConfig { - repo: &repo_args.repo, + .context(error::MissingConfig { + missing: format!("definition for repo {}", &repo_args.repo), })?; // Check if both URLs are set @@ -364,12 +368,18 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { ); let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; trace!("Parsed infra config: {:?}", infra_config); + let root_role_path = infra_config + .root_role_path + .as_ref() + .context(error::MissingConfig { + missing: "root_role_path", + })?; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh let maybe_urls = repo_urls(&repo_args, &infra_config)?; let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls { info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(&infra_config.root_role_path, &metadata_url, &targets_url)? { + match load_editor_and_manifest(root_role_path, &metadata_url, &targets_url)? 
{ Some((editor, manifest)) => (editor, manifest), None => { info!( @@ -377,8 +387,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { metadata_url ); ( - RepositoryEditor::new(&infra_config.root_role_path) - .context(error::NewEditor)?, + RepositoryEditor::new(root_role_path).context(error::NewEditor)?, Manifest::default(), ) } @@ -386,7 +395,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { } else { info!("Did not find metadata and target URLs in infra config, creating a new repository"); ( - RepositoryEditor::new(&infra_config.root_role_path).context(error::NewEditor)?, + RepositoryEditor::new(root_role_path).context(error::NewEditor)?, Manifest::default(), ) }; @@ -416,9 +425,13 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let signing_key_config = infra_config .signing_keys + .as_ref() + .context(error::MissingConfig { + missing: "signing_keys", + })? .get(&repo_args.signing_key) - .context(error::MissingSigningKey { - profile: &repo_args.signing_key, + .context(error::MissingConfig { + missing: format!("profile {} in signing_keys", &repo_args.signing_key), })?; let key_source: Box = match signing_key_config { @@ -563,14 +576,8 @@ mod error { source: update_metadata::error::Error, }, - #[snafu(display( - "Requested build of repo '{}' that isn't specified in Infra.toml", - repo - ))] - MissingRepoConfig { repo: String }, - - #[snafu(display("No profile '{}' for signing key in Infra.toml", profile))] - MissingSigningKey { profile: String }, + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, #[snafu(display("Failed to create new repo editor: {}", source))] NewEditor { source: tough::error::Error }, From cce9b06be0c038ecb6022b626b09d194ab127383 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 30 Jul 2020 23:30:39 +0000 Subject: [PATCH 0315/1356] Add the "ami" subcommand to pubsys This lets you register an AMI from the latest build and copy it to a list of regions. 
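The new `[aws]` section added to Infra.toml.example below carries the region list plus optional profile and role settings. As a rough sketch of the shape pubsys has to deserialize (struct and field names here are illustrative assumptions; the real definitions live in tools/pubsys/src/config.rs):

```
// Illustrative only: a minimal serde model for the [aws] section shown below,
// not the actual pubsys config types.
use serde::Deserialize;
use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct AwsConfig {
    regions: Vec<String>,                     // regions = ["us-west-2", ...]
    profile: Option<String>,                  // named profile from ~/.aws/credentials
    role: Option<String>,                     // global role to assume first
    #[serde(default)]
    region: HashMap<String, AwsRegionConfig>, // per-region [aws.region.*] tables
}

#[derive(Debug, Deserialize)]
struct AwsRegionConfig {
    role: Option<String>, // per-region role, assumed after the global role
}
```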
Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/Cargo.lock | 390 ++++++++++++++++++++++++++- tools/pubsys/Cargo.toml | 11 + tools/pubsys/Infra.toml.example | 17 ++ tools/pubsys/src/aws/ami/mod.rs | 324 ++++++++++++++++++++++ tools/pubsys/src/aws/ami/register.rs | 256 ++++++++++++++++++ tools/pubsys/src/aws/ami/snapshot.rs | 58 ++++ tools/pubsys/src/aws/ami/wait.rs | 149 ++++++++++ tools/pubsys/src/aws/client.rs | 127 +++++++++ tools/pubsys/src/aws/mod.rs | 4 + tools/pubsys/src/config.rs | 22 +- tools/pubsys/src/main.rs | 19 +- 11 files changed, 1361 insertions(+), 16 deletions(-) create mode 100644 tools/pubsys/src/aws/ami/mod.rs create mode 100644 tools/pubsys/src/aws/ami/register.rs create mode 100644 tools/pubsys/src/aws/ami/snapshot.rs create mode 100644 tools/pubsys/src/aws/ami/wait.rs create mode 100644 tools/pubsys/src/aws/client.rs create mode 100644 tools/pubsys/src/aws/mod.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 5595483d..287144b1 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -39,6 +39,35 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +[[package]] +name = "argh" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca1877e24cecacd700d469066e0160c4f8497cc5635367163f50c8beec820154" +dependencies = [ + "argh_derive", + "argh_shared", +] + +[[package]] +name = "argh_derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e742194e0f43fc932bcb801708c2b279d3ec8f527e3acda05a6a9f342c5ef764" +dependencies = [ + "argh_shared", + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "argh_shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1ba68f4276a778591e36a0c348a269888f3a177c8d2054969389e3b59611ff5" + [[package]] name = "arrayref" version = "0.3.6" @@ -263,6 +292,45 @@ dependencies = [ "vec_map", ] +[[package]] +name = "coldsnap" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89fd66a760caaf8647b3e60a068a03f4adee0091338c69b05a46d263e9efa3bd" +dependencies = [ + "argh", + "base64 0.12.3", + "bytes", + "futures", + "indicatif", + "rusoto_core 0.45.0", + "rusoto_credential 0.45.0", + "rusoto_ebs", + "rusoto_ec2", + "rusoto_signature 0.45.0", + "sha2 0.9.1", + "snafu", + "tempfile", + "tokio", +] + +[[package]] +name = "console" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b1aacfaffdbff75be81c15a399b4bedf78aaefe840e8af1d299ac2ade885d2" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "regex", + "terminal_size", + "termios", + "unicode-width", + "winapi 0.3.9", + "winapi-util", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -291,6 +359,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +[[package]] +name = "crc32fast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +dependencies = [ + "cfg-if", +] + [[package]] name = "crossbeam-utils" version = "0.7.2" @@ -309,7 +386,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" dependencies = [ "generic-array 0.12.3", - "subtle", + "subtle 1.0.0", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.2.3", ] [[package]] @@ -390,6 +477,12 @@ dependencies = [ "shared_child", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.23" @@ -411,6 +504,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "fuchsia-zircon" version = "0.3.3" @@ -629,10 +737,20 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" dependencies = [ - "crypto-mac", + "crypto-mac 0.7.0", "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "http" version = "0.2.1" @@ -718,6 +836,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "hyper-tls" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-tls", +] + [[package]] name = "idna" version = "0.2.0" @@ -739,6 +870,18 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "indicatif" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" +dependencies = [ + "console", + "lazy_static", + "number_prefix", + "regex", +] + [[package]] name = "iovec" version = "0.1.4" @@ -907,6 +1050,24 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "native-tls" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "net2" version = "0.2.34" @@ -953,6 +1114,12 @@ dependencies = [ "libc", ] +[[package]] +name = "number_prefix" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" + [[package]] name = "object" version = "0.20.0" @@ -988,12 +1155,39 @@ version 
= "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "lazy_static", + "libc", + "openssl-sys", +] + [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +[[package]] +name = "openssl-sys" +version = "0.9.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "os_pipe" version = "0.9.2" @@ -1062,6 +1256,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" + [[package]] name = "ppv-lite86" version = "0.2.8" @@ -1117,12 +1317,22 @@ dependencies = [ name = "pubsys" version = "0.1.0" dependencies = [ + "async-trait", "chrono", "clap", + "coldsnap", + "futures", + "indicatif", "lazy_static", "log", "parse-datetime", "reqwest", + "rusoto_core 0.45.0", + "rusoto_credential 0.45.0", + "rusoto_ebs", + "rusoto_ec2", + "rusoto_signature 0.45.0", + "rusoto_sts", "semver 0.10.0", "serde", "serde_json", @@ -1130,6 +1340,7 @@ dependencies = [ "snafu", "structopt", "tempfile", + "tokio", "toml", "tough", "tough-ssm", @@ -1292,7 +1503,7 @@ dependencies = [ "base64 0.12.3", "bytes", "futures", - "hmac", + "hmac 0.7.1", "http", "hyper", "hyper-rustls 0.20.0", @@ -1301,8 +1512,8 @@ dependencies = [ "md5", "percent-encoding", "pin-project", - "rusoto_credential", - "rusoto_signature", + "rusoto_credential 0.44.0", + "rusoto_signature 0.44.0", "rustc_version", "serde", "serde_json", @@ -1311,6 +1522,34 @@ dependencies = [ "xml-rs", ] +[[package]] +name = "rusoto_core" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e977941ee0658df96fca7291ecc6fc9a754600b21ad84b959eb1dbbc9d5abcc7" +dependencies = [ + "async-trait", + "base64 0.12.3", + "bytes", + "crc32fast", + "futures", + "http", + "hyper", + "hyper-tls", + "lazy_static", + "log", + "md5", + "percent-encoding", + "pin-project", + "rusoto_credential 0.45.0", + "rusoto_signature 0.45.0", + "rustc_version", + "serde", + "serde_json", + "tokio", + "xml-rs", +] + [[package]] name = "rusoto_credential" version = "0.44.0" @@ -1331,6 +1570,55 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rusoto_credential" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac05563f83489b19b4d413607a30821ab08bbd9007d14fa05618da3ef09d8b" +dependencies = [ + "async-trait", + "chrono", + "dirs", + "futures", + "hyper", + "pin-project", + "regex", + "serde", + "serde_json", + "shlex", + "tokio", + "zeroize", +] + +[[package]] +name = "rusoto_ebs" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec4b9c1321c10ac639bb285e46bc558d24937b1744183c0da17f1acc752fe01c" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core 0.45.0", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "rusoto_ec2" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5145366791ba9097d917330944ef460e1ebd67da871a8e04ad9f51cecc64375f" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core 0.45.0", + "serde_urlencoded", + "xml-rs", +] + [[package]] name = "rusoto_signature" version = "0.44.0" @@ -1341,14 +1629,14 @@ dependencies = [ "bytes", "futures", "hex", - "hmac", + "hmac 0.7.1", "http", "hyper", "log", "md5", "percent-encoding", "pin-project", - "rusoto_credential", + "rusoto_credential 0.44.0", "rustc_version", "serde", "sha2 0.8.2", @@ -1356,6 +1644,31 @@ dependencies = [ "tokio", ] +[[package]] +name = "rusoto_signature" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a740a88dde8ded81b6f2cff9cd5e054a5a2e38a38397260f7acdd2c85d17dd" +dependencies = [ + "base64 0.12.3", + "bytes", + "futures", + "hex", + "hmac 0.8.1", + "http", + "hyper", + "log", + "md5", + "percent-encoding", + "pin-project", + "rusoto_credential 0.45.0", + "rustc_version", + "serde", + "sha2 0.9.1", + "time 0.2.16", + "tokio", +] + [[package]] name = "rusoto_ssm" version = "0.44.0" @@ -1365,11 +1678,27 @@ dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core", + "rusoto_core 0.44.0", "serde", "serde_json", ] +[[package]] +name = "rusoto_sts" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3815b8c0fc1c50caf9e87603f23daadfedb18d854de287b361c69f68dc9d49e0" +dependencies = [ + "async-trait", + "bytes", + "chrono", + "futures", + "rusoto_core 0.45.0", + "serde_urlencoded", + "tempfile", + "xml-rs", +] + [[package]] name = "rust-argon2" version = "0.7.0" @@ -1778,6 +2107,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" +[[package]] +name = "subtle" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" + [[package]] name = "syn" version = "1.0.38" @@ -1812,6 +2147,25 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a14cd9f8c72704232f0bfc8455c0e861f0ad4eb60cc9ec8a170e231414c1e13" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "termios" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0fcee7b24a25675de40d5bb4de6e41b0df07bc9856295e7e2b3a3600c400c2" +dependencies = [ + "libc", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -1943,6 +2297,16 @@ dependencies = [ "webpki", ] +[[package]] +name = "tokio-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-util" version = "0.3.1" @@ -1995,8 +2359,8 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54e670640f67e719671a87fac948eabba0fd33633aa8be7804b38a1a1d2da32b" dependencies = [ - "rusoto_core", - "rusoto_credential", + 
"rusoto_core 0.44.0", + "rusoto_credential 0.44.0", "rusoto_ssm", "serde", "serde_json", @@ -2123,6 +2487,12 @@ dependencies = [ "serde", ] +[[package]] +name = "vcpkg" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" + [[package]] name = "vec_map" version = "0.8.2" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 3ed616c4..696f15d1 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -7,19 +7,30 @@ edition = "2018" publish = false [dependencies] +async-trait = "0.1.36" chrono = "0.4" clap = "2.33" +coldsnap = "0.1" +futures = "0.3.5" +indicatif = "0.15.0" lazy_static = "1.4" log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } +rusoto_core = "0.45.0" +rusoto_credential = "0.45.0" +rusoto_ebs = "0.45.0" +rusoto_ec2 = "0.45.0" +rusoto_signature = "0.45.0" +rusoto_sts = "0.45.0" simplelog = "0.8" snafu = "0.6" semver = "0.10.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } +tokio = "0.2.21" toml = "0.5" tough = { version = "0.8", features = ["http"] } tough-ssm = "0.3" diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index b60dc11e..6446150a 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -26,3 +26,20 @@ signing_keys = { default = { file = { path = "/home/user/key.pem" } } } [repo.default] # metadata_base_url = "https://example.com/metadata/" # targets_url = "https://example.com/targets/" + +[aws] +# The list of regions in which you want to publish AMIs. We register an AMI in +# the first region and copy it to all other regions. +regions = ["us-west-2", "us-east-1", "us-east-2"] +# If specified, we use this named profile from ~/.aws/credentials, rather than +# the default path of trying credentials from the environment, from a +# credential process, from the default profile, and then from an IAM instance +# profile. +profile = "my-profile" +# If specified, we assume this role before making any API calls. +role = "arn:aws:iam::012345678901:role/assume-global" + +[aws.region.us-west-2] +# If specified, we assume this role before making any API calls in this region. +# (This is assumed after the "global" aws.role, if that is also specified.) +role = "arn:aws:iam::012345678901:role/assume-regional" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs new file mode 100644 index 00000000..1340654b --- /dev/null +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -0,0 +1,324 @@ +//! The ami module owns the 'ami' subcommand and controls the process of registering and copying +//! EC2 AMIs. 
+ +mod register; +mod snapshot; +mod wait; + +use crate::aws::client::build_client; +use crate::config::{AwsConfig, InfraConfig}; +use crate::Args; +use futures::future::{join, lazy, ready, FutureExt}; +use futures::stream::{self, StreamExt}; +use log::{error, info, trace}; +use register::{get_ami_id, register_image}; +use rusoto_core::{Region, RusotoError}; +use rusoto_ebs::EbsClient; +use rusoto_ec2::{CopyImageError, CopyImageRequest, CopyImageResult, Ec2, Ec2Client}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::{HashMap, VecDeque}; +use std::path::PathBuf; +use structopt::StructOpt; +use wait::wait_for_ami; + +/// Builds Bottlerocket AMIs using latest build artifacts +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct AmiArgs { + /// Path to the image containing the root volume + #[structopt(short = "r", long, parse(from_os_str))] + root_image: PathBuf, + + /// Path to the image containing the data volume + #[structopt(short = "d", long, parse(from_os_str))] + data_image: PathBuf, + + /// Desired root volume size in gibibytes + #[structopt(long)] + root_volume_size: Option, + + /// Desired data volume size in gibibytes + #[structopt(long)] + data_volume_size: i64, + + /// The architecture of the machine image + #[structopt(short = "a", long)] + arch: String, + + /// The desired AMI name + #[structopt(short = "n", long)] + name: String, + + /// The desired AMI description + #[structopt(long)] + description: Option, + + /// Don't display progress bars + #[structopt(long)] + no_progress: bool, + + /// Regions where you want the AMI, the first will be used as the base for copying + #[structopt(long, use_delimiter = true)] + regions: Vec, +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + + let aws = infra_config.aws.context(error::MissingConfig { + missing: "aws section", + })?; + + // If the user gave an override list of regions, use that, otherwise use what's in the config. + let mut regions = if !ami_args.regions.is_empty() { + VecDeque::from(ami_args.regions.clone()) + } else { + aws.regions.clone() + } + .into_iter() + .map(|name| region_from_string(&name, &aws)) + .collect::>>()?; + + // We register in this base region first, then copy from there to any other regions. + let base_region = regions.pop_front().context(error::MissingConfig { + missing: "aws.regions", + })?; + + // Build EBS client for snapshot management, and EC2 client for registration + let ebs_client = build_client::(&base_region, &aws).context(error::Client { + client_type: "EBS", + region: base_region.name(), + })?; + let ec2_client = build_client::(&base_region, &aws).context(error::Client { + client_type: "EC2", + region: base_region.name(), + })?; + + // Check if the AMI already exists, in which case we can use the existing ID, otherwise we + // register a new one. 
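(Editorial aside: a minimal, standalone sketch of the region handling a few lines above, where the first configured region becomes the registration base and the remaining regions become copy targets. The region names are just the examples used in Infra.toml.example; this is not pubsys code.)

```
use std::collections::VecDeque;

fn main() {
    // The first region is where the AMI gets registered; the rest receive copies.
    let mut regions: VecDeque<&str> =
        vec!["us-west-2", "us-east-1", "us-east-2"].into_iter().collect();
    let base_region = regions.pop_front().expect("at least one region");
    assert_eq!(base_region, "us-west-2");
    assert_eq!(
        regions.into_iter().collect::<Vec<_>>(),
        vec!["us-east-1", "us-east-2"]
    );
}
```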
+ let maybe_id = get_ami_id( + &ami_args.name, + &ami_args.arch, + base_region.name(), + &ec2_client, + ) + .await + .context(error::GetAmiId { + name: &ami_args.name, + arch: &ami_args.arch, + region: base_region.name(), + })?; + + let (image_id, already_registered) = if let Some(found_id) = maybe_id { + info!( + "Found '{}' already registered in {}: {}", + ami_args.name, + base_region.name(), + found_id + ); + (found_id, true) + } else { + let new_id = register_image(ami_args, base_region.name(), ebs_client, &ec2_client) + .await + .context(error::RegisterImage { + name: &ami_args.name, + arch: &ami_args.arch, + region: base_region.name(), + })?; + info!( + "Registered AMI '{}' in {}: {}", + ami_args.name, + base_region.name(), + new_id + ); + (new_id, false) + }; + + // If we don't need to copy AMIs, we're done. + if regions.is_empty() { + return Ok(()); + } + + // Wait for AMI to be available so it can be copied + let successes_required = if already_registered { 1 } else { 3 }; + wait_for_ami( + &image_id, + &base_region, + "available", + successes_required, + &aws, + ) + .await + .context(error::WaitAmi { + id: &image_id, + region: base_region.name(), + })?; + + // For every other region, initiate copy-image calls. + // We make a map storing our regional clients because they're used in a future and need to + // live until the future is resolved. + let mut ec2_clients = HashMap::with_capacity(regions.len()); + for region in regions.iter() { + let ec2_client = build_client::(®ion, &aws).context(error::Client { + client_type: "EC2", + region: base_region.name(), + })?; + ec2_clients.insert(region.clone(), ec2_client); + } + + let mut copy_requests = Vec::with_capacity(regions.len()); + for region in regions.iter() { + let ec2_client = &ec2_clients[region]; + if let Some(id) = get_ami_id(&ami_args.name, &ami_args.arch, region.name(), ec2_client) + .await + .context(error::GetAmiId { + name: &ami_args.name, + arch: &ami_args.arch, + region: base_region.name(), + })? + { + info!( + "Found '{}' already registered in {}: {}", + ami_args.name, + region.name(), + id + ); + continue; + } + let request = CopyImageRequest { + description: ami_args.description.clone(), + name: ami_args.name.clone(), + source_image_id: image_id.clone(), + source_region: base_region.name().to_string(), + ..Default::default() + }; + let response_future = ec2_client.copy_image(request); + + let base_region_name = base_region.name(); + // Store the region so we can output it to the user + let region_future = ready(region.clone()); + // Let the user know the copy is starting, when this future goes to run + let message_future = lazy(move |_| { + info!( + "Starting copy from {} to {}", + base_region_name, + region.name() + ) + }); + copy_requests.push(message_future.then(|_| join(region_future, response_future))); + } + + // If all target regions already have the AMI, we're done. + if copy_requests.is_empty() { + return Ok(()); + } + + // Start requests; they return almost immediately and the copying work is done by the service + // afterward. You should wait for the AMI status to be "available" before launching it. + // (We still use buffer_unordered, rather than something like join_all, to retain some control + // over the number of requests going out in case we need it later, but this will effectively + // spin through all regions quickly because the requests return before any copying is done.) 
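(Editorial aside: the comment above explains why the copy requests go through `buffer_unordered` rather than `join_all`. As a hedged, standalone illustration of that combinator, assuming only the `futures` crate; `fake_copy` is a made-up stand-in for the real `copy_image` call.)

```
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

// Made-up stand-in for an EC2 copy_image request; completes immediately here.
async fn fake_copy(i: u32) -> u32 {
    i
}

fn main() {
    block_on(async {
        let requests = (0..10u32).map(fake_copy);
        // At most four requests are in flight at once; results arrive in completion order.
        let results: Vec<u32> = stream::iter(requests).buffer_unordered(4).collect().await;
        assert_eq!(results.len(), 10);
    });
}
```

Raising the buffer size only changes how many requests can be outstanding at once, which is the control the comment above wants to keep.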
+ let request_stream = stream::iter(copy_requests).buffer_unordered(4); + // Run through the stream and collect results into a list. + let copy_responses: Vec<( + Region, + std::result::Result>, + )> = request_stream.collect().await; + + // Report on successes and errors; don't fail immediately if we see an error so we can report + // all successful IDs. + let mut saw_error = false; + for (region, copy_response) in copy_responses { + match copy_response { + Ok(success) => info!( + "Registered AMI '{}' in region {}: {}", + ami_args.name, + region.name(), + success.image_id.unwrap_or_else(|| "".to_string()) + ), + Err(e) => { + saw_error = true; + error!("Copy to {} failed: {}", region.name(), e); + } + } + } + + ensure!(!saw_error, error::AmiCopy); + + Ok(()) +} + +/// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, +/// if specified in aws.region.REGION.endpoint. +fn region_from_string(name: &str, aws: &AwsConfig) -> Result { + let maybe_endpoint = aws.region.get(name).and_then(|r| r.endpoint.clone()); + Ok(match maybe_endpoint { + Some(endpoint) => Region::Custom { + name: name.to_string(), + endpoint, + }, + None => name.parse().context(error::ParseRegion { name })?, + }) +} + +mod error { + use crate::aws::{self, ami}; + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Some AMIs failed to copy, see above"))] + AmiCopy, + + #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] + Client { + client_type: String, + region: String, + source: aws::client::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { source: crate::config::Error }, + + #[snafu(display("Error getting AMI ID for {} {} in {}: {}", arch, name, region, source))] + GetAmiId { + name: String, + arch: String, + region: String, + source: ami::register::Error, + }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, + + #[snafu(display("Failed to parse region '{}': {}", name, source))] + ParseRegion { + name: String, + source: rusoto_signature::region::ParseRegionError, + }, + + #[snafu(display("Error registering {} {} in {}: {}", arch, name, region, source))] + RegisterImage { + name: String, + arch: String, + region: String, + source: ami::register::Error, + }, + + #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] + WaitAmi { + id: String, + region: String, + source: ami::wait::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs new file mode 100644 index 00000000..8e9a3cce --- /dev/null +++ b/tools/pubsys/src/aws/ami/register.rs @@ -0,0 +1,256 @@ +use super::{snapshot::snapshot_from_image, AmiArgs}; +use coldsnap::{SnapshotUploader, SnapshotWaiter}; +use log::{debug, info, warn}; +use rusoto_ebs::EbsClient; +use rusoto_ec2::{ + BlockDeviceMapping, DeleteSnapshotRequest, DescribeImagesRequest, EbsBlockDevice, Ec2, + Ec2Client, Filter, RegisterImageRequest, +}; +use snafu::{ensure, OptionExt, ResultExt}; + +const ROOT_DEVICE_NAME: &str = "/dev/xvda"; +const DATA_DEVICE_NAME: &str = "/dev/xvdb"; + +// Features we assume/enable for the images. +const VIRT_TYPE: &str = "hvm"; +const VOLUME_TYPE: &str = "gp2"; +const SRIOV: &str = "simple"; +const ENA: bool = true; + +/// Helper for `register_image`. 
Inserts registered snapshot IDs into `cleanup_snapshot_ids` so +/// they can be cleaned up on failure if desired. +async fn _register_image( + ami_args: &AmiArgs, + region: &str, + ebs_client: EbsClient, + ec2_client: &Ec2Client, + cleanup_snapshot_ids: &mut Vec, +) -> Result { + debug!( + "Uploading root and data images into EBS snapshots in {}", + region + ); + let uploader = SnapshotUploader::new(ebs_client); + let root_snapshot = snapshot_from_image( + &ami_args.root_image, + &uploader, + ami_args.root_volume_size, + ami_args.no_progress, + ) + .await + .context(error::Snapshot { + path: &ami_args.root_image, + region, + })?; + cleanup_snapshot_ids.push(root_snapshot.clone()); + + let data_snapshot = snapshot_from_image( + &ami_args.data_image, + &uploader, + Some(ami_args.data_volume_size), + ami_args.no_progress, + ) + .await + .context(error::Snapshot { + path: &ami_args.root_image, + region, + })?; + cleanup_snapshot_ids.push(data_snapshot.clone()); + + debug!( + "Waiting for root and data snapshots to become available in {}", + region + ); + let waiter = SnapshotWaiter::new(ec2_client.clone()); + waiter + .wait(&root_snapshot, Default::default()) + .await + .context(error::WaitSnapshot { + snapshot_type: "root", + })?; + waiter + .wait(&data_snapshot, Default::default()) + .await + .context(error::WaitSnapshot { + snapshot_type: "data", + })?; + + // Prepare parameters for AMI registration request + let root_bdm = BlockDeviceMapping { + device_name: Some(ROOT_DEVICE_NAME.to_string()), + ebs: Some(EbsBlockDevice { + delete_on_termination: Some(true), + snapshot_id: Some(root_snapshot), + volume_type: Some(VOLUME_TYPE.to_string()), + ..Default::default() + }), + ..Default::default() + }; + + let mut data_bdm = root_bdm.clone(); + data_bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); + if let Some(ebs) = data_bdm.ebs.as_mut() { + ebs.snapshot_id = Some(data_snapshot); + } + + let register_request = RegisterImageRequest { + architecture: Some(ami_args.arch.clone()), + block_device_mappings: Some(vec![root_bdm, data_bdm]), + description: ami_args.description.clone(), + ena_support: Some(ENA), + name: ami_args.name.clone(), + root_device_name: Some(ROOT_DEVICE_NAME.to_string()), + sriov_net_support: Some(SRIOV.to_string()), + virtualization_type: Some(VIRT_TYPE.to_string()), + ..Default::default() + }; + + debug!("Registering AMI in {}", region); + let register_response = ec2_client + .register_image(register_request) + .await + .context(error::RegisterImage { region })?; + + register_response + .image_id + .context(error::MissingImageId { region }) +} + +/// Uploads the given images into snapshots and registers an AMI using them as its block device +/// mapping. Deletes snapshots on failure. +pub(crate) async fn register_image( + ami_args: &AmiArgs, + region: &str, + ebs_client: EbsClient, + ec2_client: &Ec2Client, +) -> Result { + info!("Registering '{}' in {}", ami_args.name, region); + let mut cleanup_snapshot_ids = Vec::new(); + let register_result = _register_image( + ami_args, + region, + ebs_client, + ec2_client, + &mut cleanup_snapshot_ids, + ) + .await; + + if let Err(_) = register_result { + for snapshot_id in cleanup_snapshot_ids { + let delete_request = DeleteSnapshotRequest { + snapshot_id: snapshot_id.clone(), + ..Default::default() + }; + if let Err(e) = ec2_client.delete_snapshot(delete_request).await { + warn!( + "While cleaning up, failed to delete snapshot {}: {}", + snapshot_id, e + ); + } + } + } + register_result +} + +/// Queries EC2 for the given AMI name. 
If found, returns Ok(Some(id)), if not returns Ok(None). +pub(crate) async fn get_ami_id( + name: S1, + arch: S2, + region: &str, + ec2_client: &Ec2Client, +) -> Result> +where + S1: Into, + S2: Into, +{ + let describe_request = DescribeImagesRequest { + owners: Some(vec!["self".to_string()]), + filters: Some(vec![ + Filter { + name: Some("name".to_string()), + values: Some(vec![name.into()]), + }, + Filter { + name: Some("architecture".to_string()), + values: Some(vec![arch.into()]), + }, + Filter { + name: Some("image-type".to_string()), + values: Some(vec!["machine".to_string()]), + }, + Filter { + name: Some("virtualization-type".to_string()), + values: Some(vec![VIRT_TYPE.to_string()]), + }, + ]), + ..Default::default() + }; + let describe_response = ec2_client + .describe_images(describe_request) + .await + .context(error::DescribeImages { region })?; + if let Some(mut images) = describe_response.images { + if images.is_empty() { + return Ok(None); + } + ensure!( + images.len() == 1, + error::MultipleImages { + images: images + .into_iter() + .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) + .collect::>() + } + ); + let image = images.remove(0); + // If there is an image but we couldn't find the ID of it, fail rather than returning None, + // which would indicate no image. + let id = image.image_id.context(error::MissingImageId { region })?; + Ok(Some(id)) + } else { + Ok(None) + } +} + +mod error { + use crate::aws::ami; + use snafu::Snafu; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to describe images in {}: {}", region, source))] + DescribeImages { + region: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display("Image response in {} did not include image ID", region))] + MissingImageId { region: String }, + + #[snafu(display("DescribeImages with unique filters returned multiple results: {}", images.join(", ")))] + MultipleImages { images: Vec }, + + #[snafu(display("Failed to register image in {}: {}", region, source))] + RegisterImage { + region: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display("Failed to upload snapshot from {} in {}: {}", path.display(),region, source))] + Snapshot { + path: PathBuf, + region: String, + source: ami::snapshot::Error, + }, + + #[snafu(display("{} snapshot did not become available: {}", snapshot_type, source))] + WaitSnapshot { + snapshot_type: String, + source: coldsnap::WaitError, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs new file mode 100644 index 00000000..ab358053 --- /dev/null +++ b/tools/pubsys/src/aws/ami/snapshot.rs @@ -0,0 +1,58 @@ +use coldsnap::SnapshotUploader; +use indicatif::{ProgressBar, ProgressStyle}; +use snafu::{OptionExt, ResultExt}; +use std::path::Path; + +/// Create a progress bar to show status of snapshot blocks, if wanted. +fn build_progress_bar(no_progress: bool, verb: &str) -> Option { + if no_progress { + return None; + } + let progress_bar = ProgressBar::new(0); + progress_bar.set_style( + ProgressStyle::default_bar() + .template(&[" ", verb, " [{bar:50.white/black}] {pos}/{len} ({eta})"].concat()) + .progress_chars("=> "), + ); + Some(progress_bar) +} + +/// Uploads the given path into a snapshot. +pub(crate) async fn snapshot_from_image

( + path: P, + uploader: &SnapshotUploader, + desired_size: Option, + no_progress: bool, +) -> Result +where + P: AsRef, +{ + let path = path.as_ref(); + let progress_bar = build_progress_bar(no_progress, "Uploading snapshot"); + let filename = path + .file_name() + .context(error::InvalidImagePath { path })? + .to_string_lossy(); + + uploader + .upload_from_file(path, desired_size, Some(&filename), progress_bar) + .await + .context(error::UploadSnapshot) +} + +mod error { + use snafu::Snafu; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Invalid image path '{}'", path.display()))] + InvalidImagePath { path: PathBuf }, + + #[snafu(display("Failed to upload snapshot: {}", source))] + UploadSnapshot { source: coldsnap::UploadError }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs new file mode 100644 index 00000000..c1570e87 --- /dev/null +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -0,0 +1,149 @@ +use crate::aws::client::build_client; +use crate::config::AwsConfig; +use log::info; +use rusoto_core::Region; +use rusoto_ec2::{DescribeImagesRequest, Ec2, Ec2Client}; +use snafu::{ensure, ResultExt}; +use std::thread::sleep; +use std::time::Duration; + +/// Waits for the given AMI ID to reach the given state, requiring it be in that state for +/// `success_required` checks in a row. +pub(crate) async fn wait_for_ami( + id: &str, + region: &Region, + state: &str, + successes_required: u8, + aws: &AwsConfig, +) -> Result<()> { + let mut successes = 0; + let max_attempts = 90; + let mut attempts = 0; + let seconds_between_attempts = 2; + + loop { + attempts += 1; + // Stop if we're over max, unless we're on a success streak, then give it some wiggle room. + ensure!( + (attempts - successes) <= max_attempts, + error::MaxAttempts { + id, + max_attempts, + region: region.name() + } + ); + if attempts % 5 == 1 { + info!( + "Waiting for {} in {} to be {}... (attempt {} of {})", + id, + region.name(), + state, + attempts, + max_attempts + ); + } + + let describe_request = DescribeImagesRequest { + image_ids: Some(vec![id.to_string()]), + ..Default::default() + }; + // Use a new client each time so we have more confidence that different endpoints can see + // the new AMI. + let ec2_client = build_client::(®ion, &aws).context(error::Client { + client_type: "EC2", + region: region.name(), + })?; + let describe_response = + ec2_client + .describe_images(describe_request) + .await + .context(error::DescribeImages { + region: region.name(), + })?; + // The response contains an Option>, so we have to check that we got a + // list at all, and then that the list contains the ID in question. + if let Some(images) = describe_response.images { + let mut saw_it = false; + for image in images { + if let Some(ref found_id) = image.image_id { + if let Some(ref found_state) = image.state { + if id == found_id && state == found_state { + // Success; check if we have enough to declare victory. + saw_it = true; + successes += 1; + if successes >= successes_required { + info!("Found {} {} in {}", id, state, region.name()); + return Ok(()); + } + break; + } + // If the state shows us the AMI failed, we know we'll never hit the + // desired state. (Unless they desired "error", which will be caught + // above.) 
+ ensure!( + !["invalid", "deregistered", "failed", "error"] + .iter() + .any(|e| e == found_state), + error::State { + id, + state: found_state, + region: region.name() + } + ); + } + } + } + if !saw_it { + // Did not find image in list; reset success count and try again (if we have spare attempts) + successes = 0; + } + } else { + // Did not receive list; reset success count and try again (if we have spare attempts) + successes = 0; + }; + sleep(Duration::from_secs(seconds_between_attempts)); + } +} + +mod error { + use crate::aws; + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] + Client { + client_type: String, + region: String, + source: aws::client::Error, + }, + + #[snafu(display("Failed to describe images in {}: {}", region, source))] + DescribeImages { + region: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display( + "Failed to reach desired state within {} attempts for {} in {}", + max_attempts, + id, + region + ))] + MaxAttempts { + max_attempts: u8, + id: String, + region: String, + }, + + #[snafu(display("Image '{}' went to '{}' state in {}", id, state, region))] + State { + id: String, + state: String, + region: String, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs new file mode 100644 index 00000000..30479037 --- /dev/null +++ b/tools/pubsys/src/aws/client.rs @@ -0,0 +1,127 @@ +use async_trait::async_trait; +use crate::config::AwsConfig; +use rusoto_core::{request::DispatchSignedRequest, HttpClient, Region}; +use rusoto_credential::{ + AutoRefreshingProvider, AwsCredentials, CredentialsError, DefaultCredentialsProvider, + ProfileProvider, ProvideAwsCredentials, +}; +use rusoto_ebs::EbsClient; +use rusoto_ec2::Ec2Client; +use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient}; +use snafu::ResultExt; + +pub(crate) trait NewWith { + fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self + where + P: ProvideAwsCredentials + Send + Sync + 'static, + D: DispatchSignedRequest + Send + Sync + 'static; +} + +impl NewWith for EbsClient { + fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self + where + P: ProvideAwsCredentials + Send + Sync + 'static, + D: DispatchSignedRequest + Send + Sync + 'static, + { + Self::new_with(request_dispatcher, credentials_provider, region) + } +} + +impl NewWith for Ec2Client { + fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self + where + P: ProvideAwsCredentials + Send + Sync + 'static, + D: DispatchSignedRequest + Send + Sync + 'static, + { + Self::new_with(request_dispatcher, credentials_provider, region) + } +} + +/// Create a rusoto client of the given type using the given region and configuration. +pub(crate) fn build_client(region: &Region, aws: &AwsConfig) -> Result { + let maybe_regional_role = aws.region.get(region.name()).and_then(|r| r.role.clone()); + let assume_roles = aws.role.iter().chain(maybe_regional_role.iter()).cloned(); + let provider = build_provider(®ion, assume_roles.clone(), base_provider(&aws.profile)?)?; + Ok(T::new_with( + rusoto_core::HttpClient::new().context(error::HttpClient)?, + provider, + region.clone(), + )) +} + +/// Wrapper for trait object that implements ProvideAwsCredentials to simplify return values. 
+/// Might be able to remove if rusoto implements ProvideAwsCredentials for +/// Box. +struct CredentialsProvider(Box); +#[async_trait] +impl ProvideAwsCredentials for CredentialsProvider { + async fn credentials(&self) -> std::result::Result { + self.0.credentials().await + } +} + +/// Chains credentials providers to assume the given roles in order. +fn build_provider

( + region: &Region, + assume_roles: impl Iterator, + base_provider: P, +) -> Result +where + P: ProvideAwsCredentials + Send + Sync + 'static, +{ + let mut provider = CredentialsProvider(Box::new(base_provider)); + for assume_role in assume_roles { + let sts = StsClient::new_with( + HttpClient::new().context(error::HttpClient)?, + provider, + region.clone(), + ); + let expiring_provider = StsAssumeRoleSessionCredentialsProvider::new( + sts, + assume_role, + "pubsys".to_string(), // session name + None, // external ID + None, // session duration + None, // scope down policy + None, // MFA serial + ); + provider = CredentialsProvider(Box::new( + AutoRefreshingProvider::new(expiring_provider).context(error::Provider)?, + )); + } + Ok(provider) +} + +/// If the user specified a profile, have rusoto use that, otherwise use Rusoto's default +/// credentials mechanisms. +fn base_provider(maybe_profile: &Option) -> Result { + if let Some(profile) = maybe_profile { + let mut p = ProfileProvider::new().context(error::Provider)?; + p.set_profile(profile); + Ok(CredentialsProvider(Box::new(p))) + } else { + Ok(CredentialsProvider(Box::new( + DefaultCredentialsProvider::new().context(error::Provider)?, + ))) + } +} + +pub(crate) mod error { + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to create HTTP client: {}", source))] + HttpClient { + source: rusoto_core::request::TlsError, + }, + + #[snafu(display("Failed to create AWS credentials provider: {}", source))] + Provider { + source: rusoto_credential::CredentialsError, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs new file mode 100644 index 00000000..ebd75728 --- /dev/null +++ b/tools/pubsys/src/aws/mod.rs @@ -0,0 +1,4 @@ +#[macro_use] +pub(crate) mod client; + +pub(crate) mod ami; diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs index 9d9834f7..962ea629 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys/src/config.rs @@ -4,7 +4,7 @@ use crate::deserialize_offset; use chrono::Duration; use serde::Deserialize; use snafu::ResultExt; -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::fs; use std::path::{Path, PathBuf}; use url::Url; @@ -12,9 +12,13 @@ use url::Url; /// Configuration needed to load and create repos #[derive(Debug, Deserialize)] pub(crate) struct InfraConfig { + // Repo subcommand config pub(crate) root_role_path: Option, pub(crate) signing_keys: Option>, pub(crate) repo: Option>, + + // Config for AWS specific subcommands + pub(crate) aws: Option, } impl InfraConfig { @@ -29,6 +33,22 @@ impl InfraConfig { } } +/// AWS-specific infrastructure configuration +#[derive(Debug, Deserialize)] +pub(crate) struct AwsConfig { + pub(crate) regions: VecDeque, + pub(crate) role: Option, + pub(crate) profile: Option, + pub(crate) region: HashMap, +} + +/// AWS region-specific configuration +#[derive(Debug, Deserialize)] +pub(crate) struct AwsRegionConfig { + pub(crate) role: Option, + pub(crate) endpoint: Option, +} + /// Location of signing keys // These variant names are lowercase because they have to match the text in Infra.toml, and it's // more common for TOML config to be lowercase. 
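(Editorial aside: a hedged sketch of how the `[aws]` table shown in Infra.toml.example maps onto the `AwsConfig`/`AwsRegionConfig` structs added above. The structs here are trimmed copies for illustration, deserialized directly so the `aws.` table prefix is dropped; it assumes the `toml` and `serde` crates already in pubsys's dependencies, and the values are the examples from the document.)

```
use serde::Deserialize;
use std::collections::{HashMap, VecDeque};

#[derive(Debug, Deserialize)]
struct AwsConfig {
    regions: VecDeque<String>,
    role: Option<String>,
    profile: Option<String>,
    region: HashMap<String, AwsRegionConfig>,
}

#[derive(Debug, Deserialize)]
struct AwsRegionConfig {
    role: Option<String>,
    endpoint: Option<String>,
}

fn main() {
    let doc = r#"
        regions = ["us-west-2", "us-east-1", "us-east-2"]
        profile = "my-profile"
        role = "arn:aws:iam::012345678901:role/assume-global"

        [region.us-west-2]
        role = "arn:aws:iam::012345678901:role/assume-regional"
    "#;
    let aws: AwsConfig = toml::from_str(doc).unwrap();
    assert_eq!(aws.regions.front().map(String::as_str), Some("us-west-2"));
    assert!(aws.region["us-west-2"].endpoint.is_none());
}
```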
diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index c96ce684..7711fe2f 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -3,20 +3,23 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch +* registering and copying EC2 AMIs To be implemented: -* building AMIs +* machine-readable output describing AMI registrations * updating SSM parameters +* high-level document describing pubsys usage with examples Configuration comes from: * command-line parameters, to specify basic options and paths to the below files -* Infra.toml, for repo configuration +* Infra.toml, for repo and AMI configuration * Release.toml, for migrations * Policy files for repo metadata expiration and update wave timing */ #![deny(rust_2018_idioms)] +mod aws; mod config; mod repo; @@ -30,7 +33,7 @@ use std::path::PathBuf; use std::process; use structopt::StructOpt; -fn run() -> Result<()> { +async fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); @@ -40,11 +43,13 @@ fn run() -> Result<()> { match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), + SubCommand::Ami(ref ami_args) => aws::ami::run(&args, &ami_args).await.context(error::Ami), } } -fn main() { - if let Err(e) = run() { +#[tokio::main] +async fn main() { + if let Err(e) = run().await { eprintln!("{}", e); process::exit(1); } @@ -69,6 +74,7 @@ struct Args { #[derive(Debug, StructOpt)] enum SubCommand { Repo(repo::RepoArgs), + Ami(aws::ami::AmiArgs), } /// Parses a SemVer, stripping a leading 'v' if present @@ -102,6 +108,9 @@ mod error { #[snafu(display("Failed to build repo: {}", source))] Repo { source: crate::repo::Error }, + + #[snafu(display("Failed to build AMI: {}", source))] + Ami { source: crate::aws::ami::Error }, } } type Result = std::result::Result; From 4af66d61f9c3597d0b54d1303faebd403d7eed98 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 11 Aug 2020 17:41:16 +0000 Subject: [PATCH 0316/1356] pubsys: default AwsConfig so it's not required Have serde default `aws.region` so the user does not have to have an empty `aws.region` table if they have no region-specific configuration. Have serde default `aws.regions` in case the user only wants to specify regions on the command line with `PUBLISH_REGIONS`. Derive Default on AwsConfig now that all sections of `aws` in Infra.toml are optional, so the user doesn't need an empty `[aws]` section if they intend to use default credential mechanisms and specify `PUBLISH_REGIONS`. Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 4 +--- tools/pubsys/src/config.rs | 4 +++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 1340654b..d7911734 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -71,9 +71,7 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; trace!("Parsed infra config: {:?}", infra_config); - let aws = infra_config.aws.context(error::MissingConfig { - missing: "aws section", - })?; + let aws = infra_config.aws.unwrap_or_else(|| Default::default()); // If the user gave an override list of regions, use that, otherwise use what's in the config. 
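(Editorial aside: the commit below makes the whole `[aws]` section optional. A minimal sketch of how the `#[serde(default)]` attributes and `unwrap_or_else(Default::default)` in this commit behave together; the structs are simplified stand-ins, not the full pubsys definitions.)

```
use serde::Deserialize;
use std::collections::VecDeque;

#[derive(Debug, Default, Deserialize)]
struct AwsConfig {
    #[serde(default)]
    regions: VecDeque<String>,
    profile: Option<String>,
}

#[derive(Debug, Deserialize)]
struct InfraConfig {
    aws: Option<AwsConfig>,
}

fn main() {
    // No [aws] section at all: fall back to an all-default AwsConfig.
    let infra: InfraConfig = toml::from_str("").unwrap();
    let aws = infra.aws.unwrap_or_else(Default::default);
    assert!(aws.regions.is_empty());

    // An [aws] section with only a profile: `regions` still defaults to empty,
    // so regions can come from the command line instead.
    let infra: InfraConfig = toml::from_str("[aws]\nprofile = \"my-profile\"").unwrap();
    let aws = infra.aws.unwrap_or_else(Default::default);
    assert_eq!(aws.profile.as_deref(), Some("my-profile"));
    assert!(aws.regions.is_empty());
}
```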
let mut regions = if !ami_args.regions.is_empty() { diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs index 962ea629..d4489a43 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys/src/config.rs @@ -34,11 +34,13 @@ impl InfraConfig { } /// AWS-specific infrastructure configuration -#[derive(Debug, Deserialize)] +#[derive(Debug, Default, Deserialize)] pub(crate) struct AwsConfig { + #[serde(default)] pub(crate) regions: VecDeque, pub(crate) role: Option, pub(crate) profile: Option, + #[serde(default)] pub(crate) region: HashMap, } From 9f725edd2bac1563bcb460b63a05a18cb2182112 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 12 Aug 2020 16:49:42 -0700 Subject: [PATCH 0317/1356] pubsys: only create tokio runtime when needed tough-ssm creates its own tokio runtime for making SSM calls, so we can't use it inside our own tokio runtime. This change removes the general `[tokio::main]` annotation that creates a runtime for the entire app, and instead only creates a runtime inside the `ami` subcommand that needs one. We can switch back to the annotation when tough-ssm moves to an async interface. --- tools/pubsys/src/main.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 7711fe2f..1f42b69e 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -32,8 +32,9 @@ use snafu::ResultExt; use std::path::PathBuf; use std::process; use structopt::StructOpt; +use tokio::runtime::Runtime; -async fn run() -> Result<()> { +fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); @@ -43,13 +44,17 @@ async fn run() -> Result<()> { match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), - SubCommand::Ami(ref ami_args) => aws::ami::run(&args, &ami_args).await.context(error::Ami), + SubCommand::Ami(ref ami_args) => { + let mut rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { + aws::ami::run(&args, &ami_args).await.context(error::Ami) + }) + }, } } -#[tokio::main] -async fn main() { - if let Err(e) = run().await { +fn main() { + if let Err(e) = run() { eprintln!("{}", e); process::exit(1); } @@ -103,14 +108,17 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility = "pub(super)")] pub(super) enum Error { + #[snafu(display("Failed to build AMI: {}", source))] + Ami { source: crate::aws::ami::Error }, + #[snafu(display("Logger setup error: {}", source))] Logger { source: simplelog::TermLogError }, #[snafu(display("Failed to build repo: {}", source))] Repo { source: crate::repo::Error }, - #[snafu(display("Failed to build AMI: {}", source))] - Ami { source: crate::aws::ami::Error }, + #[snafu(display("Failed to create async runtime: {}", source))] + Runtime { source: std::io::Error }, } } type Result = std::result::Result; From 6486504c51924c06faede41183d1529ef050b521 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 17 Aug 2020 17:24:30 -0700 Subject: [PATCH 0318/1356] Update BUILDING.md for new coldsnap-based amiize.sh --- BUILDING.md | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 4e58c9c6..bb927213 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -49,8 +49,8 @@ The `bin/amiize.sh` script does this for you. 
The script has some assumptions about your setup, in particular that you: * have [aws-cli v1](https://aws.amazon.com/cli/) set up, and that its default profile can create and control EC2 resources - * have an SSH key that's registered with EC2 and is available to `ssh` (for example, loaded into `ssh-agent`) - * have a few other common tools installed, like `jq`, `du`, and `rsync` + * have [coldsnap](https://github.com/awslabs/coldsnap/) installed to upload snapshots + * have a few other common tools installed, like `jq` and `du` First, decompress the images. (Note: these filenames assume an `x86_64` architecture and `aws-k8s-1.17` [variant](README.md).) @@ -64,23 +64,14 @@ Next, register an AMI: ``` bin/amiize.sh --name YOUR-AMI-NAME-HERE \ - --ssh-keypair YOUR-EC2-SSH-KEYPAIR-NAME-HERE \ - --root-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img \ - --data-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img \ - --region us-west-2 \ - --instance-type m3.xlarge \ --arch x86_64 \ - --worker-ami ami-08d489468314a58df \ - --user-data 'I2Nsb3VkLWNvbmZpZwpyZXBvX3VwZ3JhZGU6IG5vbmUK' + --region us-west-2 \ + --root-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img \ + --data-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img ``` Your new AMI ID will be printed at the end. -The amiize script starts an EC2 instance, which it uses to write the image to a new EBS volume. -It then registers this EBS volume as an AMI and terminates the instance. -In the example command above, the `--worker-ami` is an Amazon Linux AMI, and the `--user-data` disables updates at boot to speed up registration. -Make sure you use an up-to-date worker AMI. - ## Use your image See the [setup guide](QUICKSTART.md) for information on running Bottlerocket images. From 5f15ad002c66bc212624ed78726ba3bdcfec3840 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 17 Aug 2020 17:34:16 -0700 Subject: [PATCH 0319/1356] README: add supported architectures --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 3df45bb8..ae42e75e 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,10 @@ For example, an `x86_64` build of the `aws-k8s-1.17` variant will produce an ima Our first supported variants, `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17`, supports EKS as described above. +## Architectures + +Our supported architectures include `x86_64` and `aarch64` (written as `arm64` in some contexts). 
+ ## Setup :walking: :running: From 2082c4d21d78ff6a85a2785d3b4dc5648acc706d Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 12 Aug 2020 22:14:47 +0000 Subject: [PATCH 0320/1356] pubsys: rustfmt client module Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/client.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 30479037..48980621 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -1,5 +1,5 @@ -use async_trait::async_trait; use crate::config::AwsConfig; +use async_trait::async_trait; use rusoto_core::{request::DispatchSignedRequest, HttpClient, Region}; use rusoto_credential::{ AutoRefreshingProvider, AwsCredentials, CredentialsError, DefaultCredentialsProvider, @@ -22,9 +22,9 @@ impl NewWith for EbsClient { where P: ProvideAwsCredentials + Send + Sync + 'static, D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } + { + Self::new_with(request_dispatcher, credentials_provider, region) + } } impl NewWith for Ec2Client { @@ -32,9 +32,9 @@ impl NewWith for Ec2Client { where P: ProvideAwsCredentials + Send + Sync + 'static, D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } + { + Self::new_with(request_dispatcher, credentials_provider, region) + } } /// Create a rusoto client of the given type using the given region and configuration. From e2d486a8e6894fe790cbd46acd5e2780097a9aa5 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 12 Aug 2020 22:22:04 +0000 Subject: [PATCH 0321/1356] pubsys: allow JSON output of registered AMIs Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 76 ++++++++++++++++++++++++++++----- tools/pubsys/src/main.rs | 1 - 2 files changed, 65 insertions(+), 12 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index d7911734..14d975ed 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -17,6 +17,7 @@ use rusoto_ebs::EbsClient; use rusoto_ec2::{CopyImageError, CopyImageRequest, CopyImageResult, Ec2, Ec2Client}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, VecDeque}; +use std::fs::File; use std::path::PathBuf; use structopt::StructOpt; use wait::wait_for_ami; @@ -60,10 +61,31 @@ pub(crate) struct AmiArgs { /// Regions where you want the AMI, the first will be used as the base for copying #[structopt(long, use_delimiter = true)] regions: Vec, + + /// If specified, save created regional AMI IDs in JSON at this path. 
+ #[structopt(long)] + ami_output: Option, } /// Common entrypoint from main() pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { + match _run(args, ami_args).await { + Ok(ami_ids) => { + // Write the AMI IDs to file if requested + if let Some(ref path) = ami_args.ami_output { + let file = File::create(path).context(error::FileCreate { path })?; + serde_json::to_writer_pretty(file, &ami_ids).context(error::Serialize { path })?; + info!("Wrote AMI data to {}", path.display()); + } + Ok(()) + } + Err(e) => Err(e), + } +} + +async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { + let mut ami_ids = HashMap::new(); + info!( "Using infra config from path: {}", args.infra_config_path.display() @@ -138,9 +160,11 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { (new_id, false) }; + ami_ids.insert(base_region.name().to_string(), image_id.clone()); + // If we don't need to copy AMIs, we're done. if regions.is_empty() { - return Ok(()); + return Ok(ami_ids); } // Wait for AMI to be available so it can be copied @@ -187,6 +211,7 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { region.name(), id ); + ami_ids.insert(region.name().to_string(), id.clone()); continue; } let request = CopyImageRequest { @@ -214,7 +239,7 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { // If all target regions already have the AMI, we're done. if copy_requests.is_empty() { - return Ok(()); + return Ok(ami_ids); } // Start requests; they return almost immediately and the copying work is done by the service @@ -234,12 +259,24 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { let mut saw_error = false; for (region, copy_response) in copy_responses { match copy_response { - Ok(success) => info!( - "Registered AMI '{}' in region {}: {}", - ami_args.name, - region.name(), - success.image_id.unwrap_or_else(|| "".to_string()) - ), + Ok(success) => { + if let Some(image_id) = success.image_id { + info!( + "Registered AMI '{}' in {}: {}", + ami_args.name, + region.name(), + image_id, + ); + ami_ids.insert(region.name().to_string(), image_id); + } else { + saw_error = true; + error!( + "Registered AMI '{}' in {} but didn't receive an AMI ID!", + ami_args.name, + region.name(), + ); + } + } Err(e) => { saw_error = true; error!("Copy to {} failed: {}", region.name(), e); @@ -249,7 +286,7 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { ensure!(!saw_error, error::AmiCopy); - Ok(()) + Ok(ami_ids) } /// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, @@ -268,6 +305,7 @@ fn region_from_string(name: &str, aws: &AwsConfig) -> Result { mod error { use crate::aws::{self, ami}; use snafu::Snafu; + use std::path::PathBuf; #[derive(Debug, Snafu)] #[snafu(visibility = "pub(super)")] @@ -283,7 +321,15 @@ mod error { }, #[snafu(display("Error reading config: {}", source))] - Config { source: crate::config::Error }, + Config { + source: crate::config::Error, + }, + + #[snafu(display("Failed to create file '{}': {}", path.display(), source))] + FileCreate { + path: PathBuf, + source: std::io::Error, + }, #[snafu(display("Error getting AMI ID for {} {} in {}: {}", arch, name, region, source))] GetAmiId { @@ -294,7 +340,9 @@ mod error { }, #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, + MissingConfig { + missing: String, + }, #[snafu(display("Failed to parse region '{}': {}", name, source))] ParseRegion { 
@@ -310,6 +358,12 @@ mod error { source: ami::register::Error, }, + #[snafu(display("Failed to serialize output to '{}': {}", path.display(), source))] + Serialize { + path: PathBuf, + source: serde_json::Error, + }, + #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] WaitAmi { id: String, diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 1f42b69e..3b43c7ef 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -6,7 +6,6 @@ Currently implemented: * registering and copying EC2 AMIs To be implemented: -* machine-readable output describing AMI registrations * updating SSM parameters * high-level document describing pubsys usage with examples From b24e7384926be6bba9337d758dbc2d55c022dc38 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 12 Aug 2020 22:23:28 +0000 Subject: [PATCH 0322/1356] pubsys: move region_from_string to common aws module Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 23 ++++------------------- tools/pubsys/src/aws/mod.rs | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 14d975ed..1e67734e 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -5,8 +5,8 @@ mod register; mod snapshot; mod wait; -use crate::aws::client::build_client; -use crate::config::{AwsConfig, InfraConfig}; +use crate::aws::{client::build_client, region_from_string}; +use crate::config::InfraConfig; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; @@ -102,7 +102,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result aws.regions.clone() } .into_iter() - .map(|name| region_from_string(&name, &aws)) + .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) .collect::>>()?; // We register in this base region first, then copy from there to any other regions. @@ -289,19 +289,6 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result Ok(ami_ids) } -/// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, -/// if specified in aws.region.REGION.endpoint. -fn region_from_string(name: &str, aws: &AwsConfig) -> Result { - let maybe_endpoint = aws.region.get(name).and_then(|r| r.endpoint.clone()); - Ok(match maybe_endpoint { - Some(endpoint) => Region::Custom { - name: name.to_string(), - endpoint, - }, - None => name.parse().context(error::ParseRegion { name })?, - }) -} - mod error { use crate::aws::{self, ami}; use snafu::Snafu; @@ -344,10 +331,8 @@ mod error { missing: String, }, - #[snafu(display("Failed to parse region '{}': {}", name, source))] ParseRegion { - name: String, - source: rusoto_signature::region::ParseRegionError, + source: crate::aws::Error, }, #[snafu(display("Error registering {} {} in {}: {}", arch, name, region, source))] diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index ebd75728..795b9efc 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -1,4 +1,37 @@ +use crate::config::AwsConfig; +use rusoto_core::Region; +use snafu::ResultExt; + #[macro_use] pub(crate) mod client; pub(crate) mod ami; + +/// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, +/// if specified in aws.region.REGION.endpoint. 
+fn region_from_string(name: &str, aws: &AwsConfig) -> Result { + let maybe_endpoint = aws.region.get(name).and_then(|r| r.endpoint.clone()); + Ok(match maybe_endpoint { + Some(endpoint) => Region::Custom { + name: name.to_string(), + endpoint, + }, + None => name.parse().context(error::ParseRegion { name })?, + }) +} + +mod error { + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to parse region '{}': {}", name, source))] + ParseRegion { + name: String, + source: rusoto_signature::region::ParseRegionError, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; From 505d52bf455ae631072d1dc19732cd6b890948a0 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 12 Aug 2020 22:24:15 +0000 Subject: [PATCH 0323/1356] build: add targets to make built AMIs public or private Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/mod.rs | 1 + tools/pubsys/src/aws/publish_ami/mod.rs | 459 ++++++++++++++++++++++++ tools/pubsys/src/main.rs | 17 +- 3 files changed, 475 insertions(+), 2 deletions(-) create mode 100644 tools/pubsys/src/aws/publish_ami/mod.rs diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index 795b9efc..794d622e 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -6,6 +6,7 @@ use snafu::ResultExt; pub(crate) mod client; pub(crate) mod ami; +pub(crate) mod publish_ami; /// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, /// if specified in aws.region.REGION.endpoint. diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs new file mode 100644 index 00000000..7b92224a --- /dev/null +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -0,0 +1,459 @@ +//! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting +//! and revoking public access to EC2 AMIs. 
+ +use crate::aws::{client::build_client, region_from_string}; +use crate::config::InfraConfig; +use crate::Args; +use futures::future::{join, ready}; +use futures::stream::{self, StreamExt}; +use log::{debug, error, info, trace}; +use rusoto_core::{Region, RusotoError}; +use rusoto_ec2::{ + DescribeImagesError, DescribeImagesRequest, DescribeImagesResult, Ec2, Ec2Client, + ModifyImageAttributeError, ModifyImageAttributeRequest, ModifySnapshotAttributeError, + ModifySnapshotAttributeRequest, +}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::iter::FromIterator; +use std::path::PathBuf; +use structopt::StructOpt; + +/// Grants or revokes permissions to Bottlerocket AMIs +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[structopt(group = clap::ArgGroup::with_name("mode").required(true).multiple(false))] +pub(crate) struct PublishArgs { + /// Path to the JSON file containing regional AMI IDs to modify + #[structopt(long)] + ami_input: PathBuf, + + /// Comma-separated list of regions to publish in, overriding Infra.toml; given regions must be + /// in the --ami-input file + #[structopt(long, use_delimiter = true)] + regions: Vec, + + /// Make the AMIs public + #[structopt(long, group = "mode")] + make_public: bool, + /// Make the AMIs private + #[structopt(long, group = "mode")] + make_private: bool, +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { + let (operation, mode) = if publish_args.make_public { + ("add".to_string(), "public") + } else if publish_args.make_private { + ("remove".to_string(), "private") + } else { + unreachable!("developer error: make-public and make-private not required/exclusive"); + }; + + info!( + "Using AMI data from path: {}", + publish_args.ami_input.display() + ); + let file = File::open(&publish_args.ami_input).context(error::File { + op: "open", + path: &publish_args.ami_input, + })?; + let mut ami_input: HashMap = + serde_json::from_reader(file).context(error::Deserialize { + path: &publish_args.ami_input, + })?; + trace!("Parsed AMI input: {:?}", ami_input); + + // pubsys will not create a file if it did not create AMIs, so we should only have an empty + // file if a user created one manually, and they shouldn't be creating an empty file. + ensure!( + !ami_input.is_empty(), + error::Input { + path: &publish_args.ami_input + } + ); + + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + + let aws = infra_config.aws.unwrap_or_else(Default::default); + + // If the user gave an override list of regions, use that, otherwise use what's in the config. + let regions = if !publish_args.regions.is_empty() { + publish_args.regions.clone() + } else { + aws.regions.clone().into() + }; + // Check that the requested regions are a subset of the regions we *could* publish from the AMI + // input JSON. 
+ let requested_regions = HashSet::from_iter(regions.iter()); + let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); + ensure!( + requested_regions.is_subset(&known_regions), + error::UnknownRegions { + regions: requested_regions + .difference(&known_regions) + .map(|s| s.to_string()) + .collect::>(), + } + ); + + // Parse region names, adding endpoints from InfraConfig if specified + let mut amis = HashMap::with_capacity(regions.len()); + for name in regions { + let ami_id = ami_input + .remove(&name) + // This could only happen if someone removes the check above... + .with_context(|| error::UnknownRegions { + regions: vec![name.clone()], + })?; + let region = region_from_string(&name, &aws).context(error::ParseRegion)?; + amis.insert(region, ami_id); + } + + // We make a map storing our regional clients because they're used in a future and need to + // live until the future is resolved. + let mut ec2_clients = HashMap::with_capacity(amis.len()); + for region in amis.keys() { + let ec2_client = build_client::(®ion, &aws).context(error::Client { + client_type: "EC2", + region: region.name(), + })?; + ec2_clients.insert(region.clone(), ec2_client); + } + + let snapshots = get_snapshots(&amis, &ec2_clients).await?; + trace!("Found snapshots: {:?}", snapshots); + + info!("Updating snapshot permissions - making {}", mode); + modify_snapshots(&snapshots, &ec2_clients, operation.clone()).await?; + info!("Updating image permissions - making {}", mode); + modify_images(&amis, &ec2_clients, operation.clone()).await?; + + Ok(()) +} + +/// Returns a regional mapping of snapshot IDs associated with the given AMIs. +async fn get_snapshots( + amis: &HashMap, + clients: &HashMap, +) -> Result>> { + // Build requests for image information. + let mut describe_requests = Vec::with_capacity(amis.len()); + for (region, image_id) in amis { + let ec2_client = &clients[region]; + let describe_request = DescribeImagesRequest { + image_ids: Some(vec![image_id.to_string()]), + ..Default::default() + }; + let describe_future = ec2_client.describe_images(describe_request); + + // Store the region and image ID so we can include it in errors + let info_future = ready((region.clone(), image_id.clone())); + describe_requests.push(join(info_future, describe_future)); + } + + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(describe_requests).buffer_unordered(4); + let describe_responses: Vec<( + (Region, String), + std::result::Result>, + )> = request_stream.collect().await; + + // For each described image, get the snapshot IDs from the block device mappings. + let mut snapshots = HashMap::with_capacity(amis.len()); + for ((region, image_id), describe_response) in describe_responses { + // Get the image description, ensuring we only have one. + let describe_response = describe_response.context(error::DescribeImages { + region: region.name(), + })?; + let mut images = describe_response.images.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "images", + })?; + ensure!( + !images.is_empty(), + error::MissingImage { + region: region.name(), + image_id, + } + ); + ensure!( + images.len() == 1, + error::MultipleImages { + region: region.name(), + images: images + .into_iter() + .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) + .collect::>() + } + ); + let image = images.remove(0); + + // Look into the block device mappings for snapshots. 
+ let bdms = image + .block_device_mappings + .context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "block_device_mappings", + })?; + ensure!( + !bdms.is_empty(), + error::MissingInResponse { + request_type: "DescribeImages", + missing: "non-empty block_device_mappings" + } + ); + let mut snapshot_ids = Vec::with_capacity(bdms.len()); + for bdm in bdms { + let ebs = bdm.ebs.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "ebs in block_device_mappings", + })?; + let snapshot_id = ebs.snapshot_id.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "snapshot_id in block_device_mappings.ebs", + })?; + snapshot_ids.push(snapshot_id); + } + snapshots.insert(region, snapshot_ids); + } + + Ok(snapshots) +} + +/// Modify snapshot attributes to make them public/private as requested. +async fn modify_snapshots( + snapshots: &HashMap>, + clients: &HashMap, + operation: String, +) -> Result<()> { + // Build requests to modify snapshot attributes. + let mut modify_snapshot_requests = Vec::new(); + for (region, snapshot_ids) in snapshots { + for snapshot_id in snapshot_ids { + let ec2_client = &clients[region]; + let modify_snapshot_request = ModifySnapshotAttributeRequest { + attribute: Some("createVolumePermission".to_string()), + group_names: Some(vec!["all".to_string()]), + operation_type: Some(operation.clone()), + snapshot_id: snapshot_id.clone(), + ..Default::default() + }; + let modify_snapshot_future = + ec2_client.modify_snapshot_attribute(modify_snapshot_request); + + // Store the region and snapshot ID so we can include it in errors + let info_future = ready((region.name().to_string(), snapshot_id.clone())); + modify_snapshot_requests.push(join(info_future, modify_snapshot_future)); + } + } + + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(modify_snapshot_requests).buffer_unordered(4); + let modify_snapshot_responses: Vec<( + (String, String), + std::result::Result<(), RusotoError>, + )> = request_stream.collect().await; + + // Count up successes and failures so we can give a clear total in the final error message. + let mut error_count = 0u16; + let mut success_count = 0u16; + for ((region, snapshot_id), modify_snapshot_response) in modify_snapshot_responses { + match modify_snapshot_response { + Ok(()) => { + success_count += 1; + debug!( + "Modified permissions of snapshot {} in {}", + snapshot_id, region, + ); + } + Err(e) => { + error_count += 1; + error!( + "Modifying permissions of {} in {} failed: {}", + snapshot_id, region, e + ); + } + } + } + + ensure!( + error_count == 0, + error::ModifySnapshotAttribute { + error_count, + success_count, + } + ); + + Ok(()) +} + +/// Modify image attributes to make them public/private as requested. +async fn modify_images( + images: &HashMap, + clients: &HashMap, + operation: String, +) -> Result<()> { + // Build requests to modify image attributes. 
+ let mut modify_image_requests = Vec::new(); + for (region, image_id) in images { + let ec2_client = &clients[region]; + let modify_image_request = ModifyImageAttributeRequest { + attribute: Some("launchPermission".to_string()), + user_groups: Some(vec!["all".to_string()]), + operation_type: Some(operation.clone()), + image_id: image_id.clone(), + ..Default::default() + }; + let modify_image_future = ec2_client.modify_image_attribute(modify_image_request); + + // Store the region and image ID so we can include it in errors + let info_future = ready((region.name().to_string(), image_id.clone())); + modify_image_requests.push(join(info_future, modify_image_future)); + } + + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(modify_image_requests).buffer_unordered(4); + let modify_image_responses: Vec<( + (String, String), + std::result::Result<(), RusotoError>, + )> = request_stream.collect().await; + + // Count up successes and failures so we can give a clear total in the final error message. + let mut error_count = 0u16; + let mut success_count = 0u16; + for ((region, image_id), modify_image_response) in modify_image_responses { + match modify_image_response { + Ok(()) => { + success_count += 1; + info!("Modified permissions of image {} in {}", image_id, region,); + } + Err(e) => { + error_count += 1; + error!( + "Modifying permissions of {} in {} failed: {}", + image_id, region, e + ); + } + } + } + + ensure!( + error_count == 0, + error::ModifyImageAttribute { + error_count, + success_count, + } + ); + + Ok(()) +} + +mod error { + use crate::aws; + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] + Client { + client_type: String, + region: String, + source: aws::client::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { + source: crate::config::Error, + }, + + #[snafu(display("Failed to describe images in {}: {}", region, source))] + DescribeImages { + region: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] + Deserialize { + path: PathBuf, + source: serde_json::Error, + }, + + #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] + File { + op: String, + path: PathBuf, + source: io::Error, + }, + + #[snafu(display("Input '{}' is empty", path.display()))] + Input { + path: PathBuf, + }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { + missing: String, + }, + + #[snafu(display("Failed to find given AMI ID {} in {}", image_id, region))] + MissingImage { + region: String, + image_id: String, + }, + + #[snafu(display("Response to {} was missing {}", request_type, missing))] + MissingInResponse { + request_type: String, + missing: String, + }, + + #[snafu(display( + "Failed to modify permissions of {} of {} images", + error_count, error_count + success_count, + ))] + ModifyImageAttribute { + error_count: u16, + success_count: u16, + }, + + #[snafu(display( + "Failed to modify permissions of {} of {} snapshots", + error_count, error_count + success_count, + ))] + ModifySnapshotAttribute { + error_count: u16, + success_count: u16, + }, + + #[snafu(display("DescribeImages in {} with unique filters returned multiple results: {}", region, images.join(", ")))] + MultipleImages { 
+ region: String, + images: Vec, + }, + + ParseRegion { + source: crate::aws::Error, + }, + + #[snafu(display( + "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", + regions.join(", ") + ))] + UnknownRegions { + regions: Vec, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 3b43c7ef..da8292cb 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -4,6 +4,7 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch * registering and copying EC2 AMIs +* Marking EC2 AMIs public (or private again) To be implemented: * updating SSM parameters @@ -44,11 +45,17 @@ fn run() -> Result<()> { match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), SubCommand::Ami(ref ami_args) => { + let mut rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) + } + SubCommand::PublishAmi(ref publish_args) => { let mut rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { - aws::ami::run(&args, &ami_args).await.context(error::Ami) + aws::publish_ami::run(&args, &publish_args) + .await + .context(error::PublishAmi) }) - }, + } } } @@ -79,6 +86,7 @@ struct Args { enum SubCommand { Repo(repo::RepoArgs), Ami(aws::ami::AmiArgs), + PublishAmi(aws::publish_ami::PublishArgs), } /// Parses a SemVer, stripping a leading 'v' if present @@ -113,6 +121,11 @@ mod error { #[snafu(display("Logger setup error: {}", source))] Logger { source: simplelog::TermLogError }, + #[snafu(display("Failed to publish AMI: {}", source))] + PublishAmi { + source: crate::aws::publish_ami::Error, + }, + #[snafu(display("Failed to build repo: {}", source))] Repo { source: crate::repo::Error }, From 4e78ef465701ba63d8da4b213dc2e0bc37b4ce32 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 17 Aug 2020 22:18:37 +0000 Subject: [PATCH 0324/1356] build: use per-checkout cache directories The cache directory is primarily used by cargo, and having two cargo commands writing build artifacts to the same location will lead to unpredictable results. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index fdceec15..5778af43 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -123,8 +123,8 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> let mut d = Sha512::new(); d.update(&root); let digest = hex::encode(d.finalize()); - let suffix = &digest[..12]; - let tag = format!("{}-{}", tag, suffix); + let token = &digest[..12]; + let tag = format!("{}-{}", tag, token); // Our SDK image is picked by the external `cargo make` invocation. let sdk = getenv("BUILDSYS_SDK_IMAGE")?; @@ -134,6 +134,9 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> let nocache = rand::thread_rng().gen::(); let nocache_args = format!("--build-arg NOCACHE={}", nocache); + // Avoid using a cached layer from a concurrent build in another checkout. + let token_args = format!("--build-arg TOKEN={}", token); + let build = args(format!( "build . 
\ --network none \ @@ -141,11 +144,13 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> {build_args} \ {sdk_args} \ {nocache_args} \ + {token_args} \ --tag {tag}", target = target, build_args = build_args, sdk_args = sdk_args, nocache_args = nocache_args, + token_args = token_args, tag = tag, )); From 66789d3717633ca4e9945f4242b398f7e2113127 Mon Sep 17 00:00:00 2001 From: srgothi92 Date: Fri, 21 Aug 2020 16:16:32 +0000 Subject: [PATCH 0325/1356] Removed nested .gitignore and moved to packages root --- packages/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/.gitignore diff --git a/packages/.gitignore b/packages/.gitignore new file mode 100644 index 00000000..7102c73b --- /dev/null +++ b/packages/.gitignore @@ -0,0 +1 @@ +*.patch.bz2 From 0b6e29f934abf9ab1d11971c8c5d65340c1d60ae Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Sat, 22 Aug 2020 23:50:43 +0000 Subject: [PATCH 0326/1356] pubsys: use base region for contacting STS The region used for the base credentials provider should be the one in which you want to talk to STS to get temporary credentials, not the region in which you want to talk to a service endpoint like EC2. This is needed because you may be assuming a role in an opt-in region from an account that has not opted-in to that region, and you need to get session credentials from an STS endpoint in a region to which you have access in the base account. --- tools/pubsys/src/aws/ami/mod.rs | 7 ++++--- tools/pubsys/src/aws/ami/wait.rs | 3 ++- tools/pubsys/src/aws/client.rs | 17 +++++++++++++---- tools/pubsys/src/aws/publish_ami/mod.rs | 5 ++++- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 1e67734e..1ff662bf 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -111,11 +111,11 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result })?; // Build EBS client for snapshot management, and EC2 client for registration - let ebs_client = build_client::(&base_region, &aws).context(error::Client { + let ebs_client = build_client::(&base_region, &base_region, &aws).context(error::Client { client_type: "EBS", region: base_region.name(), })?; - let ec2_client = build_client::(&base_region, &aws).context(error::Client { + let ec2_client = build_client::(&base_region, &base_region, &aws).context(error::Client { client_type: "EC2", region: base_region.name(), })?; @@ -172,6 +172,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result wait_for_ami( &image_id, &base_region, + &base_region, "available", successes_required, &aws, @@ -187,7 +188,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result // live until the future is resolved. 
let mut ec2_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { - let ec2_client = build_client::(®ion, &aws).context(error::Client { + let ec2_client = build_client::(®ion, &base_region, &aws).context(error::Client { client_type: "EC2", region: base_region.name(), })?; diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index c1570e87..752142c5 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -12,6 +12,7 @@ use std::time::Duration; pub(crate) async fn wait_for_ami( id: &str, region: &Region, + sts_region: &Region, state: &str, successes_required: u8, aws: &AwsConfig, @@ -49,7 +50,7 @@ pub(crate) async fn wait_for_ami( }; // Use a new client each time so we have more confidence that different endpoints can see // the new AMI. - let ec2_client = build_client::(®ion, &aws).context(error::Client { + let ec2_client = build_client::(®ion, &sts_region, &aws).context(error::Client { client_type: "EC2", region: region.name(), })?; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 48980621..d73a76eb 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -38,10 +38,14 @@ impl NewWith for Ec2Client { } /// Create a rusoto client of the given type using the given region and configuration. -pub(crate) fn build_client(region: &Region, aws: &AwsConfig) -> Result { +pub(crate) fn build_client( + region: &Region, + sts_region: &Region, + aws: &AwsConfig, +) -> Result { let maybe_regional_role = aws.region.get(region.name()).and_then(|r| r.role.clone()); let assume_roles = aws.role.iter().chain(maybe_regional_role.iter()).cloned(); - let provider = build_provider(®ion, assume_roles.clone(), base_provider(&aws.profile)?)?; + let provider = build_provider(&sts_region, assume_roles.clone(), base_provider(&aws.profile)?)?; Ok(T::new_with( rusoto_core::HttpClient::new().context(error::HttpClient)?, provider, @@ -61,8 +65,13 @@ impl ProvideAwsCredentials for CredentialsProvider { } /// Chains credentials providers to assume the given roles in order. +/// The region given should be the one in which you want to talk to STS to get temporary +/// credentials, not the region in which you want to talk to a service endpoint like EC2. This is +/// needed because you may be assuming a role in an opt-in region from an account that has not +/// opted-in to that region, and you need to get session credentials from an STS endpoint in a +/// region to which you have access in the base account. fn build_provider
( - region: &Region, + sts_region: &Region, assume_roles: impl Iterator, base_provider: P, ) -> Result @@ -74,7 +83,7 @@ where let sts = StsClient::new_with( HttpClient::new().context(error::HttpClient)?, provider, - region.clone(), + sts_region.clone(), ); let expiring_provider = StsAssumeRoleSessionCredentialsProvider::new( sts, diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 7b92224a..94dea6fe 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -90,6 +90,9 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { } else { aws.regions.clone().into() }; + ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; + // Check that the requested regions are a subset of the regions we *could* publish from the AMI // input JSON. let requested_regions = HashSet::from_iter(regions.iter()); @@ -121,7 +124,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // live until the future is resolved. let mut ec2_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { - let ec2_client = build_client::(®ion, &aws).context(error::Client { + let ec2_client = build_client::(®ion, &base_region, &aws).context(error::Client { client_type: "EC2", region: region.name(), })?; From bc28fba3cee32da3eda9439a8a9737c0a96cdee7 Mon Sep 17 00:00:00 2001 From: srgothi92 Date: Mon, 24 Aug 2020 17:14:59 -0400 Subject: [PATCH 0327/1356] Removed aws-cli v1 requirement in docs --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index bb927213..a59a6637 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -48,7 +48,7 @@ To use the image in Amazon EC2, we need to register the image as an AMI. The `bin/amiize.sh` script does this for you. The script has some assumptions about your setup, in particular that you: - * have [aws-cli v1](https://aws.amazon.com/cli/) set up, and that its default profile can create and control EC2 resources + * have [aws-cli](https://aws.amazon.com/cli/) set up, and that its default profile can create and control EC2 resources * have [coldsnap](https://github.com/awslabs/coldsnap/) installed to upload snapshots * have a few other common tools installed, like `jq` and `du` From c7806803889360a05b1ab5a1c7f279494d14b29b Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 10 Aug 2020 20:36:01 +0000 Subject: [PATCH 0328/1356] pubsys: add SSM parameter publishing `cargo make ssm` will use the amis.json from `cargo make ami` to populate parameter name/value templates from files in `policies/ssm`. The parameters are set in SSM using the full build version, e.g. "0.5.0-abcdef", and it won't overwrite by default. `cargo make promote-ssm -e SSM_TARGET=VERSION` will promote (copy) those values from the full-build-version name to a more general one. It's recommended to promote from the full version to a short version like "0.5.0", then to a well-known pointer like "latest". This allows for easy rollback by promoting from an older version, using `-e SSM_SOURCE=OLD_VERSION`. 
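To make the naming scheme concrete, here is a small illustrative sketch (hypothetical SSM prefix and values, not part of this change) of how the same parameter template yields the names that `promote-ssm` copies between:

```
// Hypothetical example: the same template rendered with different image_version
// values produces the source and target parameter names for a promotion.
fn parameter_name(prefix: &str, variant: &str, arch: &str, version: &str) -> String {
    format!("{}/{}/{}/{}/image_id", prefix, variant, arch, version)
}

fn main() {
    let source = parameter_name("/example/prefix", "aws-k8s-1.17", "x86_64", "0.5.0-abcdef");
    let short = parameter_name("/example/prefix", "aws-k8s-1.17", "x86_64", "0.5.0");
    let latest = parameter_name("/example/prefix", "aws-k8s-1.17", "x86_64", "latest");
    // promote-ssm copies the value stored under `source` to `short`, then to `latest`.
    println!("{} -> {} -> {}", source, short, latest);
}
```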
Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/Cargo.lock | 28 +- tools/pubsys/Cargo.toml | 4 +- tools/pubsys/Infra.toml.example | 2 + tools/pubsys/policies/ssm/README.md | 30 ++ tools/pubsys/policies/ssm/defaults.toml | 7 + tools/pubsys/src/aws/ami/mod.rs | 45 ++- tools/pubsys/src/aws/client.rs | 11 + tools/pubsys/src/aws/mod.rs | 2 + tools/pubsys/src/aws/promote_ssm/mod.rs | 269 +++++++++++++++++ tools/pubsys/src/aws/publish_ami/mod.rs | 24 +- tools/pubsys/src/aws/ssm/mod.rs | 366 +++++++++++++++++++++++ tools/pubsys/src/aws/ssm/ssm.rs | 377 ++++++++++++++++++++++++ tools/pubsys/src/aws/ssm/template.rs | 216 ++++++++++++++ tools/pubsys/src/config.rs | 1 + tools/pubsys/src/main.rs | 27 +- 15 files changed, 1384 insertions(+), 25 deletions(-) create mode 100644 tools/pubsys/policies/ssm/README.md create mode 100644 tools/pubsys/policies/ssm/defaults.toml create mode 100644 tools/pubsys/src/aws/promote_ssm/mod.rs create mode 100644 tools/pubsys/src/aws/ssm/mod.rs create mode 100644 tools/pubsys/src/aws/ssm/ssm.rs create mode 100644 tools/pubsys/src/aws/ssm/template.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 287144b1..bf3f2199 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1332,6 +1332,7 @@ dependencies = [ "rusoto_ebs", "rusoto_ec2", "rusoto_signature 0.45.0", + "rusoto_ssm 0.45.0", "rusoto_sts", "semver 0.10.0", "serde", @@ -1340,6 +1341,7 @@ dependencies = [ "snafu", "structopt", "tempfile", + "tinytemplate", "tokio", "toml", "tough", @@ -1683,6 +1685,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "rusoto_ssm" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e4950a5600f4aab2eeb1f525d7843acbfbc7a720275d26c2afcddbb112ffd17" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core 0.45.0", + "serde", + "serde_json", +] + [[package]] name = "rusoto_sts" version = "0.45.0" @@ -2232,6 +2248,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tinytemplate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "0.3.3" @@ -2361,7 +2387,7 @@ checksum = "54e670640f67e719671a87fac948eabba0fd33633aa8be7804b38a1a1d2da32b" dependencies = [ "rusoto_core 0.44.0", "rusoto_credential 0.44.0", - "rusoto_ssm", + "rusoto_ssm 0.44.0", "serde", "serde_json", "snafu", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 696f15d1..fcee92c2 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -23,6 +23,7 @@ rusoto_credential = "0.45.0" rusoto_ebs = "0.45.0" rusoto_ec2 = "0.45.0" rusoto_signature = "0.45.0" +rusoto_ssm = "0.45.0" rusoto_sts = "0.45.0" simplelog = "0.8" snafu = "0.6" @@ -30,7 +31,8 @@ semver = "0.10.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } -tokio = "0.2.21" +tinytemplate = "1.1" +tokio = { version = "0.2.21", features = ["time"] } toml = "0.5" tough = { version = "0.8", features = ["http"] } tough-ssm = "0.3" diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 6446150a..55f1cdb1 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -38,6 +38,8 @@ regions = ["us-west-2", "us-east-1", "us-east-2"] profile = "my-profile" # If specified, we assume this role before making any API calls. 
role = "arn:aws:iam::012345678901:role/assume-global" +# If specified, this string will be prefixed on all parameter names published to SSM. +ssm_prefix = "/your/prefix/here" [aws.region.us-west-2] # If specified, we assume this role before making any API calls in this region. diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md new file mode 100644 index 00000000..57c47d8b --- /dev/null +++ b/tools/pubsys/policies/ssm/README.md @@ -0,0 +1,30 @@ +# Parameter templates + +Files in this directory contain template strings that are used to generate SSM parameter names and values. +You can pass a different directory to `pubsys` to use a different set of parameters. + +The directory is expected to contain a file named `defaults.toml` with a table entry per parameter, like this: + +``` +[[parameter]] +name = "{variant}/{arch}/{image_version}/image_id" +value = "{image_id}" +``` + +The `name` and `value` can contain template variables that will be replaced with information from the current build and from the AMI registered from that build. + +The available variables include: +* `variant`, for example "aws-k8s-1.17" +* `arch`, for example "x86_64" +* `image_id`, for example "ami-0123456789abcdef0" +* `image_name`, for example "bottlerocket-aws-k8s-1.17-x86_64-v0.5.0-e0ddf1b" +* `image_version`, for example "0.5.0-e0ddf1b" +* `region`, for example "us-west-2" + +# Overrides + +You can also add or override parameters that are specific to `variant` or `arch`. +To do so, create a directory named "variant" or "arch" inside parameters directory, and create a file named after the specific variant or arch for which you want overrides. + +For example, to add extra parameters just for the "aarch64" architecture, create `arch/aarch64.toml`. +Inside you can put the same types of `[[parameter]]` declarations that you see in `defaults.toml`, but they'll only be applied for `aarch64` builds. 
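For illustration, a minimal standalone sketch (not part of this change) of how such a template could be rendered with the `tinytemplate` crate added as a dependency here; the context struct and values are simplified stand-ins for what pubsys builds from the AMI input and build information:

```
use serde::Serialize;
use tinytemplate::TinyTemplate;

#[derive(Serialize)]
struct Context<'a> {
    variant: &'a str,
    arch: &'a str,
    image_version: &'a str,
    image_id: &'a str,
}

fn main() -> Result<(), tinytemplate::error::Error> {
    let mut tt = TinyTemplate::new();
    tt.add_template("name", "{variant}/{arch}/{image_version}/image_id")?;
    tt.add_template("value", "{image_id}")?;

    let context = Context {
        variant: "aws-k8s-1.17",
        arch: "x86_64",
        image_version: "0.5.0-e0ddf1b",
        image_id: "ami-0123456789abcdef0",
    };

    // Prints "aws-k8s-1.17/x86_64/0.5.0-e0ddf1b/image_id" and "ami-0123456789abcdef0".
    println!("{}", tt.render("name", &context)?);
    println!("{}", tt.render("value", &context)?);
    Ok(())
}
```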
diff --git a/tools/pubsys/policies/ssm/defaults.toml b/tools/pubsys/policies/ssm/defaults.toml new file mode 100644 index 00000000..5e972276 --- /dev/null +++ b/tools/pubsys/policies/ssm/defaults.toml @@ -0,0 +1,7 @@ +[[parameter]] +name = "{variant}/{arch}/{image_version}/image_id" +value = "{image_id}" + +[[parameter]] +name = "{variant}/{arch}/{image_version}/image_version" +value = "{image_version}" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 1ff662bf..fe9a2ef3 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -15,6 +15,7 @@ use register::{get_ami_id, register_image}; use rusoto_core::{Region, RusotoError}; use rusoto_ebs::EbsClient; use rusoto_ec2::{CopyImageError, CopyImageRequest, CopyImageResult, Ec2, Ec2Client}; +use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, VecDeque}; use std::fs::File; @@ -70,11 +71,11 @@ pub(crate) struct AmiArgs { /// Common entrypoint from main() pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { match _run(args, ami_args).await { - Ok(ami_ids) => { + Ok(amis) => { // Write the AMI IDs to file if requested if let Some(ref path) = ami_args.ami_output { let file = File::create(path).context(error::FileCreate { path })?; - serde_json::to_writer_pretty(file, &ami_ids).context(error::Serialize { path })?; + serde_json::to_writer_pretty(file, &amis).context(error::Serialize { path })?; info!("Wrote AMI data to {}", path.display()); } Ok(()) @@ -83,8 +84,8 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { } } -async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { - let mut ami_ids = HashMap::new(); +async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { + let mut amis = HashMap::new(); info!( "Using infra config from path: {}", @@ -160,11 +161,14 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result (new_id, false) }; - ami_ids.insert(base_region.name().to_string(), image_id.clone()); + amis.insert( + base_region.name().to_string(), + Image::new(&image_id, &ami_args.name), + ); // If we don't need to copy AMIs, we're done. if regions.is_empty() { - return Ok(ami_ids); + return Ok(amis); } // Wait for AMI to be available so it can be copied @@ -212,7 +216,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result region.name(), id ); - ami_ids.insert(region.name().to_string(), id.clone()); + amis.insert(region.name().to_string(), Image::new(&id, &ami_args.name)); continue; } let request = CopyImageRequest { @@ -240,7 +244,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result // If all target regions already have the AMI, we're done. if copy_requests.is_empty() { - return Ok(ami_ids); + return Ok(amis); } // Start requests; they return almost immediately and the copying work is done by the service @@ -268,7 +272,10 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result region.name(), image_id, ); - ami_ids.insert(region.name().to_string(), image_id); + amis.insert( + region.name().to_string(), + Image::new(&image_id, &ami_args.name), + ); } else { saw_error = true; error!( @@ -287,7 +294,25 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result ensure!(!saw_error, error::AmiCopy); - Ok(ami_ids) + Ok(amis) +} + +/// If JSON output was requested, we serialize out a mapping of region to AMI information; this +/// struct holds the information we save about each AMI. 
The `ssm` subcommand uses this +/// information to populate templates representing SSM parameter names and values. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct Image { + pub(crate) id: String, + pub(crate) name: String, +} + +impl Image { + fn new(id: &str, name: &str) -> Self { + Self { + id: id.to_string(), + name: name.to_string(), + } + } } mod error { diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index d73a76eb..b7f46e4e 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -7,6 +7,7 @@ use rusoto_credential::{ }; use rusoto_ebs::EbsClient; use rusoto_ec2::Ec2Client; +use rusoto_ssm::SsmClient; use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient}; use snafu::ResultExt; @@ -37,6 +38,16 @@ impl NewWith for Ec2Client { } } +impl NewWith for SsmClient { + fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self + where + P: ProvideAwsCredentials + Send + Sync + 'static, + D: DispatchSignedRequest + Send + Sync + 'static, + { + Self::new_with(request_dispatcher, credentials_provider, region) + } +} + /// Create a rusoto client of the given type using the given region and configuration. pub(crate) fn build_client( region: &Region, diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index 794d622e..faae16b7 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -6,7 +6,9 @@ use snafu::ResultExt; pub(crate) mod client; pub(crate) mod ami; +pub(crate) mod promote_ssm; pub(crate) mod publish_ami; +pub(crate) mod ssm; /// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, /// if specified in aws.region.REGION.endpoint. diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs new file mode 100644 index 00000000..f9061423 --- /dev/null +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -0,0 +1,269 @@ +//! The promote_ssm module owns the 'promote-ssm' subcommand and controls the process of copying +//! 
SSM parameters from one version to another + +use crate::aws::client::build_client; +use crate::aws::region_from_string; +use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; +use crate::config::InfraConfig; +use crate::Args; +use log::{info, trace}; +use rusoto_core::Region; +use rusoto_ssm::SsmClient; +use snafu::{ensure, ResultExt}; +use std::collections::HashMap; +use std::path::PathBuf; +use structopt::StructOpt; + +/// Copies sets of SSM parameters +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct PromoteArgs { + /// The architecture of the machine image + #[structopt(long)] + arch: String, + + /// The variant name for the current build + #[structopt(long)] + variant: String, + + /// Version number (or string) to copy from + #[structopt(long)] + source: String, + + /// Version number (or string) to copy to + #[structopt(long)] + target: String, + + /// Comma-separated list of regions to promote in, overriding Infra.toml + #[structopt(long, use_delimiter = true)] + regions: Vec, + + /// Directory holding the parameter template files + #[structopt(long)] + template_dir: PathBuf, +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { + info!( + "Promoting SSM parameters from {} to {}", + promote_args.source, promote_args.target + ); + + // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:#?}", infra_config); + let aws = infra_config.aws.unwrap_or_else(Default::default); + let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); + + // If the user gave an override list of regions, use that, otherwise use what's in the config. + let regions = if !promote_args.regions.is_empty() { + promote_args.regions.clone() + } else { + aws.regions.clone().into() + } + .into_iter() + .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) + .collect::>>()?; + + ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + let base_region = ®ions[0]; + + let mut ssm_clients = HashMap::with_capacity(regions.len()); + for region in ®ions { + let ssm_client = build_client::(region, &base_region, &aws).context(error::Client { + client_type: "SSM", + region: region.name(), + })?; + ssm_clients.insert(region.clone(), ssm_client); + } + + // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + // Non-image-specific context for building and rendering templates + let source_build_context = BuildContext { + variant: &promote_args.variant, + arch: &promote_args.arch, + image_version: &promote_args.source, + }; + + let target_build_context = BuildContext { + variant: &promote_args.variant, + arch: &promote_args.arch, + image_version: &promote_args.target, + }; + + info!( + "Parsing SSM parameter templates from {}", + promote_args.template_dir.display() + ); + // Doesn't matter which build context we use to find template files because version isn't used + // in their naming + let template_parameters = + template::get_parameters(&promote_args.template_dir, &source_build_context) + .context(error::FindTemplates)?; + + // Render parameter names into maps of {template string => rendered value}. 
We need the + // template strings so we can associate source parameters with target parameters that came + // from the same template, so we know what to copy. + let source_parameter_map = + template::render_parameter_names(&template_parameters, ssm_prefix, &source_build_context) + .context(error::RenderTemplates)?; + let target_parameter_map = + template::render_parameter_names(&template_parameters, ssm_prefix, &target_build_context) + .context(error::RenderTemplates)?; + + // Parameters are the same in each region, so we need to associate each region with each of + // the parameter names so we can fetch them. + let source_keys: Vec = regions + .iter() + .flat_map(|region| { + source_parameter_map + .values() + .map(move |name| SsmKey::new(region.clone(), name.clone())) + }) + .collect(); + let target_keys: Vec = regions + .iter() + .flat_map(|region| { + target_parameter_map + .values() + .map(move |name| SsmKey::new(region.clone(), name.clone())) + }) + .collect(); + + // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!("Getting current SSM parameters for source and target names"); + let current_source_parameters = ssm::get_parameters(&source_keys, &ssm_clients) + .await + .context(error::FetchSsm)?; + trace!( + "Current source SSM parameters: {:#?}", + current_source_parameters + ); + ensure!( + !current_source_parameters.is_empty(), + error::EmptySource { + version: &promote_args.source + } + ); + + let current_target_parameters = ssm::get_parameters(&target_keys, &ssm_clients) + .await + .context(error::FetchSsm)?; + trace!( + "Current target SSM parameters: {:#?}", + current_target_parameters + ); + + // Build a map of rendered source parameter names to rendered target parameter names. This + // will let us find which target parameters to set based on the source parameter names we get + // back from SSM. + let source_target_map: HashMap<&String, &String> = source_parameter_map + .iter() + .map(|(k, v)| (v, &target_parameter_map[k])) + .collect(); + + // Show the difference between source and target parameters in SSM. We use the + // source_target_map we built above to map source keys to target keys (generated from the same + // template) so that the diff code has common keys to compare. 
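A tiny standalone sketch of that remapping, using plain strings and hypothetical values rather than the `SsmKey` type and SSM responses used by the code that follows:

```
use std::collections::HashMap;

fn main() {
    // The same template rendered for the source and target versions.
    let source_name = "aws-k8s-1.17/x86_64/0.5.0-abcdef/image_id";
    let target_name = "aws-k8s-1.17/x86_64/latest/image_id";

    // Source name -> target name, for parameters rendered from the same template.
    let source_target = HashMap::from([(source_name, target_name)]);

    // What SSM currently holds under each name (hypothetical values).
    let current_source = HashMap::from([(source_name, "ami-111")]);
    let current_target = HashMap::from([(target_name, "ami-000")]);

    // Re-key the source values under the corresponding target names so both maps
    // share keys, then compare values to find what needs to be written.
    let wanted: HashMap<_, _> = current_source
        .into_iter()
        .map(|(src, value)| (source_target[src], value))
        .collect();

    for (name, value) in &wanted {
        if current_target.get(name) != Some(value) {
            println!("would set {} = {}", name, value);
        }
    }
}
```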
+ let set_parameters = key_difference( + ¤t_source_parameters + .into_iter() + .map(|(key, value)| { + ( + SsmKey::new(key.region, source_target_map[&key.name].to_string()), + value, + ) + }) + .collect(), + ¤t_target_parameters, + ); + if set_parameters.is_empty() { + info!("No changes necessary."); + return Ok(()); + } + + // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!("Setting updated SSM parameters."); + ssm::set_parameters(&set_parameters, &ssm_clients) + .await + .context(error::SetSsm)?; + + info!("Validating whether live parameters in SSM reflect changes."); + ssm::validate_parameters(&set_parameters, &ssm_clients) + .await + .context(error::ValidateSsm)?; + + info!("All parameters match requested values."); + Ok(()) +} + +mod error { + use crate::aws; + use crate::aws::ssm::{ssm, template}; + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] + Client { + client_type: String, + region: String, + source: aws::client::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { + source: crate::config::Error, + }, + + #[snafu(display("Found no parameters in source version {}", version))] + EmptySource { + version: String, + }, + + #[snafu(display("Failed to fetch parameters from SSM: {}", source))] + FetchSsm { + source: ssm::Error, + }, + + #[snafu(display("Failed to find templates: {}", source))] + FindTemplates { + source: template::Error, + }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { + missing: String, + }, + + ParseRegion { + source: crate::aws::Error, + }, + + #[snafu(display("Failed to render templates: {}", source))] + RenderTemplates { + source: template::Error, + }, + + #[snafu(display("Failed to set SSM parameters: {}", source))] + SetSsm { + source: ssm::Error, + }, + + ValidateSsm { + source: ssm::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 94dea6fe..6f596937 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -1,7 +1,7 @@ //! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting //! and revoking public access to EC2 AMIs. -use crate::aws::{client::build_client, region_from_string}; +use crate::aws::{ami::Image, client::build_client, region_from_string}; use crate::config::InfraConfig; use crate::Args; use futures::future::{join, ready}; @@ -60,7 +60,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { op: "open", path: &publish_args.ami_input, })?; - let mut ami_input: HashMap = + let mut ami_input: HashMap = serde_json::from_reader(file).context(error::Deserialize { path: &publish_args.ami_input, })?; @@ -110,14 +110,14 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // Parse region names, adding endpoints from InfraConfig if specified let mut amis = HashMap::with_capacity(regions.len()); for name in regions { - let ami_id = ami_input + let image = ami_input .remove(&name) // This could only happen if someone removes the check above... 
.with_context(|| error::UnknownRegions { regions: vec![name.clone()], })?; let region = region_from_string(&name, &aws).context(error::ParseRegion)?; - amis.insert(region, ami_id); + amis.insert(region, image); } // We make a map storing our regional clients because they're used in a future and need to @@ -144,21 +144,21 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { /// Returns a regional mapping of snapshot IDs associated with the given AMIs. async fn get_snapshots( - amis: &HashMap, + amis: &HashMap, clients: &HashMap, ) -> Result>> { // Build requests for image information. let mut describe_requests = Vec::with_capacity(amis.len()); - for (region, image_id) in amis { + for (region, image) in amis { let ec2_client = &clients[region]; let describe_request = DescribeImagesRequest { - image_ids: Some(vec![image_id.to_string()]), + image_ids: Some(vec![image.id.to_string()]), ..Default::default() }; let describe_future = ec2_client.describe_images(describe_request); // Store the region and image ID so we can include it in errors - let info_future = ready((region.clone(), image_id.clone())); + let info_future = ready((region.clone(), image.id.clone())); describe_requests.push(join(info_future, describe_future)); } @@ -300,25 +300,25 @@ async fn modify_snapshots( /// Modify image attributes to make them public/private as requested. async fn modify_images( - images: &HashMap, + images: &HashMap, clients: &HashMap, operation: String, ) -> Result<()> { // Build requests to modify image attributes. let mut modify_image_requests = Vec::new(); - for (region, image_id) in images { + for (region, image) in images { let ec2_client = &clients[region]; let modify_image_request = ModifyImageAttributeRequest { attribute: Some("launchPermission".to_string()), user_groups: Some(vec!["all".to_string()]), operation_type: Some(operation.clone()), - image_id: image_id.clone(), + image_id: image.id.clone(), ..Default::default() }; let modify_image_future = ec2_client.modify_image_attribute(modify_image_request); // Store the region and image ID so we can include it in errors - let info_future = ready((region.name().to_string(), image_id.clone())); + let info_future = ready((region.name().to_string(), image.id.clone())); modify_image_requests.push(join(info_future, modify_image_future)); } diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs new file mode 100644 index 00000000..0baa97f6 --- /dev/null +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -0,0 +1,366 @@ +//! The ssm module owns the 'ssm' subcommand and controls the process of setting SSM parameters +//! 
based on current build information + +pub(crate) mod ssm; +pub(crate) mod template; + +use crate::aws::{ami::Image, client::build_client, region_from_string}; +use crate::config::{AwsConfig, InfraConfig}; +use crate::Args; +use log::{info, trace}; +use rusoto_core::Region; +use rusoto_ssm::SsmClient; +use serde::Serialize; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::iter::FromIterator; +use std::path::PathBuf; +use structopt::StructOpt; + +/// Sets SSM parameters based on current build information +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct SsmArgs { + // This is JSON output from `pubsys ami` like `{"us-west-2": "ami-123"}` + /// Path to the JSON file containing regional AMI IDs to modify + #[structopt(long, parse(from_os_str))] + ami_input: PathBuf, + + /// The architecture of the machine image + #[structopt(long)] + arch: String, + + /// The variant name for the current build + #[structopt(long)] + variant: String, + + /// The version of the current build + #[structopt(long)] + version: String, + + /// Regions where you want parameters published + #[structopt(long, use_delimiter = true)] + regions: Vec, + + /// Directory holding the parameter template files + #[structopt(long)] + template_dir: PathBuf, + + /// Allows overwrite of existing parameters + #[structopt(long)] + allow_clobber: bool, +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { + // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:#?}", infra_config); + let aws = infra_config.aws.unwrap_or_else(Default::default); + let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); + + // If the user gave an override list of regions, use that, otherwise use what's in the config. 
+ let regions = if !ssm_args.regions.is_empty() { + ssm_args.regions.clone() + } else { + aws.regions.clone().into() + }; + ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; + + let amis = parse_ami_input(®ions, &ssm_args, &aws)?; + + let mut ssm_clients = HashMap::with_capacity(amis.len()); + for region in amis.keys() { + let ssm_client = build_client::(®ion, &base_region, &aws).context(error::Client { + client_type: "SSM", + region: region.name(), + })?; + ssm_clients.insert(region.clone(), ssm_client); + } + + // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + // Non-image-specific context for building and rendering templates + let build_context = BuildContext { + variant: &ssm_args.variant, + arch: &ssm_args.arch, + image_version: &ssm_args.version, + }; + + info!( + "Parsing SSM parameter templates from {}", + ssm_args.template_dir.display() + ); + let template_parameters = template::get_parameters(&ssm_args.template_dir, &build_context) + .context(error::FindTemplates)?; + + let new_parameters = + template::render_parameters(template_parameters, amis, ssm_prefix, &build_context) + .context(error::RenderTemplates)?; + trace!("Generated templated parameters: {:#?}", new_parameters); + + // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!("Getting current SSM parameters"); + let new_parameter_names: Vec<&SsmKey> = new_parameters.keys().collect(); + let current_parameters = ssm::get_parameters(&new_parameter_names, &ssm_clients) + .await + .context(error::FetchSsm)?; + trace!("Current SSM parameters: {:#?}", current_parameters); + + // Show the difference between source and target parameters in SSM. + let parameters_to_set = key_difference(&new_parameters, ¤t_parameters); + if parameters_to_set.is_empty() { + info!("No changes necessary."); + return Ok(()); + } + + // Unless the user wants to allow it, make sure we're not going to overwrite any existing + // keys. 
+ if !ssm_args.allow_clobber { + let current_keys: HashSet<&SsmKey> = current_parameters.keys().collect(); + let new_keys: HashSet<&SsmKey> = parameters_to_set.keys().collect(); + ensure!(current_keys.is_disjoint(&new_keys), error::NoClobber); + } + + // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + + info!("Setting updated SSM parameters."); + ssm::set_parameters(¶meters_to_set, &ssm_clients) + .await + .context(error::SetSsm)?; + + info!("Validating whether live parameters in SSM reflect changes."); + ssm::validate_parameters(¶meters_to_set, &ssm_clients) + .await + .context(error::ValidateSsm)?; + + info!("All parameters match requested values."); + Ok(()) +} + +/// The key to a unique SSM parameter +#[derive(Debug, Eq, Hash, PartialEq)] +pub(crate) struct SsmKey { + pub(crate) region: Region, + pub(crate) name: String, +} + +impl SsmKey { + pub(crate) fn new(region: Region, name: String) -> Self { + Self { region, name } + } +} + +impl AsRef for SsmKey { + fn as_ref(&self) -> &Self { + self + } +} + +/// Non-image-specific context for building and rendering templates +#[derive(Debug, Serialize)] +pub(crate) struct BuildContext<'a> { + pub(crate) variant: &'a str, + pub(crate) arch: &'a str, + pub(crate) image_version: &'a str, +} + +/// A map of SsmKey to its value +type SsmParameters = HashMap; + +/// Parse the AMI input file +fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs, aws: &AwsConfig) -> Result> { + info!("Using AMI data from path: {}", ssm_args.ami_input.display()); + let file = File::open(&ssm_args.ami_input).context(error::File { + op: "open", + path: &ssm_args.ami_input, + })?; + let mut ami_input: HashMap = + serde_json::from_reader(file).context(error::Deserialize { + path: &ssm_args.ami_input, + })?; + trace!("Parsed AMI input: {:#?}", ami_input); + + // pubsys will not create a file if it did not create AMIs, so we should only have an empty + // file if a user created one manually, and they shouldn't be creating an empty file. + ensure!( + !ami_input.is_empty(), + error::Input { + path: &ssm_args.ami_input + } + ); + + // Check that the requested regions are a subset of the regions we *could* publish from the AMI + // input JSON. + let requested_regions = HashSet::from_iter(regions.iter()); + let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); + ensure!( + requested_regions.is_subset(&known_regions), + error::UnknownRegions { + regions: requested_regions + .difference(&known_regions) + .map(|s| s.to_string()) + .collect::>(), + } + ); + + // Parse region names, adding endpoints from InfraConfig if specified + let mut amis = HashMap::with_capacity(regions.len()); + for name in regions { + let image = ami_input + .remove(name) + // This could only happen if someone removes the check above... + .with_context(|| error::UnknownRegions { + regions: vec![name.clone()], + })?; + let region = region_from_string(&name, &aws).context(error::ParseRegion)?; + amis.insert(region, image); + } + + Ok(amis) +} + +/// Shows the user the difference between two sets of parameters. We look for parameters in +/// `wanted` that are either missing or changed in `current`. We print these differences for the +/// user, then return the `wanted` values. 
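As a concrete illustration of that behavior (hypothetical parameter names and values; the real function defined next operates on `SsmKey`s rather than plain strings):

```
use std::collections::HashMap;

fn main() {
    let wanted = HashMap::from([
        ("a/image_id", "ami-222"),     // changed
        ("a/image_version", "1.0.0"),  // unchanged
        ("b/image_id", "ami-333"),     // new
    ]);
    let current = HashMap::from([
        ("a/image_id", "ami-111"),
        ("a/image_version", "1.0.0"),
    ]);

    // Only parameters that are missing from `current`, or that have a different
    // value there, end up in the set to write; extras in `current` are ignored.
    let to_set: HashMap<&str, &str> = wanted
        .into_iter()
        .filter(|(name, value)| current.get(name) != Some(value))
        .collect();

    assert_eq!(to_set.len(), 2); // a/image_id and b/image_id
}
```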
+pub(crate) fn key_difference(wanted: &SsmParameters, current: &SsmParameters) -> SsmParameters { + let mut parameters_to_set = HashMap::new(); + + let wanted_keys: HashSet<&SsmKey> = wanted.keys().collect(); + let current_keys: HashSet<&SsmKey> = current.keys().collect(); + + for key in wanted_keys.difference(¤t_keys) { + let new_value = &wanted[key]; + println!( + "{} - {} - new parameter:\n new value: {}", + key.name, + key.region.name(), + new_value, + ); + parameters_to_set.insert( + SsmKey::new(key.region.clone(), key.name.clone()), + new_value.clone(), + ); + } + + for key in wanted_keys.intersection(¤t_keys) { + let current_value = ¤t[key]; + let new_value = &wanted[key]; + + if current_value == new_value { + println!("{} - {} - no change", key.name, key.region.name()); + } else { + println!( + "{} - {} - changing value:\n old value: {}\n new value: {}", + key.name, + key.region.name(), + current_value, + new_value + ); + parameters_to_set.insert( + SsmKey::new(key.region.clone(), key.name.clone()), + new_value.clone(), + ); + } + } + // Note: don't care about items that are in current but not wanted; that could happen if you + // remove a parameter from your templates, for example. + + parameters_to_set +} + +mod error { + use crate::aws; + use crate::aws::ssm::{ssm, template}; + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] + Client { + client_type: String, + region: String, + source: aws::client::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { + source: crate::config::Error, + }, + + #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] + Deserialize { + path: PathBuf, + source: serde_json::Error, + }, + + #[snafu(display("Failed to fetch parameters from SSM: {}", source))] + FetchSsm { + source: ssm::Error, + }, + + #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] + File { + op: String, + path: PathBuf, + source: io::Error, + }, + + #[snafu(display("Failed to find templates: {}", source))] + FindTemplates { + source: template::Error, + }, + + #[snafu(display("Input '{}' is empty", path.display()))] + Input { + path: PathBuf, + }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { + missing: String, + }, + + #[snafu(display("Cowardly refusing to overwrite parameters without ALLOW_CLOBBER"))] + NoClobber, + + ParseRegion { + source: crate::aws::Error, + }, + + #[snafu(display("Failed to render templates: {}", source))] + RenderTemplates { + source: template::Error, + }, + + #[snafu(display("Failed to set SSM parameters: {}", source))] + SetSsm { + source: ssm::Error, + }, + + #[snafu(display( + "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", + regions.join(", ") + ))] + UnknownRegions { + regions: Vec, + }, + + ValidateSsm { + source: ssm::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs new file mode 100644 index 00000000..9469e638 --- /dev/null +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -0,0 +1,377 @@ +//! The ssm module owns the getting and setting of parameters in SSM. 
+ +use super::{SsmKey, SsmParameters}; +use futures::future::{join, ready}; +use futures::stream::{self, StreamExt}; +use log::{debug, error, trace, warn}; +use rusoto_core::{Region, RusotoError}; +use rusoto_ssm::{ + GetParametersError, GetParametersRequest, GetParametersResult, PutParameterError, + PutParameterRequest, PutParameterResult, Ssm, SsmClient, +}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::{HashMap, HashSet}; +use std::time::Duration; +use tokio::time::throttle; + +/// Fetches the values of the given SSM keys using the given clients +// TODO: We can batch GET requests so throttling is less likely here, but if we need to handle +// hundreds of parameters for a given build, we could use the throttling logic from +// `set_parameters` +pub(crate) async fn get_parameters( + requested: &[K], + clients: &HashMap, +) -> Result +where + K: AsRef, +{ + // Build requests for parameters; we have to request with a regional client so we split them by + // region + let mut requests = Vec::with_capacity(requested.len()); + let mut regional_names: HashMap> = HashMap::new(); + for key in requested { + let SsmKey { region, name } = key.as_ref(); + regional_names + .entry(region.clone()) + .or_default() + .push(name.clone()); + } + for (region, names) in regional_names { + // At most 10 parameters can be requested at a time. + for names_chunk in names.chunks(10) { + trace!("Requesting {:?} in {}", names_chunk, region.name()); + let ssm_client = &clients[®ion]; + let len = names_chunk.len(); + let get_request = GetParametersRequest { + names: names_chunk.to_vec(), + ..Default::default() + }; + let get_future = ssm_client.get_parameters(get_request); + + // Store the region so we can include it in errors and the output map + let info_future = ready((region.clone(), len)); + requests.push(join(info_future, get_future)); + } + } + + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(requests).buffer_unordered(4); + let responses: Vec<( + (Region, usize), + std::result::Result>, + )> = request_stream.collect().await; + + // If you're checking parameters in a region you haven't pushed to before, you can get an + // error here about the parameter's namespace being new. We want to treat these as new + // parameters rather than failing. Unfortunately, we don't know which parameter in the region + // was considered new, but we expect that most people are publishing all of their parameters + // under the same namespace, so treating the whole region as new is OK. We use this just to + // warn the user. + let mut new_regions = HashSet::new(); + + // For each existing parameter in the response, get the name and value for our output map. + let mut parameters = HashMap::with_capacity(requested.len()); + for ((region, expected_len), response) in responses { + // Get the image description, ensuring we only have one. + let response = match response { + Ok(response) => response, + Err(e) => { + // Note: there's no structured error type for this so we have to string match. + if e.to_string().contains("is not a valid namespace") { + new_regions.insert(region.name().to_string()); + continue; + } else { + return Err(e).context(error::GetParameters { + region: region.name(), + }); + } + } + }; + + // Check that we received a response including every parameter + // Note: response.invalid_parameters includes both new parameters and ill-formatted + // parameter names... 
+ let valid_count = response.parameters.as_ref().map(|v| v.len()).unwrap_or(0); + let invalid_count = response.invalid_parameters.map(|v| v.len()).unwrap_or(0); + let total_count = valid_count + invalid_count; + ensure!( + total_count == expected_len, + error::MissingInResponse { + region: region.name(), + request_type: "GetParameters", + missing: format!( + "parameters - got {}, expected {}", + total_count, expected_len + ), + } + ); + + // Save the successful parameters + if let Some(valid_parameters) = response.parameters { + if !valid_parameters.is_empty() { + for parameter in valid_parameters { + let name = parameter.name.context(error::MissingInResponse { + region: region.name(), + request_type: "GetParameters", + missing: "parameter name", + })?; + let value = parameter.value.context(error::MissingInResponse { + region: region.name(), + request_type: "GetParameters", + missing: format!("value for parameter {}", name), + })?; + parameters.insert(SsmKey::new(region.clone(), name), value); + } + } + } + } + + for region in new_regions { + warn!( + "Invalid namespace in {}, this is OK for the first publish in a region", + region + ); + } + + Ok(parameters) +} + +/// Sets the values of the given SSM keys using the given clients +pub(crate) async fn set_parameters( + parameters_to_set: &SsmParameters, + ssm_clients: &HashMap, +) -> Result<()> { + // Start with a small delay between requests, and increase if we get throttled. + let mut request_interval = Duration::from_millis(100); + let max_interval = Duration::from_millis(1600); + let interval_factor = 2; + let mut should_increase_interval = false; + + // We run all requests in a batch, and any failed requests are added to the next batch for + // retry + let mut failed_parameters: HashMap)>> = HashMap::new(); + let max_failures = 5; + + /// Stores the values we need to be able to retry requests + struct RequestContext<'a> { + region: &'a Region, + name: &'a str, + value: &'a str, + failures: u8, + } + + // Create the initial request contexts + let mut contexts = Vec::new(); + for (SsmKey { region, name }, value) in parameters_to_set { + contexts.push(RequestContext { + region, + name, + value, + failures: 0, + }); + } + let total_count = contexts.len(); + + // We drain requests out of the contexts list and put them back if we need to retry; we do this + // until all requests have succeeded or we've hit the max failures + while !contexts.is_empty() { + debug!("Starting {} SSM put requests", contexts.len()); + + if should_increase_interval { + request_interval *= interval_factor; + warn!( + "Requests were throttled, increasing interval to {:?}", + request_interval + ); + } + should_increase_interval = false; + + ensure!( + request_interval <= max_interval, + error::Throttled { max_interval } + ); + + // Build requests for parameters. We need to group them by region so we can run each + // region in parallel. Each region's stream will be throttled to run one request per + // request_interval. + let mut regional_requests = HashMap::new(); + // Remove contexts from the list with drain; they get added back in if we retry the + // request. + for context in contexts.drain(..) 
{ + let ssm_client = &ssm_clients[&context.region]; + let put_request = PutParameterRequest { + name: context.name.to_string(), + value: context.value.to_string(), + overwrite: Some(true), + type_: Some("String".to_string()), + ..Default::default() + }; + let put_future = ssm_client.put_parameter(put_request); + + let regional_list = regional_requests + .entry(context.region) + .or_insert_with(Vec::new); + // Store the context so we can retry as needed + regional_list.push(join(ready(context), put_future)); + } + + // Create a throttled stream per region; throttling applies per region. (Request futures + // are already regional, by virtue of being created with a regional client, so we don't + // need the region again here.) + let mut throttled_streams = Vec::new(); + for (_region, request_list) in regional_requests { + throttled_streams.push(throttle(request_interval, stream::iter(request_list))); + } + + // Run all regions in parallel and wait for responses. + let parallel_requests = stream::select_all(throttled_streams).buffer_unordered(4); + let responses: Vec<( + RequestContext<'_>, + std::result::Result>, + )> = parallel_requests.collect().await; + + // For each error response, check if we should retry or bail. + for (context, response) in responses { + if let Err(e) = response { + // Throttling errors in Rusoto are structured like this: + // RusotoError::Unknown(BufferedHttpResponse {status: 400, body: "{\"__type\":\"ThrottlingException\",\"message\":\"Rate exceeded\"}", headers: ...}) + // Even if we were to do a structural match, we would still have to string match + // the body of the error. Simpler to match the string form. + if e.to_string().contains("ThrottlingException") { + // We only want to increase the interval once per loop, not once per error, + // because when you get throttled you're likely to get a bunch of throttling + // errors at once. + should_increase_interval = true; + // Retry the request without increasing the failure counter; the request didn't + // fail, a throttle means we couldn't even make the request. + contexts.push(context); + // -1 so we don't try again next loop; this keeps failure checking in one place + } else if context.failures >= max_failures - 1 { + // Past max failures, store the failure for reporting, don't retry. + failed_parameters + .entry(context.region.clone()) + .or_default() + .push((context.name.to_string(), e)); + } else { + // Increase failure counter and try again. 
+ let context = RequestContext { + failures: context.failures + 1, + ..context + }; + debug!( + "Request attempt {} of {} failed in {}: {}", + context.failures, + max_failures, + context.region.name(), + e + ); + contexts.push(context); + } + } + } + } + + if !failed_parameters.is_empty() { + for (region, failures) in &failed_parameters { + for (parameter, error) in failures { + error!( + "Failed to set {} in {}: {}", + parameter, + region.name(), + error + ); + } + } + return error::SetParameters { + failure_count: failed_parameters.len(), + total_count, + } + .fail(); + } + + Ok(()) +} + +/// Fetch the given parameters, and ensure the live values match the given values +pub(crate) async fn validate_parameters( + expected_parameters: &SsmParameters, + ssm_clients: &HashMap, +) -> Result<()> { + // Fetch the given parameter names + let expected_parameter_names: Vec<&SsmKey> = expected_parameters.keys().collect(); + let updated_parameters = get_parameters(&expected_parameter_names, &ssm_clients).await?; + + // Walk through and check each value + let mut success = true; + for (expected_key, expected_value) in expected_parameters { + let SsmKey { + region: expected_region, + name: expected_name, + } = expected_key; + // All parameters should have a value, and it should match the given value, otherwise the + // parameter wasn't updated / created. + if let Some(updated_value) = updated_parameters.get(expected_key) { + if updated_value != expected_value { + error!( + "Failed to set {} in {}", + expected_name, + expected_region.name() + ); + success = false; + } + } else { + error!( + "{} in {} still doesn't exist", + expected_name, + expected_region.name() + ); + success = false; + } + } + ensure!(success, error::ValidateParameters); + + Ok(()) +} + +mod error { + use rusoto_core::RusotoError; + use rusoto_ssm::GetParametersError; + use snafu::Snafu; + use std::time::Duration; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source))] + GetParameters { + region: String, + source: RusotoError, + }, + + #[snafu(display("Response to {} was missing {}", request_type, missing))] + MissingInResponse { + region: String, + request_type: String, + missing: String, + }, + + #[snafu(display("Failed to {} of {} parameters; see above", failure_count, total_count))] + SetParameters { + failure_count: usize, + total_count: usize, + }, + + #[snafu(display( + "SSM requests throttled too many times, went beyond our max interval {:?}", + max_interval + ))] + Throttled { + max_interval: Duration, + }, + + #[snafu(display("Failed to validate all changes; see above."))] + ValidateParameters, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs new file mode 100644 index 00000000..e1561442 --- /dev/null +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -0,0 +1,216 @@ +//! The template module owns the finding and rendering of parameter templates that used to generate +//! SSM parameter names and values. 
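The retry loop in `set_parameters` above amounts to a capped exponential backoff: start at 100ms between requests, double whenever a batch sees throttling, and bail out once the interval would exceed 1600ms. A small sketch of just that policy, using a hypothetical helper name and the same constants:

```
use std::time::Duration;

// Sketch of the interval policy from set_parameters above.
// Returns None once we have backed off past the maximum and should give up.
fn next_interval(current: Duration, was_throttled: bool) -> Option<Duration> {
    let max_interval = Duration::from_millis(1600);
    if !was_throttled {
        return Some(current);
    }
    let doubled = current * 2;
    if doubled > max_interval {
        None // corresponds to the error::Throttled bail-out
    } else {
        Some(doubled)
    }
}
```

Throttling only affects the shared interval; individual request failures are tracked separately, with up to five attempts per parameter before the failure is reported.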
+ +use super::{BuildContext, SsmKey, SsmParameters}; +use crate::aws::ami::Image; +use log::{info, trace}; +use rusoto_core::Region; +use serde::{Deserialize, Serialize}; +use snafu::{ensure, ResultExt}; +use std::collections::HashMap; +use std::fs; +use std::path::Path; +use tinytemplate::TinyTemplate; + +/// Represents a single SSM parameter +#[derive(Debug, Deserialize)] +pub(crate) struct TemplateParameter { + pub(crate) name: String, + pub(crate) value: String, +} + +/// Represents a set of SSM parameters, in a format that allows for clear definition of +/// parameters in TOML files +#[derive(Debug, Deserialize)] +pub(crate) struct TemplateParameters { + // In a TOML table, it's clearer to define a single entry as a "parameter". + #[serde(default, rename = "parameter")] + pub(crate) parameters: Vec, +} + +impl TemplateParameters { + fn extend(&mut self, other: Self) { + self.parameters.extend(other.parameters) + } +} + +/// Finds and deserializes template parameters from the template directory, taking into account +/// overrides requested by the user +pub(crate) fn get_parameters( + template_dir: &Path, + build_context: &BuildContext<'_>, +) -> Result { + let defaults_path = template_dir.join("defaults.toml"); + let defaults_str = fs::read_to_string(&defaults_path).context(error::File { + op: "read", + path: &defaults_path, + })?; + let mut template_parameters: TemplateParameters = + toml::from_str(&defaults_str).context(error::InvalidToml { + path: &defaults_path, + })?; + trace!("Parsed default templates: {:#?}", template_parameters); + + // Allow the user to add/override parameters specific to variant or arch. Because these are + // added after the defaults, they will take precedence. (It doesn't make sense to override + // based on the version argument.) 
+ let mut context = HashMap::new(); + context.insert("variant", build_context.variant); + context.insert("arch", build_context.arch); + for (key, value) in context { + let override_path = template_dir.join(key).join(format!("{}.toml", value)); + if override_path.exists() { + info!( + "Parsing SSM parameter overrides from {}", + override_path.display() + ); + let template_str = fs::read_to_string(&override_path).context(error::File { + op: "read", + path: &override_path, + })?; + let override_parameters: TemplateParameters = + toml::from_str(&template_str).context(error::InvalidToml { + path: &override_path, + })?; + trace!("Parsed override templates: {:#?}", override_parameters); + template_parameters.extend(override_parameters); + } + } + + ensure!( + !template_parameters.parameters.is_empty(), + error::NoTemplates { path: template_dir } + ); + + Ok(template_parameters) +} + +/// Render the given template parameters using the data from the given AMIs +pub(crate) fn render_parameters( + template_parameters: TemplateParameters, + amis: HashMap, + ssm_prefix: &str, + build_context: &BuildContext<'_>, +) -> Result { + /// Values that we allow as template variables + #[derive(Debug, Serialize)] + struct TemplateContext<'a> { + variant: &'a str, + arch: &'a str, + image_id: &'a str, + image_name: &'a str, + image_version: &'a str, + region: &'a str, + } + let mut new_parameters = HashMap::new(); + for (region, image) in amis { + let context = TemplateContext { + variant: build_context.variant, + arch: build_context.arch, + image_id: &image.id, + image_name: &image.name, + image_version: build_context.image_version, + region: region.name(), + }; + + for tp in &template_parameters.parameters { + let mut tt = TinyTemplate::new(); + tt.add_template("name", &tp.name) + .context(error::AddTemplate { template: &tp.name })?; + tt.add_template("value", &tp.value) + .context(error::AddTemplate { + template: &tp.value, + })?; + let name_suffix = tt + .render("name", &context) + .context(error::RenderTemplate { template: &tp.name })?; + let value = tt + .render("value", &context) + .context(error::RenderTemplate { + template: &tp.value, + })?; + + new_parameters.insert( + SsmKey::new(region.clone(), join_name(ssm_prefix, &name_suffix)), + value, + ); + } + } + + Ok(new_parameters) +} + +/// Render the names of the given template parameters using the fixed data about the current build. 
+/// Returns a mapping of templated name to rendered name, so we can associate rendered names to a +/// common source name +pub(crate) fn render_parameter_names( + template_parameters: &TemplateParameters, + ssm_prefix: &str, + build_context: &BuildContext<'_>, +) -> Result> { + let mut new_parameters = HashMap::new(); + for tp in &template_parameters.parameters { + let mut tt = TinyTemplate::new(); + tt.add_template("name", &tp.name) + .context(error::AddTemplate { template: &tp.name })?; + let name_suffix = tt + .render("name", &build_context) + .context(error::RenderTemplate { template: &tp.name })?; + new_parameters.insert(tp.name.clone(), join_name(ssm_prefix, &name_suffix)); + } + + Ok(new_parameters) +} + +/// Make sure prefix and parameter name are separated by one slash +fn join_name(ssm_prefix: &str, name_suffix: &str) -> String { + if ssm_prefix.ends_with('/') && name_suffix.starts_with('/') { + format!("{}{}", ssm_prefix, &name_suffix[1..]) + } else if ssm_prefix.ends_with('/') || name_suffix.starts_with('/') { + format!("{}{}", ssm_prefix, name_suffix) + } else { + format!("{}/{}", ssm_prefix, name_suffix) + } +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error building template from '{}': {}", template, source))] + AddTemplate { + template: String, + source: tinytemplate::error::Error, + }, + + #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] + File { + op: String, + path: PathBuf, + source: io::Error, + }, + + #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] + InvalidToml { + path: PathBuf, + source: toml::de::Error, + }, + + #[snafu(display("Found no parameter templates in {}", path.display()))] + NoTemplates { + path: PathBuf, + }, + + #[snafu(display("Error rendering template from '{}': {}", template, source))] + RenderTemplate { + template: String, + source: tinytemplate::error::Error, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs index d4489a43..855c75df 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys/src/config.rs @@ -42,6 +42,7 @@ pub(crate) struct AwsConfig { pub(crate) profile: Option, #[serde(default)] pub(crate) region: HashMap, + pub(crate) ssm_prefix: Option, } /// AWS region-specific configuration diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index da8292cb..1aa83906 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -5,9 +5,10 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch * registering and copying EC2 AMIs * Marking EC2 AMIs public (or private again) +* setting SSM parameters based on built AMIs +* promoting SSM parameters from versioned entries to named (e.g. 
'latest') To be implemented: -* updating SSM parameters * high-level document describing pubsys usage with examples Configuration comes from: @@ -56,6 +57,18 @@ fn run() -> Result<()> { .context(error::PublishAmi) }) } + SubCommand::Ssm(ref ssm_args) => { + let mut rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { aws::ssm::run(&args, &ssm_args).await.context(error::Ssm) }) + } + SubCommand::PromoteSsm(ref promote_args) => { + let mut rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { + aws::promote_ssm::run(&args, &promote_args) + .await + .context(error::PromoteSsm) + }) + } } } @@ -85,8 +98,12 @@ struct Args { #[derive(Debug, StructOpt)] enum SubCommand { Repo(repo::RepoArgs), + Ami(aws::ami::AmiArgs), PublishAmi(aws::publish_ami::PublishArgs), + + Ssm(aws::ssm::SsmArgs), + PromoteSsm(aws::promote_ssm::PromoteArgs), } /// Parses a SemVer, stripping a leading 'v' if present @@ -126,11 +143,19 @@ mod error { source: crate::aws::publish_ami::Error, }, + #[snafu(display("Failed to promote SSM: {}", source))] + PromoteSsm { + source: crate::aws::promote_ssm::Error, + }, + #[snafu(display("Failed to build repo: {}", source))] Repo { source: crate::repo::Error }, #[snafu(display("Failed to create async runtime: {}", source))] Runtime { source: std::io::Error }, + + #[snafu(display("Failed to update SSM: {}", source))] + Ssm { source: crate::aws::ssm::Error }, } } type Result = std::result::Result; From 9c78a2420560d939d3665c658d36463e07e6d822 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Sun, 23 Aug 2020 19:10:20 +0000 Subject: [PATCH 0329/1356] pubsys: grant AMI permissions to target account before copying If you specify regional roles in Infra.toml that refer to different accounts, those accounts need access to the AMI and its snapshots before it can copy the AMI. 
This change adds the missing grants: * If we find an existing AMI, we describe its snapshots so we can grant them * publish_ami functions were split so we can handle single regions or lists of regions more easily * Ask STS for the account IDs of the given roles (removing the original account ID that already has access) * modify_snapshots and modify_images were updated with user/group parameters rather than hardcoding "all" * A new RegisteredIds struct is used to pass around the image and snapshot IDs to grant Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 230 ++++++++++++-- tools/pubsys/src/aws/ami/register.rs | 23 +- tools/pubsys/src/aws/client.rs | 16 +- tools/pubsys/src/aws/publish_ami/mod.rs | 395 +++++++++++++++--------- 4 files changed, 486 insertions(+), 178 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index fe9a2ef3..f6274daa 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -5,19 +5,23 @@ mod register; mod snapshot; mod wait; +use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; use crate::aws::{client::build_client, region_from_string}; -use crate::config::InfraConfig; +use crate::config::{AwsConfig, InfraConfig}; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; use log::{error, info, trace}; -use register::{get_ami_id, register_image}; +use register::{get_ami_id, register_image, RegisteredIds}; use rusoto_core::{Region, RusotoError}; use rusoto_ebs::EbsClient; use rusoto_ec2::{CopyImageError, CopyImageRequest, CopyImageResult, Ec2, Ec2Client}; +use rusoto_sts::{ + GetCallerIdentityError, GetCallerIdentityRequest, GetCallerIdentityResponse, Sts, StsClient, +}; use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, VecDeque}; +use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::PathBuf; use structopt::StructOpt; @@ -98,28 +102,35 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // If the user gave an override list of regions, use that, otherwise use what's in the config. let mut regions = if !ami_args.regions.is_empty() { - VecDeque::from(ami_args.regions.clone()) + ami_args.regions.clone() } else { - aws.regions.clone() + aws.regions.clone().into() } .into_iter() .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) - .collect::>>()?; + .collect::>>()?; + + ensure!( + !regions.is_empty(), + error::MissingConfig { + missing: "aws.regions" + } + ); // We register in this base region first, then copy from there to any other regions. 
- let base_region = regions.pop_front().context(error::MissingConfig { - missing: "aws.regions", - })?; + let base_region = regions.remove(0); // Build EBS client for snapshot management, and EC2 client for registration - let ebs_client = build_client::(&base_region, &base_region, &aws).context(error::Client { - client_type: "EBS", - region: base_region.name(), - })?; - let ec2_client = build_client::(&base_region, &base_region, &aws).context(error::Client { - client_type: "EC2", - region: base_region.name(), - })?; + let ebs_client = + build_client::(&base_region, &base_region, &aws).context(error::Client { + client_type: "EBS", + region: base_region.name(), + })?; + let ec2_client = + build_client::(&base_region, &base_region, &aws).context(error::Client { + client_type: "EC2", + region: base_region.name(), + })?; // Check if the AMI already exists, in which case we can use the existing ID, otherwise we // register a new one. @@ -136,16 +147,26 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> region: base_region.name(), })?; - let (image_id, already_registered) = if let Some(found_id) = maybe_id { + let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { info!( "Found '{}' already registered in {}: {}", ami_args.name, base_region.name(), found_id ); - (found_id, true) + let snapshot_ids = get_snapshots(&found_id, &base_region, &ec2_client) + .await + .context(error::GetSnapshots { + image_id: &found_id, + region: base_region.name(), + })?; + let found_ids = RegisteredIds { + image_id: found_id, + snapshot_ids, + }; + (found_ids, true) } else { - let new_id = register_image(ami_args, base_region.name(), ebs_client, &ec2_client) + let new_ids = register_image(ami_args, base_region.name(), ebs_client, &ec2_client) .await .context(error::RegisterImage { name: &ami_args.name, @@ -156,14 +177,14 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> "Registered AMI '{}' in {}: {}", ami_args.name, base_region.name(), - new_id + new_ids.image_id ); - (new_id, false) + (new_ids, false) }; amis.insert( base_region.name().to_string(), - Image::new(&image_id, &ami_args.name), + Image::new(&ids_of_image.image_id, &ami_args.name), ); // If we don't need to copy AMIs, we're done. @@ -174,7 +195,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // Wait for AMI to be available so it can be copied let successes_required = if already_registered { 1 } else { 3 }; wait_for_ami( - &image_id, + &ids_of_image.image_id, &base_region, &base_region, "available", @@ -183,19 +204,77 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> ) .await .context(error::WaitAmi { - id: &image_id, + id: &ids_of_image.image_id, region: base_region.name(), })?; // For every other region, initiate copy-image calls. - // We make a map storing our regional clients because they're used in a future and need to - // live until the future is resolved. - let mut ec2_clients = HashMap::with_capacity(regions.len()); - for region in regions.iter() { - let ec2_client = build_client::(®ion, &base_region, &aws).context(error::Client { - client_type: "EC2", + + // First we need to find the account IDs for any given roles, so we can grant access to those + // accounts to copy the AMI and snapshots. + let mut account_ids = get_account_ids(®ions, &base_region, &aws).await?; + + // Get the account ID used in the base region; we don't need to grant to it so we can remove it + // from the list. 
+ let sts_client = + build_client::(&base_region, &base_region, &aws).context(error::Client { + client_type: "STS", + region: base_region.name(), + })?; + let response = sts_client + .get_caller_identity(GetCallerIdentityRequest {}) + .await + .context(error::GetCallerIdentity { + region: base_region.name(), + })?; + let base_account_id = response.account.context(error::MissingInResponse { + request_type: "GetCallerIdentity", + missing: "account", + })?; + account_ids.remove(&base_account_id); + + // If we have any accounts other than the base account, grant them access. + if !account_ids.is_empty() { + let account_id_vec: Vec<_> = account_ids.into_iter().collect(); + + modify_snapshots( + Some(account_id_vec.clone()), + None, + "add", + &ids_of_image.snapshot_ids, + &ec2_client, + &base_region, + ) + .await + .context(error::GrantAccess { + thing: "snapshots", + region: base_region.name(), + })?; + + modify_image( + Some(account_id_vec.clone()), + None, + "add", + &ids_of_image.image_id, + &ec2_client, + &base_region, + ) + .await + .context(error::GrantAccess { + thing: "image", region: base_region.name(), })?; + } + + // Next, make EC2 clients so we can fetch and copy AMIs. We make a map storing our regional + // clients because they're used in a future and need to live until the future is resolved. + let mut ec2_clients = HashMap::with_capacity(regions.len()); + for region in regions.iter() { + let ec2_client = + build_client::(®ion, &base_region, &aws).context(error::Client { + client_type: "EC2", + region: base_region.name(), + })?; ec2_clients.insert(region.clone(), ec2_client); } @@ -222,7 +301,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let request = CopyImageRequest { description: ami_args.description.clone(), name: ami_args.name.clone(), - source_image_id: image_id.clone(), + source_image_id: ids_of_image.image_id.clone(), source_region: base_region.name().to_string(), ..Default::default() }; @@ -315,8 +394,62 @@ impl Image { } } +/// Returns the set of account IDs associated with the roles configured for the given regions. +async fn get_account_ids( + regions: &[Region], + base_region: &Region, + aws: &AwsConfig, +) -> Result> { + let mut grant_accounts = HashSet::new(); + + // We make a map storing our regional clients because they're used in a future and need to + // live until the future is resolved. + let mut sts_clients = HashMap::with_capacity(regions.len()); + for region in regions.iter() { + let sts_client = + build_client::(®ion, &base_region, &aws).context(error::Client { + client_type: "STS", + region: region.name(), + })?; + sts_clients.insert(region.clone(), sts_client); + } + + let mut requests = Vec::with_capacity(regions.len()); + for region in regions.iter() { + let sts_client = &sts_clients[region]; + let response_future = sts_client.get_caller_identity(GetCallerIdentityRequest {}); + + // Store the region so we can include it in any errors + let region_future = ready(region.clone()); + requests.push(join(region_future, response_future)); + } + + let request_stream = stream::iter(requests).buffer_unordered(4); + // Run through the stream and collect results into a list. 
+ let responses: Vec<( + Region, + std::result::Result>, + )> = request_stream.collect().await; + + for (region, response) in responses { + let response = response.context(error::GetCallerIdentity { + region: region.name(), + })?; + let account_id = response.account.context(error::MissingInResponse { + request_type: "GetCallerIdentity", + missing: "account", + })?; + grant_accounts.insert(account_id); + } + trace!("Found account IDs {:?}", grant_accounts); + + Ok(grant_accounts) +} + mod error { - use crate::aws::{self, ami}; + use crate::aws::{self, ami, publish_ami}; + use rusoto_core::RusotoError; + use rusoto_sts::GetCallerIdentityError; use snafu::Snafu; use std::path::PathBuf; @@ -352,11 +485,42 @@ mod error { source: ami::register::Error, }, + #[snafu(display("Error getting account ID in {}: {}", region, source))] + GetCallerIdentity { + region: String, + source: RusotoError, + }, + + #[snafu(display( + "Failed to get snapshot IDs associated with {} in {}: {}", + image_id, + region, + source + ))] + GetSnapshots { + image_id: String, + region: String, + source: publish_ami::Error, + }, + + #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))] + GrantAccess { + thing: String, + region: String, + source: publish_ami::Error, + }, + #[snafu(display("Infra.toml is missing {}", missing))] MissingConfig { missing: String, }, + #[snafu(display("Response to {} was missing {}", request_type, missing))] + MissingInResponse { + request_type: String, + missing: String, + }, + ParseRegion { source: crate::aws::Error, }, diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 8e9a3cce..f17aac51 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -17,6 +17,12 @@ const VOLUME_TYPE: &str = "gp2"; const SRIOV: &str = "simple"; const ENA: bool = true; +#[derive(Debug)] +pub(crate) struct RegisteredIds { + pub(crate) image_id: String, + pub(crate) snapshot_ids: Vec, +} + /// Helper for `register_image`. Inserts registered snapshot IDs into `cleanup_snapshot_ids` so /// they can be cleaned up on failure if desired. 
async fn _register_image( @@ -25,7 +31,7 @@ async fn _register_image( ebs_client: EbsClient, ec2_client: &Ec2Client, cleanup_snapshot_ids: &mut Vec, -) -> Result { +) -> Result { debug!( "Uploading root and data images into EBS snapshots in {}", region @@ -80,7 +86,7 @@ async fn _register_image( device_name: Some(ROOT_DEVICE_NAME.to_string()), ebs: Some(EbsBlockDevice { delete_on_termination: Some(true), - snapshot_id: Some(root_snapshot), + snapshot_id: Some(root_snapshot.clone()), volume_type: Some(VOLUME_TYPE.to_string()), ..Default::default() }), @@ -90,7 +96,7 @@ async fn _register_image( let mut data_bdm = root_bdm.clone(); data_bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); if let Some(ebs) = data_bdm.ebs.as_mut() { - ebs.snapshot_id = Some(data_snapshot); + ebs.snapshot_id = Some(data_snapshot.clone()); } let register_request = RegisterImageRequest { @@ -111,9 +117,14 @@ async fn _register_image( .await .context(error::RegisterImage { region })?; - register_response + let image_id = register_response .image_id - .context(error::MissingImageId { region }) + .context(error::MissingImageId { region })?; + + Ok(RegisteredIds { + image_id, + snapshot_ids: vec![root_snapshot, data_snapshot], + }) } /// Uploads the given images into snapshots and registers an AMI using them as its block device @@ -123,7 +134,7 @@ pub(crate) async fn register_image( region: &str, ebs_client: EbsClient, ec2_client: &Ec2Client, -) -> Result { +) -> Result { info!("Registering '{}' in {}", ami_args.name, region); let mut cleanup_snapshot_ids = Vec::new(); let register_result = _register_image( diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index b7f46e4e..e9055f00 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -48,6 +48,16 @@ impl NewWith for SsmClient { } } +impl NewWith for StsClient { + fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self + where + P: ProvideAwsCredentials + Send + Sync + 'static, + D: DispatchSignedRequest + Send + Sync + 'static, + { + Self::new_with(request_dispatcher, credentials_provider, region) + } +} + /// Create a rusoto client of the given type using the given region and configuration. 
pub(crate) fn build_client( region: &Region, @@ -56,7 +66,11 @@ pub(crate) fn build_client( ) -> Result { let maybe_regional_role = aws.region.get(region.name()).and_then(|r| r.role.clone()); let assume_roles = aws.role.iter().chain(maybe_regional_role.iter()).cloned(); - let provider = build_provider(&sts_region, assume_roles.clone(), base_provider(&aws.profile)?)?; + let provider = build_provider( + &sts_region, + assume_roles.clone(), + base_provider(&aws.profile)?, + )?; Ok(T::new_with( rusoto_core::HttpClient::new().context(error::HttpClient)?, provider, diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 6f596937..fd98a40b 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -9,9 +9,8 @@ use futures::stream::{self, StreamExt}; use log::{debug, error, info, trace}; use rusoto_core::{Region, RusotoError}; use rusoto_ec2::{ - DescribeImagesError, DescribeImagesRequest, DescribeImagesResult, Ec2, Ec2Client, - ModifyImageAttributeError, ModifyImageAttributeRequest, ModifySnapshotAttributeError, - ModifySnapshotAttributeRequest, + DescribeImagesRequest, Ec2, Ec2Client, ModifyImageAttributeRequest, + ModifySnapshotAttributeError, ModifySnapshotAttributeRequest, }; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; @@ -90,7 +89,12 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { } else { aws.regions.clone().into() }; - ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + ensure!( + !regions.is_empty(), + error::MissingConfig { + missing: "aws.regions" + } + ); let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; // Check that the requested regions are a subset of the regions we *could* publish from the AMI @@ -124,164 +128,225 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // live until the future is resolved. let mut ec2_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { - let ec2_client = build_client::(®ion, &base_region, &aws).context(error::Client { - client_type: "EC2", - region: region.name(), - })?; + let ec2_client = + build_client::(®ion, &base_region, &aws).context(error::Client { + client_type: "EC2", + region: region.name(), + })?; ec2_clients.insert(region.clone(), ec2_client); } - let snapshots = get_snapshots(&amis, &ec2_clients).await?; + let snapshots = get_regional_snapshots(&amis, &ec2_clients).await?; trace!("Found snapshots: {:?}", snapshots); + let all = Some(vec!["all".to_string()]); info!("Updating snapshot permissions - making {}", mode); - modify_snapshots(&snapshots, &ec2_clients, operation.clone()).await?; + modify_regional_snapshots(None, all.clone(), &operation, &snapshots, &ec2_clients).await?; + info!("Updating image permissions - making {}", mode); - modify_images(&amis, &ec2_clients, operation.clone()).await?; + let ami_ids = amis + .into_iter() + .map(|(region, image)| (region, image.id)) + .collect(); + modify_regional_images(None, all, &operation, &ami_ids, &ec2_clients).await?; Ok(()) } +/// Returns the snapshot IDs associated with the given AMI. 
+pub(crate) async fn get_snapshots( + image_id: &str, + region: &Region, + ec2_client: &Ec2Client, +) -> Result> { + let describe_request = DescribeImagesRequest { + image_ids: Some(vec![image_id.to_string()]), + ..Default::default() + }; + let describe_response = ec2_client.describe_images(describe_request).await; + let describe_response = describe_response.context(error::DescribeImages { + region: region.name(), + })?; + + // Get the image description, ensuring we only have one. + let mut images = describe_response.images.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "images", + })?; + ensure!( + !images.is_empty(), + error::MissingImage { + region: region.name(), + image_id: image_id.to_string(), + } + ); + ensure!( + images.len() == 1, + error::MultipleImages { + region: region.name(), + images: images + .into_iter() + .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) + .collect::>() + } + ); + let image = images.remove(0); + + // Look into the block device mappings for snapshots. + let bdms = image + .block_device_mappings + .context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "block_device_mappings", + })?; + ensure!( + !bdms.is_empty(), + error::MissingInResponse { + request_type: "DescribeImages", + missing: "non-empty block_device_mappings" + } + ); + let mut snapshot_ids = Vec::with_capacity(bdms.len()); + for bdm in bdms { + let ebs = bdm.ebs.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "ebs in block_device_mappings", + })?; + let snapshot_id = ebs.snapshot_id.context(error::MissingInResponse { + request_type: "DescribeImages", + missing: "snapshot_id in block_device_mappings.ebs", + })?; + snapshot_ids.push(snapshot_id); + } + + Ok(snapshot_ids) +} + /// Returns a regional mapping of snapshot IDs associated with the given AMIs. -async fn get_snapshots( +async fn get_regional_snapshots( amis: &HashMap, clients: &HashMap, ) -> Result>> { // Build requests for image information. - let mut describe_requests = Vec::with_capacity(amis.len()); + let mut snapshots_requests = Vec::with_capacity(amis.len()); for (region, image) in amis { let ec2_client = &clients[region]; - let describe_request = DescribeImagesRequest { - image_ids: Some(vec![image.id.to_string()]), - ..Default::default() - }; - let describe_future = ec2_client.describe_images(describe_request); - // Store the region and image ID so we can include it in errors - let info_future = ready((region.clone(), image.id.clone())); - describe_requests.push(join(info_future, describe_future)); + let snapshots_future = get_snapshots(&image.id, region, ec2_client); + + // Store the region so we can include it in errors + let info_future = ready(region.clone()); + snapshots_requests.push(join(info_future, snapshots_future)); } // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(describe_requests).buffer_unordered(4); - let describe_responses: Vec<( - (Region, String), - std::result::Result>, - )> = request_stream.collect().await; + let request_stream = stream::iter(snapshots_requests).buffer_unordered(4); + let snapshots_responses: Vec<(Region, Result>)> = request_stream.collect().await; // For each described image, get the snapshot IDs from the block device mappings. let mut snapshots = HashMap::with_capacity(amis.len()); - for ((region, image_id), describe_response) in describe_responses { - // Get the image description, ensuring we only have one. 
- let describe_response = describe_response.context(error::DescribeImages { - region: region.name(), - })?; - let mut images = describe_response.images.context(error::MissingInResponse { - request_type: "DescribeImages", - missing: "images", - })?; - ensure!( - !images.is_empty(), - error::MissingImage { - region: region.name(), - image_id, - } - ); - ensure!( - images.len() == 1, - error::MultipleImages { - region: region.name(), - images: images - .into_iter() - .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) - .collect::>() - } - ); - let image = images.remove(0); - - // Look into the block device mappings for snapshots. - let bdms = image - .block_device_mappings - .context(error::MissingInResponse { - request_type: "DescribeImages", - missing: "block_device_mappings", - })?; - ensure!( - !bdms.is_empty(), - error::MissingInResponse { - request_type: "DescribeImages", - missing: "non-empty block_device_mappings" - } - ); - let mut snapshot_ids = Vec::with_capacity(bdms.len()); - for bdm in bdms { - let ebs = bdm.ebs.context(error::MissingInResponse { - request_type: "DescribeImages", - missing: "ebs in block_device_mappings", - })?; - let snapshot_id = ebs.snapshot_id.context(error::MissingInResponse { - request_type: "DescribeImages", - missing: "snapshot_id in block_device_mappings.ebs", - })?; - snapshot_ids.push(snapshot_id); - } + for (region, snapshot_ids) in snapshots_responses { + let snapshot_ids = snapshot_ids?; snapshots.insert(region, snapshot_ids); } Ok(snapshots) } -/// Modify snapshot attributes to make them public/private as requested. -async fn modify_snapshots( +/// Modify createVolumePermission for the given users/groups on the given snapshots. The +/// `operation` should be "add" or "remove" to allow/deny permission. +pub(crate) async fn modify_snapshots( + user_ids: Option>, + group_names: Option>, + operation: &str, + snapshot_ids: &[String], + ec2_client: &Ec2Client, + region: &Region, +) -> Result<()> { + let mut requests = Vec::new(); + for snapshot_id in snapshot_ids { + let request = ModifySnapshotAttributeRequest { + attribute: Some("createVolumePermission".to_string()), + user_ids: user_ids.clone(), + group_names: group_names.clone(), + operation_type: Some(operation.to_string()), + snapshot_id: snapshot_id.clone(), + ..Default::default() + }; + let response_future = ec2_client.modify_snapshot_attribute(request); + // Store the snapshot_id so we can include it in any errors + let info_future = ready(snapshot_id.to_string()); + requests.push(join(info_future, response_future)); + } + + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(requests).buffer_unordered(4); + let responses: Vec<( + String, + std::result::Result<(), RusotoError>, + )> = request_stream.collect().await; + + for (snapshot_id, response) in responses { + response.context(error::ModifyImageAttribute { + snapshot_id, + region: region.name(), + })? + } + + Ok(()) +} + +/// Modify createVolumePermission for the given users/groups, across all of the snapshots in the +/// given regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. +pub(crate) async fn modify_regional_snapshots( + user_ids: Option>, + group_names: Option>, + operation: &str, snapshots: &HashMap>, clients: &HashMap, - operation: String, ) -> Result<()> { // Build requests to modify snapshot attributes. 
- let mut modify_snapshot_requests = Vec::new(); + let mut requests = Vec::new(); for (region, snapshot_ids) in snapshots { - for snapshot_id in snapshot_ids { - let ec2_client = &clients[region]; - let modify_snapshot_request = ModifySnapshotAttributeRequest { - attribute: Some("createVolumePermission".to_string()), - group_names: Some(vec!["all".to_string()]), - operation_type: Some(operation.clone()), - snapshot_id: snapshot_id.clone(), - ..Default::default() - }; - let modify_snapshot_future = - ec2_client.modify_snapshot_attribute(modify_snapshot_request); - - // Store the region and snapshot ID so we can include it in errors - let info_future = ready((region.name().to_string(), snapshot_id.clone())); - modify_snapshot_requests.push(join(info_future, modify_snapshot_future)); - } + let ec2_client = &clients[region]; + let modify_snapshot_future = modify_snapshots( + user_ids.clone(), + group_names.clone(), + operation, + snapshot_ids, + ec2_client, + region, + ); + + // Store the region and snapshot ID so we can include it in errors + let info_future = ready((region.clone(), snapshot_ids.clone())); + requests.push(join(info_future, modify_snapshot_future)); } // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(modify_snapshot_requests).buffer_unordered(4); - let modify_snapshot_responses: Vec<( - (String, String), - std::result::Result<(), RusotoError>, - )> = request_stream.collect().await; + let request_stream = stream::iter(requests).buffer_unordered(4); + let responses: Vec<((Region, Vec), Result<()>)> = request_stream.collect().await; // Count up successes and failures so we can give a clear total in the final error message. let mut error_count = 0u16; let mut success_count = 0u16; - for ((region, snapshot_id), modify_snapshot_response) in modify_snapshot_responses { - match modify_snapshot_response { + for ((region, snapshot_ids), response) in responses { + match response { Ok(()) => { success_count += 1; debug!( - "Modified permissions of snapshot {} in {}", - snapshot_id, region, + "Modified permissions in {} for snapshots [{}]", + region.name(), + snapshot_ids.join(", "), ); } Err(e) => { error_count += 1; error!( - "Modifying permissions of {} in {} failed: {}", - snapshot_id, region, e + "Failed to modify permissions in {} for snapshots [{}]: {}", + region.name(), + snapshot_ids.join(", "), + e ); } } @@ -289,7 +354,7 @@ async fn modify_snapshots( ensure!( error_count == 0, - error::ModifySnapshotAttribute { + error::ModifySnapshotAttributes { error_count, success_count, } @@ -298,41 +363,69 @@ async fn modify_snapshots( Ok(()) } -/// Modify image attributes to make them public/private as requested. -async fn modify_images( - images: &HashMap, - clients: &HashMap, - operation: String, +/// Modify launchPermission for the given users/groups on the given images. The `operation` +/// should be "add" or "remove" to allow/deny permission. +pub(crate) async fn modify_image( + user_ids: Option>, + user_groups: Option>, + operation: &str, + image_id: &str, + ec2_client: &Ec2Client, + region: &Region, ) -> Result<()> { // Build requests to modify image attributes. 
- let mut modify_image_requests = Vec::new(); - for (region, image) in images { + let modify_image_request = ModifyImageAttributeRequest { + attribute: Some("launchPermission".to_string()), + user_ids: user_ids.clone(), + user_groups: user_groups.clone(), + operation_type: Some(operation.to_string()), + image_id: image_id.to_string(), + ..Default::default() + }; + ec2_client + .modify_image_attribute(modify_image_request) + .await + .context(error::ModifyImageAttributes { + image_id, + region: region.name(), + }) +} + +/// Modify launchPermission for the given users/groups, across all of the images in the given +/// regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. +pub(crate) async fn modify_regional_images( + user_ids: Option>, + user_groups: Option>, + operation: &str, + images: &HashMap, + clients: &HashMap, +) -> Result<()> { + let mut requests = Vec::new(); + for (region, image_id) in images { let ec2_client = &clients[region]; - let modify_image_request = ModifyImageAttributeRequest { - attribute: Some("launchPermission".to_string()), - user_groups: Some(vec!["all".to_string()]), - operation_type: Some(operation.clone()), - image_id: image.id.clone(), - ..Default::default() - }; - let modify_image_future = ec2_client.modify_image_attribute(modify_image_request); + + let modify_image_future = modify_image( + user_ids.clone(), + user_groups.clone(), + operation, + image_id, + ec2_client, + region, + ); // Store the region and image ID so we can include it in errors - let info_future = ready((region.name().to_string(), image.id.clone())); - modify_image_requests.push(join(info_future, modify_image_future)); + let info_future = ready((region.name().to_string(), image_id.clone())); + requests.push(join(info_future, modify_image_future)); } // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(modify_image_requests).buffer_unordered(4); - let modify_image_responses: Vec<( - (String, String), - std::result::Result<(), RusotoError>, - )> = request_stream.collect().await; + let request_stream = stream::iter(requests).buffer_unordered(4); + let responses: Vec<((String, String), Result<()>)> = request_stream.collect().await; // Count up successes and failures so we can give a clear total in the final error message. 
let mut error_count = 0u16; let mut success_count = 0u16; - for ((region, image_id), modify_image_response) in modify_image_responses { + for ((region, image_id), modify_image_response) in responses { match modify_image_response { Ok(()) => { success_count += 1; @@ -350,7 +443,7 @@ async fn modify_images( ensure!( error_count == 0, - error::ModifyImageAttribute { + error::ModifyImagesAttributes { error_count, success_count, } @@ -361,6 +454,8 @@ async fn modify_images( mod error { use crate::aws; + use rusoto_core::RusotoError; + use rusoto_ec2::{ModifyImageAttributeError, ModifySnapshotAttributeError}; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -421,20 +516,44 @@ mod error { missing: String, }, + #[snafu(display( + "Failed to modify permissions of {} in {}: {}", + snapshot_id, + region, + source + ))] + ModifyImageAttribute { + snapshot_id: String, + region: String, + source: RusotoError, + }, + #[snafu(display( "Failed to modify permissions of {} of {} images", error_count, error_count + success_count, ))] - ModifyImageAttribute { + ModifyImagesAttributes { error_count: u16, success_count: u16, }, + #[snafu(display( + "Failed to modify permissions of {} in {}: {}", + image_id, + region, + source + ))] + ModifyImageAttributes { + image_id: String, + region: String, + source: RusotoError, + }, + #[snafu(display( "Failed to modify permissions of {} of {} snapshots", error_count, error_count + success_count, ))] - ModifySnapshotAttribute { + ModifySnapshotAttributes { error_count: u16, success_count: u16, }, From 9b37e41d28b3b79cfea3ce663696421e5ebe354f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 21:20:07 -0700 Subject: [PATCH 0330/1356] pubsys: change "parameter overrides" to "conditional parameters" Rather than template directories with override files for specific variants and architectures, this uses a single TOML file for all parameters with (optional) keys restricting parameters to specific variants and arches. This model is clearer, based on early feedback, and also allows users to specify parameters that are conditional to a variant *and* an architecture, whereas the override files were more like "or" matches. --- tools/pubsys/policies/ssm/README.md | 18 ++++-- tools/pubsys/src/aws/promote_ssm/mod.rs | 16 ++++-- tools/pubsys/src/aws/ssm/mod.rs | 16 ++++-- tools/pubsys/src/aws/ssm/template.rs | 75 ++++++++++--------------- 4 files changed, 66 insertions(+), 59 deletions(-) diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md index 57c47d8b..a6bcc682 100644 --- a/tools/pubsys/policies/ssm/README.md +++ b/tools/pubsys/policies/ssm/README.md @@ -21,10 +21,18 @@ The available variables include: * `image_version`, for example "0.5.0-e0ddf1b" * `region`, for example "us-west-2" -# Overrides +# Conditional parameters -You can also add or override parameters that are specific to `variant` or `arch`. -To do so, create a directory named "variant" or "arch" inside parameters directory, and create a file named after the specific variant or arch for which you want overrides. +You can also list parameters that only apply to specific variants or architectures. +To do so, add `variant` or `arch` keys (or both) to your parameter definition. +The parameter will only be populated if the current `variant` or `arch` matches one of the values in the list. +(If both `variant` and `arch` are listed, the build must match an entry from both lists.) 
-For example, to add extra parameters just for the "aarch64" architecture, create `arch/aarch64.toml`. -Inside you can put the same types of `[[parameter]]` declarations that you see in `defaults.toml`, but they'll only be applied for `aarch64` builds. +For example, to add an extra parameter that's only set for "aarch64" builds of the "aws-ecs-1" variant: +``` +[[parameter]] +arch = ["aarch64"] +variant = ["aws-ecs-1"] +name = "/a/special/aarch64/ecs/parameter" +value = "{image_name}" +``` diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index f9061423..78231146 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -38,9 +38,9 @@ pub(crate) struct PromoteArgs { #[structopt(long, use_delimiter = true)] regions: Vec, - /// Directory holding the parameter template files + /// File holding the parameter templates #[structopt(long)] - template_dir: PathBuf, + template_path: PathBuf, } /// Common entrypoint from main() @@ -100,14 +100,22 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { info!( "Parsing SSM parameter templates from {}", - promote_args.template_dir.display() + promote_args.template_path.display() ); // Doesn't matter which build context we use to find template files because version isn't used // in their naming let template_parameters = - template::get_parameters(&promote_args.template_dir, &source_build_context) + template::get_parameters(&promote_args.template_path, &source_build_context) .context(error::FindTemplates)?; + if template_parameters.parameters.is_empty() { + info!( + "No parameters for this arch/variant in {}", + promote_args.template_path.display() + ); + return Ok(()); + } + // Render parameter names into maps of {template string => rendered value}. We need the // template strings so we can associate source parameters with target parameters that came // from the same template, so we know what to copy. 
diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 0baa97f6..58f827ad 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -43,9 +43,9 @@ pub(crate) struct SsmArgs { #[structopt(long, use_delimiter = true)] regions: Vec, - /// Directory holding the parameter template files + /// File holding the parameter templates #[structopt(long)] - template_dir: PathBuf, + template_path: PathBuf, /// Allows overwrite of existing parameters #[structopt(long)] @@ -96,11 +96,19 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { info!( "Parsing SSM parameter templates from {}", - ssm_args.template_dir.display() + ssm_args.template_path.display() ); - let template_parameters = template::get_parameters(&ssm_args.template_dir, &build_context) + let template_parameters = template::get_parameters(&ssm_args.template_path, &build_context) .context(error::FindTemplates)?; + if template_parameters.parameters.is_empty() { + info!( + "No parameters for this arch/variant in {}", + ssm_args.template_path.display() + ); + return Ok(()); + } + let new_parameters = template::render_parameters(template_parameters, amis, ssm_prefix, &build_context) .context(error::RenderTemplates)?; diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index e1561442..c869567a 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -3,7 +3,7 @@ use super::{BuildContext, SsmKey, SsmParameters}; use crate::aws::ami::Image; -use log::{info, trace}; +use log::trace; use rusoto_core::Region; use serde::{Deserialize, Serialize}; use snafu::{ensure, ResultExt}; @@ -17,6 +17,12 @@ use tinytemplate::TinyTemplate; pub(crate) struct TemplateParameter { pub(crate) name: String, pub(crate) value: String, + + // User can say parameters only apply to these variants/arches + #[serde(default, rename = "variant")] + pub(crate) variants: Vec, + #[serde(default, rename = "arch")] + pub(crate) arches: Vec, } /// Represents a set of SSM parameters, in a format that allows for clear definition of @@ -28,60 +34,39 @@ pub(crate) struct TemplateParameters { pub(crate) parameters: Vec, } -impl TemplateParameters { - fn extend(&mut self, other: Self) { - self.parameters.extend(other.parameters) - } -} - -/// Finds and deserializes template parameters from the template directory, taking into account -/// overrides requested by the user +/// Deserializes template parameters from the template file, taking into account conditional +/// parameters that may or may not apply based on our build context. pub(crate) fn get_parameters( - template_dir: &Path, + template_path: &Path, build_context: &BuildContext<'_>, ) -> Result { - let defaults_path = template_dir.join("defaults.toml"); - let defaults_str = fs::read_to_string(&defaults_path).context(error::File { + let templates_str = fs::read_to_string(&template_path).context(error::File { op: "read", - path: &defaults_path, + path: &template_path, })?; let mut template_parameters: TemplateParameters = - toml::from_str(&defaults_str).context(error::InvalidToml { - path: &defaults_path, + toml::from_str(&templates_str).context(error::InvalidToml { + path: &template_path, })?; - trace!("Parsed default templates: {:#?}", template_parameters); - - // Allow the user to add/override parameters specific to variant or arch. Because these are - // added after the defaults, they will take precedence. (It doesn't make sense to override - // based on the version argument.) 
- let mut context = HashMap::new(); - context.insert("variant", build_context.variant); - context.insert("arch", build_context.arch); - for (key, value) in context { - let override_path = template_dir.join(key).join(format!("{}.toml", value)); - if override_path.exists() { - info!( - "Parsing SSM parameter overrides from {}", - override_path.display() - ); - let template_str = fs::read_to_string(&override_path).context(error::File { - op: "read", - path: &override_path, - })?; - let override_parameters: TemplateParameters = - toml::from_str(&template_str).context(error::InvalidToml { - path: &override_path, - })?; - trace!("Parsed override templates: {:#?}", override_parameters); - template_parameters.extend(override_parameters); - } - } + trace!("Parsed templates: {:#?}", template_parameters); + // You shouldn't point to an empty file, but if all the entries are removed by + // conditionals below, we allow that and just don't set any parameters. ensure!( !template_parameters.parameters.is_empty(), - error::NoTemplates { path: template_dir } + error::NoTemplates { + path: template_path + } ); + let variant = build_context.variant.to_string(); + let arch = build_context.arch.to_string(); + template_parameters.parameters.retain(|p| { + (p.variants.is_empty() || p.variants.contains(&variant)) + && (p.arches.is_empty() || p.arches.contains(&arch)) + }); + trace!("Templates after conditionals: {:#?}", template_parameters); + Ok(template_parameters) } @@ -201,9 +186,7 @@ mod error { }, #[snafu(display("Found no parameter templates in {}", path.display()))] - NoTemplates { - path: PathBuf, - }, + NoTemplates { path: PathBuf }, #[snafu(display("Error rendering template from '{}': {}", template, source))] RenderTemplate { From 8567b3bf26cd3d871e1a7102582a2537d15594ce Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 16:55:21 -0700 Subject: [PATCH 0331/1356] pubsys: fix region name in error message --- tools/pubsys/src/aws/ami/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index f6274daa..e484eb8b 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -273,7 +273,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let ec2_client = build_client::(®ion, &base_region, &aws).context(error::Client { client_type: "EC2", - region: base_region.name(), + region: region.name(), })?; ec2_clients.insert(region.clone(), ec2_client); } @@ -286,7 +286,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> .context(error::GetAmiId { name: &ami_args.name, arch: &ami_args.arch, - region: base_region.name(), + region: region.name(), })? { info!( From a00b00b863d4c291facaada8c0b3f5ed31018cec Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 16:57:37 -0700 Subject: [PATCH 0332/1356] pubsys: rename client variable to reduce confusion It's too easy to use "ec2_client" by accident when you need a regional client, when this is actually a client just for the base region. 
--- tools/pubsys/src/aws/ami/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index e484eb8b..1851788f 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -121,12 +121,12 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let base_region = regions.remove(0); // Build EBS client for snapshot management, and EC2 client for registration - let ebs_client = + let base_ebs_client = build_client::(&base_region, &base_region, &aws).context(error::Client { client_type: "EBS", region: base_region.name(), })?; - let ec2_client = + let base_ec2_client = build_client::(&base_region, &base_region, &aws).context(error::Client { client_type: "EC2", region: base_region.name(), @@ -138,7 +138,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &ami_args.name, &ami_args.arch, base_region.name(), - &ec2_client, + &base_ec2_client, ) .await .context(error::GetAmiId { @@ -154,7 +154,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> base_region.name(), found_id ); - let snapshot_ids = get_snapshots(&found_id, &base_region, &ec2_client) + let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) .await .context(error::GetSnapshots { image_id: &found_id, @@ -166,7 +166,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> }; (found_ids, true) } else { - let new_ids = register_image(ami_args, base_region.name(), ebs_client, &ec2_client) + let new_ids = register_image(ami_args, base_region.name(), base_ebs_client, &base_ec2_client) .await .context(error::RegisterImage { name: &ami_args.name, @@ -216,12 +216,12 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // Get the account ID used in the base region; we don't need to grant to it so we can remove it // from the list. - let sts_client = + let base_sts_client = build_client::(&base_region, &base_region, &aws).context(error::Client { client_type: "STS", region: base_region.name(), })?; - let response = sts_client + let response = base_sts_client .get_caller_identity(GetCallerIdentityRequest {}) .await .context(error::GetCallerIdentity { @@ -242,7 +242,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> None, "add", &ids_of_image.snapshot_ids, - &ec2_client, + &base_ec2_client, &base_region, ) .await @@ -256,7 +256,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> None, "add", &ids_of_image.image_id, - &ec2_client, + &base_ec2_client, &base_region, ) .await From 8f6cfb18088f4810fa88efe71c081ec9613fe4c2 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 17:04:21 -0700 Subject: [PATCH 0333/1356] pubsys: increase log level inside AMI registration process Waiting for snapshots is the longest part of AMI registration and there wasn't much explanation of what was happening, so this bumps a couple debug messages up to info level. 
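The effect of the change is easiest to see with the `log` crate macros themselves. The snippet below is a self-contained illustration, not pubsys code: it assumes an `env_logger` setup filtered at `Info`, whereas pubsys's real logger initialization is not part of this patch.

```rust
use env_logger::Builder;
use log::{debug, info, LevelFilter};

fn main() {
    // Hypothetical logger setup filtered at Info, a common default for CLI tools.
    Builder::new().filter_level(LevelFilter::Info).init();

    let region = "us-west-2";

    // Before this patch: a debug-level message is filtered out at Info,
    // so nothing explains the long snapshot wait.
    debug!("Waiting for root and data snapshots to become available in {}", region);

    // After: the same message at info level is emitted by default,
    // along with the announcement of the register call.
    info!("Waiting for root and data snapshots to become available in {}", region);
    info!("Making register image call in {}", region);
}
```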
--- tools/pubsys/src/aws/ami/register.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index f17aac51..f724f274 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -63,7 +63,7 @@ async fn _register_image( })?; cleanup_snapshot_ids.push(data_snapshot.clone()); - debug!( + info!( "Waiting for root and data snapshots to become available in {}", region ); @@ -111,7 +111,7 @@ async fn _register_image( ..Default::default() }; - debug!("Registering AMI in {}", region); + info!("Making register image call in {}", region); let register_response = ec2_client .register_image(register_request) .await From d98b421d08b58dfd1fb53633bf7fc878d604f5f8 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 17:35:06 -0700 Subject: [PATCH 0334/1356] pubsys: print waiting message only if we're going to sleep If the AMI is found on the first attempt, the success message is enough. --- tools/pubsys/src/aws/ami/wait.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 752142c5..072434d1 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -33,16 +33,6 @@ pub(crate) async fn wait_for_ami( region: region.name() } ); - if attempts % 5 == 1 { - info!( - "Waiting for {} in {} to be {}... (attempt {} of {})", - id, - region.name(), - state, - attempts, - max_attempts - ); - } let describe_request = DescribeImagesRequest { image_ids: Some(vec![id.to_string()]), @@ -102,6 +92,17 @@ pub(crate) async fn wait_for_ami( // Did not receive list; reset success count and try again (if we have spare attempts) successes = 0; }; + + if attempts % 5 == 1 { + info!( + "Waiting for {} in {} to be {}... (attempt {} of {})", + id, + region.name(), + state, + attempts, + max_attempts + ); + } sleep(Duration::from_secs(seconds_between_attempts)); } } From e5b6dc0689b988b07c38392949080f2404fa77cb Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 17:16:09 -0700 Subject: [PATCH 0335/1356] pubsys: wait for AMIs to be available before granting access Without a wait, if you use publish_ami right after registering/copying AMIs, describe images responses can include partial information that doesn't include snapshot IDs. --- tools/pubsys/src/aws/ami/mod.rs | 2 +- tools/pubsys/src/aws/publish_ami/mod.rs | 37 +++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 1851788f..9b6c610d 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -3,7 +3,7 @@ mod register; mod snapshot; -mod wait; +pub(crate) mod wait; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; use crate::aws::{client::build_client, region_from_string}; diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index fd98a40b..603f1bf8 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -1,7 +1,10 @@ //! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting //! and revoking public access to EC2 AMIs. 
-use crate::aws::{ami::Image, client::build_client, region_from_string}; +use crate::aws::ami::wait::{self, wait_for_ami}; +use crate::aws::ami::Image; +use crate::aws::client::build_client; +use crate::aws::region_from_string; use crate::config::InfraConfig; use crate::Args; use futures::future::{join, ready}; @@ -136,6 +139,29 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { ec2_clients.insert(region.clone(), ec2_client); } + // If AMIs aren't in "available" state, we can get a DescribeImages response that includes + // most of the data we need, but not snapshot IDs. + info!("Waiting for AMIs to be available..."); + let mut wait_requests = Vec::with_capacity(amis.len()); + for (region, image) in &amis { + let wait_future = wait_for_ami(&image.id, ®ion, &base_region, "available", 1, &aws); + // Store the region and ID so we can include it in errors + let info_future = ready((region.clone(), image.id.clone())); + wait_requests.push(join(info_future, wait_future)); + } + // Send requests in parallel and wait for responses, collecting results into a list. + let request_stream = stream::iter(wait_requests).buffer_unordered(4); + let wait_responses: Vec<((Region, String), std::result::Result<(), wait::Error>)> = + request_stream.collect().await; + + // Make sure waits succeeded and AMIs are available. + for ((region, image_id), wait_response) in wait_responses { + wait_response.context(error::WaitAmi { + id: &image_id, + region: region.name(), + })?; + } + let snapshots = get_regional_snapshots(&amis, &ec2_clients).await?; trace!("Found snapshots: {:?}", snapshots); @@ -453,7 +479,7 @@ pub(crate) async fn modify_regional_images( } mod error { - use crate::aws; + use crate::aws::{self, ami}; use rusoto_core::RusotoError; use rusoto_ec2::{ModifyImageAttributeError, ModifySnapshotAttributeError}; use snafu::Snafu; @@ -575,6 +601,13 @@ mod error { UnknownRegions { regions: Vec, }, + + #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] + WaitAmi { + id: String, + region: String, + source: ami::wait::Error, + }, } } pub(crate) use error::Error; From 142d3caf89abb2cd062ff7f80b9b4899100be6f4 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 24 Aug 2020 16:29:28 +0000 Subject: [PATCH 0336/1356] pubsys: map to EC2 architecture names SSM parameters are used to find EC2 AMI information, so the arch name used in parameters should match the name used by EC2. Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/policies/ssm/README.md | 3 ++- tools/pubsys/src/aws/mod.rs | 15 +++++++++++++++ tools/pubsys/src/aws/promote_ssm/mod.rs | 4 ++-- tools/pubsys/src/aws/ssm/mod.rs | 4 ++-- 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md index a6bcc682..28560c38 100644 --- a/tools/pubsys/policies/ssm/README.md +++ b/tools/pubsys/policies/ssm/README.md @@ -15,7 +15,8 @@ The `name` and `value` can contain template variables that will be replaced with The available variables include: * `variant`, for example "aws-k8s-1.17" -* `arch`, for example "x86_64" +* `arch`, for example "x86_64" or "arm64". + * Note: "amd64" and "aarch64" are mapped to "x86_64" and "arm64", respectively, to match the names used by EC2. 
* `image_id`, for example "ami-0123456789abcdef0" * `image_name`, for example "bottlerocket-aws-k8s-1.17-x86_64-v0.5.0-e0ddf1b" * `image_version`, for example "0.5.0-e0ddf1b" diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index faae16b7..bc19dd44 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -23,12 +23,27 @@ fn region_from_string(name: &str, aws: &AwsConfig) -> Result { }) } +/// Parses the given string as an architecture, mapping values to the ones used in EC2. +pub(crate) fn parse_arch(input: &str) -> Result { + match input { + "x86_64" | "amd64" => Ok("x86_64".to_string()), + "arm64" | "aarch64" => Ok("arm64".to_string()), + _ => error::ParseArch { input, msg: "unknown architecture" }.fail(), + } +} + mod error { use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility = "pub(super)")] pub(crate) enum Error { + #[snafu(display("Failed to parse arch '{}': {}", input, msg))] + ParseArch { + input: String, + msg: String + }, + #[snafu(display("Failed to parse region '{}': {}", name, source))] ParseRegion { name: String, diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 78231146..05ac9067 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -2,7 +2,7 @@ //! SSM parameters from one version to another use crate::aws::client::build_client; -use crate::aws::region_from_string; +use crate::aws::{parse_arch, region_from_string}; use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; use crate::config::InfraConfig; use crate::Args; @@ -19,7 +19,7 @@ use structopt::StructOpt; #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] pub(crate) struct PromoteArgs { /// The architecture of the machine image - #[structopt(long)] + #[structopt(long, parse(try_from_str = parse_arch))] arch: String, /// The variant name for the current build diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 58f827ad..b0377db6 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -4,7 +4,7 @@ pub(crate) mod ssm; pub(crate) mod template; -use crate::aws::{ami::Image, client::build_client, region_from_string}; +use crate::aws::{ami::Image, client::build_client, parse_arch, region_from_string}; use crate::config::{AwsConfig, InfraConfig}; use crate::Args; use log::{info, trace}; @@ -28,7 +28,7 @@ pub(crate) struct SsmArgs { ami_input: PathBuf, /// The architecture of the machine image - #[structopt(long)] + #[structopt(long, parse(try_from_str = parse_arch))] arch: String, /// The variant name for the current build From 65afc2a22a73338db70e78e29b6363420318aaf6 Mon Sep 17 00:00:00 2001 From: Samuel Karp Date: Wed, 19 Aug 2020 15:25:11 -0700 Subject: [PATCH 0337/1356] docs: rename QUICKSTART.md to QUICKSTART-EKS.md --- BUILDING.md | 4 ++-- README.md | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index a59a6637..e475162c 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -1,7 +1,7 @@ # Building Bottlerocket If you'd like to build your own image instead of relying on an Amazon-provided image, follow these steps. -You can skip to the [setup guide](QUICKSTART.md) to use an existing image in Amazon EC2. +You can skip to the [setup guide](QUICKSTART-EKS.md) to use an existing image in Amazon EC2. (We're still working on other use cases!) ## Build an image @@ -74,4 +74,4 @@ Your new AMI ID will be printed at the end. 
## Use your image -See the [setup guide](QUICKSTART.md) for information on running Bottlerocket images. +See the [setup guide](QUICKSTART-EKS.md) for information on running Bottlerocket images. diff --git a/README.md b/README.md index ae42e75e..bab10b2e 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](#contact-us). -If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART.md) to try Bottlerocket in an Amazon EKS cluster. +If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART-EKS.md) to try Bottlerocket in an Amazon EKS cluster. Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. This is a reflection of what we've learned building operating systems and services at Amazon. @@ -62,7 +62,7 @@ It describes: * how to build an image * how to register an EC2 AMI from an image -To get started using Bottlerocket, please see [QUICKSTART](QUICKSTART.md). +To get started using Bottlerocket, please see [QUICKSTART](QUICKSTART-EKS.md). It describes: * how to set up a Kubernetes cluster, so your Bottlerocket instance can run pods * how to launch a Bottlerocket instance in EC2 @@ -85,7 +85,7 @@ Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bott This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. (You can easily replace this control container with your own just by changing the URI; see [Settings](#settings).) -You need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART.md#enabling-ssm). +You need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART-EKS.md#enabling-ssm). Once the instance is started, you can start a session: @@ -333,8 +333,8 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). -See the [setup guide](QUICKSTART.md) for *much* more detail on setting up Bottlerocket and Kubernetes. -* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART.md) uses "bottlerocket". +See the [setup guide](QUICKSTART-EKS.md) for *much* more detail on setting up Bottlerocket and Kubernetes. +* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. 
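As a worked example of the required settings listed above, user data for joining a cluster is plain TOML. The values below are placeholders for illustration only, not a working configuration; substitute the name, endpoint, and base64-encoded certificate authority of your own cluster.

```toml
# Placeholder values -- replace each with the details of your own cluster.
[settings.kubernetes]
cluster-name = "bottlerocket"
api-server = "https://EXAMPLE1234567890ABCDEF.gr7.us-west-2.eks.amazonaws.com"
cluster-certificate = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t..."
```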
From 91e763e275bd28bcc859dd7376255352fd8de3ce Mon Sep 17 00:00:00 2001 From: Samuel Karp Date: Wed, 19 Aug 2020 15:36:45 -0700 Subject: [PATCH 0338/1356] docs: add documentation for aws-ecs-1 variant --- BUILDING.md | 11 +++++++++-- README.md | 46 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 48 insertions(+), 9 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index e475162c..7a159e41 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -1,7 +1,7 @@ # Building Bottlerocket If you'd like to build your own image instead of relying on an Amazon-provided image, follow these steps. -You can skip to the [setup guide](QUICKSTART-EKS.md) to use an existing image in Amazon EC2. +You can skip to the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for Amazon ECS](QUICKSTART-ECS.md) to use an existing image in Amazon EC2. (We're still working on other use cases!) ## Build an image @@ -40,8 +40,15 @@ To build an image, run: cargo make ``` +This will build an image for the default variant, `aws-k8s-1.17`. All packages will be built in turn, and then compiled into an `img` file in the `build/` directory. +To build an image for a different variant, run: + +``` +cargo make -e BUILDSYS_VARIANT=my-variant-here +``` + ### Register an AMI To use the image in Amazon EC2, we need to register the image as an AMI. @@ -74,4 +81,4 @@ Your new AMI ID will be printed at the end. ## Use your image -See the [setup guide](QUICKSTART-EKS.md) for information on running Bottlerocket images. +See the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for Amazon ECS](QUICKSTART-ECS.md) for information on running Bottlerocket images. diff --git a/README.md b/README.md index bab10b2e..05cdcbcf 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](#contact-us). -If you’re ready to jump right in, read our [QUICKSTART](QUICKSTART-EKS.md) to try Bottlerocket in an Amazon EKS cluster. +If you’re ready to jump right in, read our [QUICKSTART for Kubernetes](QUICKSTART-EKS.md) to try Bottlerocket in an Amazon EKS cluster or our [QUICKSTART for Amazon ECS](QUICKSTART-ECS.md) to try Bottlerocket in an Amazon ECS cluster. Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. This is a reflection of what we've learned building operating systems and services at Amazon. @@ -39,7 +39,7 @@ You can let us know about things that seem difficult, or even ways you might lik ## Variants -To start, we're focusing on use of Bottlerocket as a host OS in AWS EKS Kubernetes clusters. +To start, we're focusing on use of Bottlerocket as a host OS in AWS EKS Kubernetes clusters and Amazon ECS clusters. We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. @@ -47,7 +47,8 @@ A build of Bottlerocket that supports different features or integration characte The artifacts of a build will include the architecture and variant name. For example, an `x86_64` build of the `aws-k8s-1.17` variant will produce an image named `bottlerocket-aws-k8s-1.17-x86_64--.img`. 
-Our first supported variants, `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17`, supports EKS as described above. +Our first supported variants, `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17`, support EKS as described above. +We also have a new `aws-ecs-1` variant designed to work with ECS. ## Architectures @@ -62,9 +63,11 @@ It describes: * how to build an image * how to register an EC2 AMI from an image -To get started using Bottlerocket, please see [QUICKSTART](QUICKSTART-EKS.md). -It describes: -* how to set up a Kubernetes cluster, so your Bottlerocket instance can run pods +Bottlerocket is best used with a container orchestrator. +To get started with Kubernetes, please see [QUICKSTART-EKS](QUICKSTART-EKS.md). +To get started with Amazon ECS, please see [QUICKSTART-ECS](QUICKSTART-ECS.md). +These guides describe: +* how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers * how to launch a Bottlerocket instance in EC2 ## Exploration @@ -331,9 +334,10 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in #### Kubernetes settings +See the [setup guide](QUICKSTART-EKS.md) for much more detail on setting up Bottlerocket and Kubernetes. + The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). -See the [setup guide](QUICKSTART-EKS.md) for *much* more detail on setting up Bottlerocket and Kubernetes. * `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. @@ -360,6 +364,33 @@ The following settings are set for you automatically by [pluto](sources/api/) ba * `settings.kubernetes.node-ip`: The IPv4 address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. +#### Amazon ECS settings + +See the [setup guide](QUICKSTART-ECS.md) for much more detail on setting up Bottlerocket and ECS. + +The following settings are optional and allow you to configure how your instance joins an ECS cluster. +Since joining a cluster happens at startup, they need to be [specified in user data](#using-user-data). +* `settings.ecs.cluster`: The name or [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of your Amazon ECS cluster. + If left unspecified, Bottlerocket will join your `default` cluster. +* `settings.ecs.instance-attributes`: [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) in the form of key, value pairs added when registering the container instance in the cluster. + * Example user data for setting up attributes: + ``` + [settings.ecs.instance-attributes] + attribute1 = "foo" + attribute2 = "bar" + ``` + +The following settings are optional and allow you to further configure your cluster. +These settings can be changed at any time. +* `settings.ecs.logging-drivers`: The list of logging drivers available on the container instance. + The ECS agent running on a container instance must register available logging drivers before tasks that use those drivers are eligible to be placed on the instance. + Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. 
+* `settings.ecs.allow-privileged-containers`: Whether launching privileged containers is allowed on the container instance. + If this value is set to false, privileged containers are not permitted. + Bottlerocket sets this value to false by default. +* `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. + Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. + #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. @@ -482,6 +513,7 @@ We currently package the following major third-party components: * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) * Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.15/)) * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) +* Amazon ECS agent ([background](https://github.com/aws/amazon-ecs-agent), [packaging](packages/ecs-agent/)) For further documentation or to see the rest of the packages, see the [packaging directory](packages/). From acba6b138db358c8c999ad687106c28f21d9f633 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 31 Aug 2020 09:47:30 -0700 Subject: [PATCH 0339/1356] README: remove note about being in preview phase --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 05cdcbcf..2f8b6fbd 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,6 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. -Bottlerocket is currently in a developer preview phase and we’re looking for your [feedback](#contact-us). If you’re ready to jump right in, read our [QUICKSTART for Kubernetes](QUICKSTART-EKS.md) to try Bottlerocket in an Amazon EKS cluster or our [QUICKSTART for Amazon ECS](QUICKSTART-ECS.md) to try Bottlerocket in an Amazon ECS cluster. From 3315e481c28646f9776aaac8de9e9f088be14630 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Sun, 23 Aug 2020 16:10:12 -0700 Subject: [PATCH 0340/1356] pubsys: check for copied AMIs in parallel We were waiting on the get_ami_id calls serially which led to an unnecessary increase in runtime when we increase region count. Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 36 ++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 9b6c610d..c7ce0e95 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -278,17 +278,27 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> ec2_clients.insert(region.clone(), ec2_client); } - let mut copy_requests = Vec::with_capacity(regions.len()); + // First, we check if the AMI already exists in each region. + let mut get_requests = Vec::with_capacity(regions.len()); for region in regions.iter() { let ec2_client = &ec2_clients[region]; - if let Some(id) = get_ami_id(&ami_args.name, &ami_args.arch, region.name(), ec2_client) - .await - .context(error::GetAmiId { - name: &ami_args.name, - arch: &ami_args.arch, - region: region.name(), - })? 
- { + let get_request = get_ami_id(&ami_args.name, &ami_args.arch, region.name(), ec2_client); + let info_future = ready(region.clone()); + get_requests.push(join(info_future, get_request)); + } + let request_stream = stream::iter(get_requests).buffer_unordered(4); + let get_responses: Vec<(Region, std::result::Result, register::Error>)> = + request_stream.collect().await; + + // If an AMI already existed, just add it to our list, otherwise prepare a copy request. + let mut copy_requests = Vec::with_capacity(regions.len()); + for (region, get_response) in get_responses { + let get_response = get_response.context(error::GetAmiId { + name: &ami_args.name, + arch: &ami_args.arch, + region: region.name(), + })?; + if let Some(id) = get_response { info!( "Found '{}' already registered in {}: {}", ami_args.name, @@ -298,14 +308,16 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> amis.insert(region.name().to_string(), Image::new(&id, &ami_args.name)); continue; } - let request = CopyImageRequest { + + let ec2_client = &ec2_clients[®ion]; + let copy_request = CopyImageRequest { description: ami_args.description.clone(), name: ami_args.name.clone(), source_image_id: ids_of_image.image_id.clone(), source_region: base_region.name().to_string(), ..Default::default() }; - let response_future = ec2_client.copy_image(request); + let copy_future = ec2_client.copy_image(copy_request); let base_region_name = base_region.name(); // Store the region so we can output it to the user @@ -318,7 +330,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> region.name() ) }); - copy_requests.push(message_future.then(|_| join(region_future, response_future))); + copy_requests.push(message_future.then(|_| join(region_future, copy_future))); } // If all target regions already have the AMI, we're done. From ef9ccd75d88a91d4e2a7060fc349c726daec9b3f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 27 Aug 2020 12:02:37 -0700 Subject: [PATCH 0341/1356] Add info logging before service requests to help explain timing Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/ami/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index c7ce0e95..4e65edf0 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -212,6 +212,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // First we need to find the account IDs for any given roles, so we can grant access to those // accounts to copy the AMI and snapshots. + info!("Getting account IDs for target regions so we can grant access to copy source AMI"); let mut account_ids = get_account_ids(®ions, &base_region, &aws).await?; // Get the account ID used in the base region; we don't need to grant to it so we can remove it @@ -235,6 +236,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // If we have any accounts other than the base account, grant them access. if !account_ids.is_empty() { + info!("Granting access to target accounts so we can copy the AMI"); let account_id_vec: Vec<_> = account_ids.into_iter().collect(); modify_snapshots( @@ -279,6 +281,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> } // First, we check if the AMI already exists in each region. 
+ info!("Checking whether AMIs already exist in target regions"); let mut get_requests = Vec::with_capacity(regions.len()); for region in regions.iter() { let ec2_client = &ec2_clients[region]; From db52265239f9a1360f0ee547a4cb377a7bf2a0c8 Mon Sep 17 00:00:00 2001 From: Justin Weissig Date: Tue, 1 Sep 2020 10:53:07 -0700 Subject: [PATCH 0342/1356] Update README.md Minor spelling/grammar tweaks --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 2f8b6fbd..ef11b68f 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ You can let us know about things that seem difficult, or even ways you might lik ## Variants -To start, we're focusing on use of Bottlerocket as a host OS in AWS EKS Kubernetes clusters and Amazon ECS clusters. +To start, we're focusing on the use of Bottlerocket as a host OS in AWS EKS Kubernetes clusters and Amazon ECS clusters. We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. @@ -195,7 +195,7 @@ This will return the current update status in JSON format. The status should loo } ``` -You can see that the we're running `v0.3.2` in the active partition, and that `v0.4.0` is available. +You can see that we're running `v0.3.2` in the active partition, and that `v0.4.0` is available. If you're happy with that selection, you can request that the update be downloaded and applied to disk. (The update will remain inactive until you make the `activate-update` call below.) ``` apiclient -u /actions/prepare-update -m POST @@ -318,7 +318,7 @@ Here we'll describe each setting you can change. **Note:** You can see the default values (for any settings that are not generated at runtime) by looking at [defaults.toml](sources/models/defaults.toml). When you're sending settings to the API, or receiving settings from the API, they're in a structured JSON format. -This allows allow modification of any number of keys at once. +This allows modification of any number of keys at once. It also lets us ensure that they fit the definition of the Bottlerocket data model - requests with invalid settings won't even parse correctly, helping ensure safety. Here, however, we'll use the shortcut "dotted key" syntax for referring to keys. @@ -394,7 +394,7 @@ These settings can be changed at any time. * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. * `settings.updates.targets-base-url`: The common portion of all URIs used to download update files. -* `settings.updates.seed`: A `u32` value that determines how far into in the update schedule this machine will accept an update. We recommending leaving this at its default generated value so that updates can be somewhat randomized in your cluster. +* `settings.updates.seed`: A `u32` value that determines how far into the update schedule this machine will accept an update. We recommend leaving this at its default generated value so that updates can be somewhat randomized in your cluster. * `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. * `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. 
For testing purposes, you can set this to `true` to ignore those waves and update immediately. @@ -435,7 +435,7 @@ If the `enabled` flag is `true`, it will be started automatically. All host containers will have the `apiclient` binary available at `/usr/local/bin/apiclient` so they're able to [interact with the API](#using-the-api-client). In addition, all host containers come with persistent storage at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` that is persisted across reboots and container start/stop cycles. -The default `admin` host-container, for example, store its SSH host keys under `/.bottlerocket/host-containers/admin/etc/ssh/`. +The default `admin` host-container, for example, stores its SSH host keys under `/.bottlerocket/host-containers/admin/etc/ssh/`. There are a few important caveats to understand about host containers: * They're not orchestrated. They only start or stop according to that `enabled` flag. From c1ccfa6c11e54ce90bf983c91cd839855ab5b0eb Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 27 Aug 2020 22:37:22 +0000 Subject: [PATCH 0343/1356] Add `cargo make grant-ami` and `revoke-ami` tasks Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys/src/aws/publish_ami/mod.rs | 53 +++++++++++++++++-------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 603f1bf8..164a0bf9 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -1,5 +1,5 @@ //! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting -//! and revoking public access to EC2 AMIs. +//! and revoking access to EC2 AMIs. use crate::aws::ami::wait::{self, wait_for_ami}; use crate::aws::ami::Image; @@ -26,6 +26,7 @@ use structopt::StructOpt; #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] #[structopt(group = clap::ArgGroup::with_name("mode").required(true).multiple(false))] +#[structopt(group = clap::ArgGroup::with_name("who").required(true).multiple(true))] pub(crate) struct PublishArgs { /// Path to the JSON file containing regional AMI IDs to modify #[structopt(long)] @@ -36,22 +37,29 @@ pub(crate) struct PublishArgs { #[structopt(long, use_delimiter = true)] regions: Vec, - /// Make the AMIs public + /// Grant access to the given users/groups #[structopt(long, group = "mode")] - make_public: bool, - /// Make the AMIs private + grant: bool, + /// Revoke access from the given users/groups #[structopt(long, group = "mode")] - make_private: bool, + revoke: bool, + + /// User IDs to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + user_ids: Vec, + /// Group names to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + group_names: Vec, } /// Common entrypoint from main() pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { - let (operation, mode) = if publish_args.make_public { - ("add".to_string(), "public") - } else if publish_args.make_private { - ("remove".to_string(), "private") + let (operation, description) = if publish_args.grant { + ("add".to_string(), "granting access") + } else if publish_args.revoke { + ("remove".to_string(), "revoking access") } else { - unreachable!("developer error: make-public and make-private not required/exclusive"); + unreachable!("developer error: --grant and --revoke not required/exclusive"); }; info!( @@ -165,16 +173,29 @@ pub(crate) 
async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let snapshots = get_regional_snapshots(&amis, &ec2_clients).await?; trace!("Found snapshots: {:?}", snapshots); - let all = Some(vec!["all".to_string()]); - info!("Updating snapshot permissions - making {}", mode); - modify_regional_snapshots(None, all.clone(), &operation, &snapshots, &ec2_clients).await?; - - info!("Updating image permissions - making {}", mode); + info!("Updating snapshot permissions - {}", description); + modify_regional_snapshots( + Some(publish_args.user_ids.clone()), + Some(publish_args.group_names.clone()), + &operation, + &snapshots, + &ec2_clients, + ) + .await?; + + info!("Updating image permissions - {}", description); let ami_ids = amis .into_iter() .map(|(region, image)| (region, image.id)) .collect(); - modify_regional_images(None, all, &operation, &ami_ids, &ec2_clients).await?; + modify_regional_images( + Some(publish_args.user_ids.clone()), + Some(publish_args.group_names.clone()), + &operation, + &ami_ids, + &ec2_clients, + ) + .await?; Ok(()) } From af7f9340ea5f55ca0ecdf06a8aa89d3b107ccd48 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 29 Aug 2020 16:12:10 +0000 Subject: [PATCH 0344/1356] grub: build programs for host rather than target Otherwise we can't run `grub-bios-setup` when building for the x86_64 architecture on an aarch64 host. We've avoided problems in the opposite case because we create the EFI partition with our own tools for aarch64. Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index d8e16158..94fbd523 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -1,4 +1,5 @@ %global debug_package %{nil} +%global __strip %{_bindir}/strip Name: %{_cross_os}grub Version: 2.04 @@ -82,7 +83,7 @@ cp unicode/COPYING COPYING.unicode %build export \ - CPP="%{_cross_target}-gcc -E" \ + TARGET_CPP="%{_cross_target}-gcc -E" \ TARGET_CC="%{_cross_target}-gcc" \ TARGET_CFLAGS="%{grub_cflags}" \ TARGET_CPPFLAGS="%{grub_cflags}" \ @@ -96,6 +97,7 @@ export \ %cross_configure \ CFLAGS="" \ LDFLAGS="" \ + --host="%{_build}" \ --target="%{_cross_grub_target}" \ --with-platform="%{_cross_grub_platform}" \ --disable-grub-mkfont \ From d79cce2831f094f7b1b9172a463531a183966857 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Thu, 3 Sep 2020 18:53:46 +0000 Subject: [PATCH 0345/1356] Apply patch for CVE-2020-14386 --- ...fsx-Disable-Werror-stringop-overflow.patch | 4 +- ...t-packet-fix-overflow-in-tpacket_rcv.patch | 47 +++++++++++++++++++ packages/kernel/kernel.spec | 1 + 3 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch diff --git a/packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch b/packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch index 9030e96c..8c0c23b5 100644 --- a/packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch +++ b/packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch @@ -1,7 +1,7 @@ -From b85e7195a25319afb421a6a3ee2065fc8d225a8b Mon Sep 17 00:00:00 2001 +From a5f6b26082e0022d3c3e70e0718e4787939778d8 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 30 Jul 2019 12:59:09 -0700 -Subject: [PATCH] lustrefsx: Disable -Werror=stringop-overflow= +Subject: [PATCH 1/2] lustrefsx: Disable -Werror=stringop-overflow= Signed-off-by: iliana destroyer of worlds --- diff --git 
a/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch b/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch new file mode 100644 index 00000000..33257f2f --- /dev/null +++ b/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch @@ -0,0 +1,47 @@ +From eea7a6a08ef3acf437c6ce8a28694c3659542569 Mon Sep 17 00:00:00 2001 +From: Or Cohen +Date: Sun, 30 Aug 2020 20:04:51 +0300 +Subject: [PATCH 2/2] net/packet: fix overflow in tpacket_rcv + +Using tp_reserve to calculate netoff can overflow as +tp_reserve is unsigned int and netoff is unsigned short. + +This may lead to macoff receving a smaller value then +sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr +is set, an out-of-bounds write will occur when +calling virtio_net_hdr_from_skb. + +The bug is fixed by converting netoff to unsigned int +and checking if it exceeds USHRT_MAX. + +Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt") +Signed-off-by: Or Cohen +--- + net/packet/af_packet.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 1d63ab3a878a..56084a16d0f9 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -2167,7 +2167,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + int skb_len = skb->len; + unsigned int snaplen, res; + unsigned long status = TP_STATUS_USER; +- unsigned short macoff, netoff, hdrlen; ++ unsigned short macoff, hdrlen; ++ unsigned int netoff; + struct sk_buff *copy_skb = NULL; + struct timespec ts; + __u32 ts_status; +@@ -2236,6 +2237,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + } + macoff = netoff - maclen; + } ++ if (netoff > USHRT_MAX) { ++ atomic_inc(&po->tp_drops); ++ goto drop_n_restore; ++ } + if (po->tp_version <= TPACKET_V2) { + if (macoff + snaplen > po->rx_ring.frame_size) { + if (po->copy_thresh && diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index fa97cc9a..089e8c24 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -10,6 +10,7 @@ URL: https://www.kernel.org/ Source0: https://cdn.amazonlinux.com/blobstore/9e3beaecef0b030d83fb215be7ca67c01009cfec52fe9b12eb4b24fdb46eebce/kernel-5.4.50-25.83.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch +Patch0002: 0002-net-packet-fix-overflow-in-tpacket_rcv.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 8bd97007b500be900555cf7a4443d6cec27934d6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 14 Sep 2020 14:44:14 -0700 Subject: [PATCH 0346/1356] pubsys: use requested size for volume, keeping snapshot to minimum size The volume size used at instance launch time is determined by the block device mapping, defaulting to the snapshot size. Here we configure the BlockDeviceMappings to use the requested size rather than the snapshot size. The snapshots used to create those volumes, then, don't need to be more than the size of the data inside. (Users creating AMIs based on snapshots from existing AMIs can specify any desired size above the snapshot size in their own block device mappings.) 
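Condensed from the register.rs diff below, the root volume's EBS settings end up looking roughly like the sketch here (rusoto_ec2 types, other fields elided): the snapshot is now uploaded without a size override, and the size requested on the pubsys command line travels in the block device mapping instead.

```rust
// Sketch of the resulting root-volume EBS block device settings (abbreviated).
let root_ebs = EbsBlockDevice {
    delete_on_termination: Some(true),
    snapshot_id: Some(root_snapshot.clone()),
    volume_type: Some(VOLUME_TYPE.to_string()),
    // Option<i64>: when unset, EC2 falls back to the snapshot's own size.
    volume_size: ami_args.root_volume_size,
    ..Default::default()
};
```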
--- tools/pubsys/src/aws/ami/register.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index f724f274..0b69f319 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -40,7 +40,7 @@ async fn _register_image( let root_snapshot = snapshot_from_image( &ami_args.root_image, &uploader, - ami_args.root_volume_size, + None, ami_args.no_progress, ) .await @@ -53,7 +53,7 @@ async fn _register_image( let data_snapshot = snapshot_from_image( &ami_args.data_image, &uploader, - Some(ami_args.data_volume_size), + None, ami_args.no_progress, ) .await @@ -88,6 +88,7 @@ async fn _register_image( delete_on_termination: Some(true), snapshot_id: Some(root_snapshot.clone()), volume_type: Some(VOLUME_TYPE.to_string()), + volume_size: ami_args.root_volume_size, ..Default::default() }), ..Default::default() @@ -97,6 +98,7 @@ async fn _register_image( data_bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); if let Some(ebs) = data_bdm.ebs.as_mut() { ebs.snapshot_id = Some(data_snapshot.clone()); + ebs.volume_size = Some(ami_args.data_volume_size); } let register_request = RegisterImageRequest { From ff45eada5f36a37589dad01d7a5fae19bacb0068 Mon Sep 17 00:00:00 2001 From: troyaws Date: Tue, 15 Sep 2020 14:22:45 -0400 Subject: [PATCH 0347/1356] BUILDING.md: clarify system requirements --- BUILDING.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/BUILDING.md b/BUILDING.md index 7a159e41..ef020ff1 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -8,6 +8,29 @@ You can skip to the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setu ### Dependencies +#### System Requirements + +The build process artifacts and resulting images can consume in excess of 80GB in the local directory. + +#### Linux + +The build system requires certain operating system packages to be installed. + +Ensure the following OS packages are installed: + +##### Ubuntu + +``` +apt install build-essential libssl-dev pkg-config +``` + +##### Fedora + +``` +yum install make automake gcc openssl-devel pkg-config +``` + + #### Rust The build system is based on the Rust language. From ea8820a4745f7669791c2f5cca4945af1c304e72 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 15 Sep 2020 18:20:11 +0000 Subject: [PATCH 0348/1356] build: pass Go module proxy variables through docker-go Previously, we always set `GOPRIVATE=*`, which blocked any use of the public Go module mirror. Now we pass the module proxy variables through from the environment, so that the behavior is under the developer's control. --- tools/docker-go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/tools/docker-go b/tools/docker-go index d89e90a4..732429a3 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -54,18 +54,31 @@ DOCKER_RUN_ARGS="--network=host" parse_args "${@}" +# Pass through relevant Go variables, from the config or environment. +go_env=( ) +for i in GOPROXY GONOPROXY GOPRIVATE ; do + if command -v go >/dev/null 2>&1 ; then + govar="$(go env ${i})" + if [ -n "${govar}" ] ; then + go_env[${#go_env[@]}]="--env=${i}=${govar}" + fi + elif [ -n "${!i}" ] ; then + go_env[${#go_env[@]}]="--env=${i}=${!i}" + fi +done + # Go accepts both lower and uppercase proxy variables, pass both through. 
proxy_env=( ) -for i in http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY; do - if [ -n "${!i}" ]; then - proxy_env[${#proxy_env[@]}]="--env=$i=${!i}" - fi +for i in http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY ; do + if [ -n "${!i}" ]; then + proxy_env[${#proxy_env[@]}]="--env=$i=${!i}" + fi done docker run --rm \ - -e GOPRIVATE='*' \ -e GOCACHE='/tmp/.cache' \ -e GOPATH='/tmp/go' \ + "${go_env[@]}" \ "${proxy_env[@]}" \ --user "$(id -u):$(id -g)" \ --security-opt label:disable \ From b87f4da5f285f3117dc3efb83eb43d14a86c78c7 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Thu, 17 Sep 2020 21:34:06 +0000 Subject: [PATCH 0349/1356] Update pubsys to tough 0.9 and add KMS support --- tools/Cargo.lock | 64 +++++++++++++++++++++++++++++++++++--- tools/pubsys/Cargo.toml | 5 +-- tools/pubsys/src/config.rs | 1 + tools/pubsys/src/repo.rs | 50 ++++++++++++++++++++++------- 4 files changed, 101 insertions(+), 19 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index bf3f2199..35892f1a 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1344,7 +1344,8 @@ dependencies = [ "tinytemplate", "tokio", "toml", - "tough", + "tough 0.9.0", + "tough-kms", "tough-ssm", "update_metadata", "url", @@ -1621,6 +1622,20 @@ dependencies = [ "xml-rs", ] +[[package]] +name = "rusoto_kms" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c5a083f44d08db76d4deedd7527bb215dd008fa08f4b1d8ca40071522bdbcb7" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core 0.44.0", + "serde", + "serde_json", +] + [[package]] name = "rusoto_signature" version = "0.44.0" @@ -2361,6 +2376,28 @@ name = "tough" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71b8d86994e9da2233fc30c54223bc448a15bdb782f8060c66107fc6b88619ba" +dependencies = [ + "chrono", + "globset", + "hex", + "log", + "olpc-cjson", + "pem", + "ring", + "serde", + "serde_json", + "serde_plain", + "snafu", + "untrusted", + "url", + "walkdir", +] + +[[package]] +name = "tough" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc05d902ccf136ba55d5e2c7222ddc1623f657e6add3f030e93c4dc5341bbdb7" dependencies = [ "chrono", "globset", @@ -2379,11 +2416,28 @@ dependencies = [ "walkdir", ] +[[package]] +name = "tough-kms" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90d5273998150708e8639464e4fe21eba7afbf7372f57f4d7ae0dabffd3d8dab" +dependencies = [ + "bytes", + "pem", + "ring", + "rusoto_core 0.44.0", + "rusoto_credential 0.44.0", + "rusoto_kms", + "snafu", + "tokio", + "tough 0.9.0", +] + [[package]] name = "tough-ssm" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e670640f67e719671a87fac948eabba0fd33633aa8be7804b38a1a1d2da32b" +checksum = "63b7e51b42318a756ab7cbcf036f2adb23a8462d762519d5a3e50b886430ed23" dependencies = [ "rusoto_core 0.44.0", "rusoto_credential 0.44.0", @@ -2392,7 +2446,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough", + "tough 0.9.0", ] [[package]] @@ -2498,7 +2552,7 @@ dependencies = [ "serde_plain", "snafu", "toml", - "tough", + "tough 0.8.0", ] [[package]] diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index fcee92c2..fd2d0559 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -34,8 +34,9 @@ structopt = { version = "0.3", default-features = false } tinytemplate = "1.1" tokio = { 
version = "0.2.21", features = ["time"] } toml = "0.5" -tough = { version = "0.8", features = ["http"] } -tough-ssm = "0.3" +tough = { version = "0.9", features = ["http"] } +tough-kms = "0.1" +tough-ssm = "0.4" update_metadata = { path = "../../sources/updater/update_metadata/" } url = { version = "2.1.0", features = ["serde"] } tempfile = "3.1" diff --git a/tools/pubsys/src/config.rs b/tools/pubsys/src/config.rs index 855c75df..ecd17e98 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys/src/config.rs @@ -59,6 +59,7 @@ pub(crate) struct AwsRegionConfig { #[derive(Debug, Deserialize)] pub(crate) enum SigningKeyConfig { file { path: PathBuf }, + kms { key_id: String }, ssm { parameter: String }, } diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 5a4ada78..408f4d52 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -23,6 +23,7 @@ use tough::{ schema::Target, ExpirationEnforcement, Limits, Repository, Settings, }; +use tough_kms::{KmsKeySource, KmsSigningAlgorithm}; use tough_ssm::SsmKeySource; use transport::RepoTransport; use update_metadata::{Images, Manifest, Release, UpdateWaves}; @@ -182,7 +183,7 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> /// Adds targets, expirations, and version to the RepositoryEditor fn update_editor<'a, P>( repo_args: &'a RepoArgs, - editor: &mut RepositoryEditor, + editor: &mut RepositoryEditor<'a, RepoTransport>, targets: impl Iterator, manifest_path: P, ) -> Result<()> @@ -202,7 +203,7 @@ where path: manifest_path.as_ref(), })?; debug!("Adding target for manifest.json"); - editor.add_target("manifest.json".to_string(), manifest_target); + editor.add_target("manifest.json", manifest_target).context(error::AddTarget { path: "manifest.json" })?; // Add expirations =^..^= =^..^= =^..^= =^..^= @@ -224,6 +225,9 @@ where editor .snapshot_expires(snapshot_expiration) .targets_expires(targets_expiration) + .context(error::SetTargetsExpiration { + expiration: targets_expiration, + })? .timestamp_expires(timestamp_expiration); // Add version =^..^= =^..^= =^..^= =^..^= @@ -235,6 +239,7 @@ where editor .snapshot_version(version) .targets_version(version) + .context(error::SetTargetsVersion { version })? .timestamp_version(version); Ok(()) @@ -284,23 +289,24 @@ fn repo_urls<'a>( /// Builds an editor and manifest; will start from an existing repo if one is specified in the /// configuration. Returns Err if we fail to read from the repo. Returns Ok(None) if we detect /// that the repo does not exist. -fn load_editor_and_manifest

( +fn load_editor_and_manifest<'a, P>( root_role_path: P, - metadata_url: &Url, - targets_url: &Url, -) -> Result> + transport: &'a RepoTransport, + datastore: &'a Path, + metadata_url: &'a Url, + targets_url: &'a Url, +) -> Result, Manifest)>> where P: AsRef, { let root_role_path = root_role_path.as_ref(); // Create a temporary directory where the TUF client can store metadata - let workdir = tempdir().context(error::TempDir)?; let settings = Settings { root: File::open(root_role_path).context(error::File { path: root_role_path, })?, - datastore: workdir.path(), + datastore, metadata_base_url: metadata_url.as_str(), targets_base_url: targets_url.as_str(), limits: Limits::default(), @@ -308,8 +314,7 @@ where }; // Try to load the repo... - let transport = RepoTransport::default(); - match Repository::load(&transport, settings) { + match Repository::load(transport, settings) { // If we load it successfully, build an editor and manifest from it. Ok(repo) => { let reader = repo @@ -377,9 +382,11 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Build a repo editor and manifest, from an existing repo if available, otherwise fresh let maybe_urls = repo_urls(&repo_args, &infra_config)?; - let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls { + let workdir = tempdir().context(error::TempDir)?; + let transport = RepoTransport::default(); + let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() { info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(root_role_path, &metadata_url, &targets_url)? { + match load_editor_and_manifest(root_role_path, &transport, workdir.path(), &metadata_url, &targets_url)? { Some((editor, manifest)) => (editor, manifest), None => { info!( @@ -436,6 +443,12 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let key_source: Box = match signing_key_config { SigningKeyConfig::file { path } => Box::new(LocalKeySource { path: path.clone() }), + SigningKeyConfig::kms { key_id } => Box::new(KmsKeySource { + profile: None, + key_id: key_id.clone(), + client: None, + signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, + }), SigningKeyConfig::ssm { parameter } => Box::new(SsmKeySource { profile: None, parameter_name: parameter.clone(), @@ -510,6 +523,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { } mod error { + use chrono::{DateTime, Utc}; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -628,6 +642,18 @@ mod error { source: tough::error::Error, }, + #[snafu(display("Failed to set targets expiration to {}: {}", expiration, source))] + SetTargetsExpiration { + expiration: DateTime, + source: tough::error::Error, + }, + + #[snafu(display("Failed to set targets version to {}: {}", version, source))] + SetTargetsVersion { + version: u64, + source: tough::error::Error, + }, + #[snafu(display("Failed to set waves from '{}': {}", wave_policy_path.display(), source))] SetWaves { wave_policy_path: PathBuf, From dad724788066b0465b26bf940dd43a800b2652db Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 22 Sep 2020 13:41:16 -0700 Subject: [PATCH 0350/1356] README: recommend update methods for EKS and ECS variants --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ef11b68f..37f26985 100644 --- a/README.md +++ b/README.md @@ -144,7 +144,12 @@ For more details, see the [update system documentation](sources/updater/). 
### Update methods -There are several ways of updating your Bottlerocket hosts: +There are several ways of updating your Bottlerocket hosts. + +For EKS variants of Bottlerocket, we recommend using the [Bottlerocket update operator](https://github.com/bottlerocket-os/bottlerocket-update-operator) for automated updates. +You can also use one of the methods below for direct control of updates. + +For the ECS preview variant of Bottlerocket, we recommend updating hosts using one of the methods below, until further automation is ready. #### Update API From 97b8a67be826943590f675a8053c9f5e6f758667 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 21 Sep 2020 14:28:31 -0700 Subject: [PATCH 0351/1356] README: relocate update API instructions and example Relocates update API instructions, diagrams and example from the main Bottlerocket README to the update system documentation. --- README.md | 71 ++----------------------------------------------------- 1 file changed, 2 insertions(+), 69 deletions(-) diff --git a/README.md b/README.md index 37f26985..7b6a9600 100644 --- a/README.md +++ b/README.md @@ -153,78 +153,11 @@ For the ECS preview variant of Bottlerocket, we recommend updating hosts using o #### Update API -The [Bottlerocket API](#api) allows you to update and reboot your host with simple API calls. You can change [settings](#updates-settings) to control which updates are selected. - -In general, the process of using the update API looks like this. You refresh the list of known updates, then apply one to the system. Calls to `/updates/status` will tell you the current state and give more details on any errors. - -![Update API overview](sources/api/update_api.png) - -First, refresh the list of available updates: -``` -apiclient -u /actions/refresh-updates -m POST -``` - -Now you can see the list of available updates, along with the chosen update, according to your `version-lock` [setting](#updates-settings): -``` -apiclient -u /updates/status -``` - -This will return the current update status in JSON format. The status should look something like the following (pretty-printed): -``` -{ - "update_state": "Available", - "available_updates": [ - "0.4.0", - "0.3.4", - ... - ], - "chosen_update": { - "arch": "x86_64", - "version": "0.4.0", - "variant": "aws-k8s-1.15" - }, - "active_partition": { - "image": { - "arch": "x86_64", - "version": "0.3.2", - "variant": "aws-k8s-1.15" - }, - "next_to_boot": true - }, - "staging_partition": null, - "most_recent_command": { - "cmd_type": "refresh", - "cmd_status": "Success", - ... - } -} -``` - -You can see that we're running `v0.3.2` in the active partition, and that `v0.4.0` is available. -If you're happy with that selection, you can request that the update be downloaded and applied to disk. (The update will remain inactive until you make the `activate-update` call below.) -``` -apiclient -u /actions/prepare-update -m POST -``` - -After you request that the update be prepared, you can check the update status again until it reflects the new version in the staging partition. -``` -apiclient -u /updates/status -``` - -If the staging partition shows the new version, you can proceed to "activate" the update. -This means that as soon as the host is rebooted it will try to run the new version. (If the new version can't boot, we automatically flip back to the old version.) 
-``` -apiclient -u /actions/activate-update -m POST -``` - -You can reboot the host with: -``` -apiclient -u /actions/reboot -m POST -``` +The [Bottlerocket API](#api) includes methods for checking and starting system updates. You can read more about the update APIs in our [update system documentation](sources/updater/README.md#update-api). #### Updog -You can also update using a CLI tool, `updog`, if you [connect through a host container](#exploration). +You can update Bottlerocket using a CLI tool, `updog`, if you [connect through the admin container](#admin-container). Here's how you can see whether there's an update: From bf17cbe27b601135c65491a0cd5aa45a1b47d4d7 Mon Sep 17 00:00:00 2001 From: Magnus Kulke Date: Wed, 2 Sep 2020 13:02:54 +0200 Subject: [PATCH 0352/1356] Added option to enable spot instance draining Co-authored-by: Samuel Karp --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 7b6a9600..194dbaa3 100644 --- a/README.md +++ b/README.md @@ -327,6 +327,7 @@ These settings can be changed at any time. Bottlerocket sets this value to false by default. * `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. +* `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. #### Updates settings From 1ae46e3f3c5205058246d497174c8c8cf32a2070 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 24 Sep 2020 18:21:15 +0000 Subject: [PATCH 0353/1356] pubsys: split pubsys-config into new library crate Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/Cargo.lock | 13 ++++ tools/Cargo.toml | 3 +- tools/buildsys/deny.toml | 40 ----------- tools/{pubsys => }/deny.toml | 0 tools/pubsys-config/Cargo.toml | 15 +++++ .../config.rs => pubsys-config/src/lib.rs} | 67 +++++++++++-------- tools/pubsys/Cargo.toml | 1 + tools/pubsys/src/aws/ami/mod.rs | 4 +- tools/pubsys/src/aws/ami/wait.rs | 2 +- tools/pubsys/src/aws/client.rs | 2 +- tools/pubsys/src/aws/mod.rs | 2 +- tools/pubsys/src/aws/promote_ssm/mod.rs | 4 +- tools/pubsys/src/aws/publish_ami/mod.rs | 4 +- tools/pubsys/src/aws/ssm/mod.rs | 4 +- tools/pubsys/src/main.rs | 13 ---- tools/pubsys/src/repo.rs | 4 +- 16 files changed, 82 insertions(+), 96 deletions(-) delete mode 100644 tools/buildsys/deny.toml rename tools/{pubsys => }/deny.toml (100%) create mode 100644 tools/pubsys-config/Cargo.toml rename tools/{pubsys/src/config.rs => pubsys-config/src/lib.rs} (62%) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 35892f1a..59be0091 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1326,6 +1326,7 @@ dependencies = [ "lazy_static", "log", "parse-datetime", + "pubsys-config", "reqwest", "rusoto_core 0.45.0", "rusoto_credential 0.45.0", @@ -1351,6 +1352,18 @@ dependencies = [ "url", ] +[[package]] +name = "pubsys-config" +version = "0.1.0" +dependencies = [ + "chrono", + "parse-datetime", + "serde", + "snafu", + "toml", + "url", +] + [[package]] name = "quote" version = "1.0.7" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 68d887d6..3264be5f 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ "buildsys", - "pubsys" + "pubsys", + "pubsys-config", ] diff --git a/tools/buildsys/deny.toml b/tools/buildsys/deny.toml deleted file mode 100644 
index 91e56ffc..00000000 --- a/tools/buildsys/deny.toml +++ /dev/null @@ -1,40 +0,0 @@ -[licenses] -unlicensed = "deny" - -# Deny licenses unless they are specifically listed here -copyleft = "deny" -allow-osi-fsf-free = "neither" -default = "deny" - -# We want really high confidence when inferring licenses from text -confidence-threshold = 0.93 - -allow = [ - "Apache-2.0", - #"BSD-2-Clause", # OK but currently unused; commenting to prevent warning - "BSD-3-Clause", - "BSL-1.0", - "ISC", - "MIT", - "OpenSSL", - "Unlicense", - "Zlib", -] - -exceptions = [ - { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, -] - -[[licenses.clarify]] -name = "ring" -expression = "MIT AND ISC AND OpenSSL" -license-files = [ - { path = "LICENSE", hash = 0xbd0eed23 }, -] - -[[licenses.clarify]] -name = "webpki" -expression = "ISC" -license-files = [ - { path = "LICENSE", hash = 0x001c7e6c }, -] diff --git a/tools/pubsys/deny.toml b/tools/deny.toml similarity index 100% rename from tools/pubsys/deny.toml rename to tools/deny.toml diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml new file mode 100644 index 00000000..031ad0aa --- /dev/null +++ b/tools/pubsys-config/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "pubsys-config" +version = "0.1.0" +authors = ["Zac Mrowicki ", "Tom Kirchner "] +license = "Apache-2.0 OR MIT" +edition = "2018" +publish = false + +[dependencies] +chrono = "0.4" +parse-datetime = { path = "../../sources/parse-datetime" } +serde = { version = "1.0", features = ["derive"] } +snafu = "0.6" +toml = "0.5" +url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/pubsys/src/config.rs b/tools/pubsys-config/src/lib.rs similarity index 62% rename from tools/pubsys/src/config.rs rename to tools/pubsys-config/src/lib.rs index ecd17e98..3544f5d6 100644 --- a/tools/pubsys/src/config.rs +++ b/tools/pubsys-config/src/lib.rs @@ -1,8 +1,8 @@ //! The config module owns the definition and loading process for our configuration sources. -use crate::deserialize_offset; use chrono::Duration; -use serde::Deserialize; +use parse_datetime::parse_offset; +use serde::{Deserialize, Deserializer}; use snafu::ResultExt; use std::collections::{HashMap, VecDeque}; use std::fs; @@ -11,19 +11,19 @@ use url::Url; /// Configuration needed to load and create repos #[derive(Debug, Deserialize)] -pub(crate) struct InfraConfig { +pub struct InfraConfig { // Repo subcommand config - pub(crate) root_role_path: Option, - pub(crate) signing_keys: Option>, - pub(crate) repo: Option>, + pub root_role_path: Option, + pub signing_keys: Option>, + pub repo: Option>, // Config for AWS specific subcommands - pub(crate) aws: Option, + pub aws: Option, } impl InfraConfig { /// Deserializes an InfraConfig from a given path - pub(crate) fn from_path
<P>(path: P) -> Result<Self> + pub fn from_path<P>
(path: P) -> Result where P: AsRef, { @@ -35,21 +35,21 @@ impl InfraConfig { /// AWS-specific infrastructure configuration #[derive(Debug, Default, Deserialize)] -pub(crate) struct AwsConfig { +pub struct AwsConfig { #[serde(default)] - pub(crate) regions: VecDeque, - pub(crate) role: Option, - pub(crate) profile: Option, + pub regions: VecDeque, + pub role: Option, + pub profile: Option, #[serde(default)] - pub(crate) region: HashMap, - pub(crate) ssm_prefix: Option, + pub region: HashMap, + pub ssm_prefix: Option, } /// AWS region-specific configuration #[derive(Debug, Deserialize)] -pub(crate) struct AwsRegionConfig { - pub(crate) role: Option, - pub(crate) endpoint: Option, +pub struct AwsRegionConfig { + pub role: Option, + pub endpoint: Option, } /// Location of signing keys @@ -57,7 +57,7 @@ pub(crate) struct AwsRegionConfig { // more common for TOML config to be lowercase. #[allow(non_camel_case_types)] #[derive(Debug, Deserialize)] -pub(crate) enum SigningKeyConfig { +pub enum SigningKeyConfig { file { path: PathBuf }, kms { key_id: String }, ssm { parameter: String }, @@ -65,25 +65,25 @@ pub(crate) enum SigningKeyConfig { /// Location of existing published repo #[derive(Debug, Deserialize)] -pub(crate) struct RepoConfig { - pub(crate) metadata_base_url: Option, - pub(crate) targets_url: Option, +pub struct RepoConfig { + pub metadata_base_url: Option, + pub targets_url: Option, } /// How long it takes for each metadata type to expire #[derive(Debug, Deserialize)] -pub(crate) struct RepoExpirationPolicy { +pub struct RepoExpirationPolicy { #[serde(deserialize_with = "deserialize_offset")] - pub(crate) snapshot_expiration: Duration, + pub snapshot_expiration: Duration, #[serde(deserialize_with = "deserialize_offset")] - pub(crate) targets_expiration: Duration, + pub targets_expiration: Duration, #[serde(deserialize_with = "deserialize_offset")] - pub(crate) timestamp_expiration: Duration, + pub timestamp_expiration: Duration, } impl RepoExpirationPolicy { /// Deserializes a RepoExpirationPolicy from a given path - pub(crate) fn from_path
<P>(path: P) -> Result<Self> + pub fn from_path<P>
(path: P) -> Result where P: AsRef, { @@ -93,6 +93,15 @@ impl RepoExpirationPolicy { } } +/// Deserializes a Duration in the form of "in X hours/days/weeks" +fn deserialize_offset<'de, D>(deserializer: D) -> std::result::Result +where + D: Deserializer<'de>, +{ + let s: &str = Deserialize::deserialize(deserializer)?; + parse_offset(s).map_err(serde::de::Error::custom) +} + mod error { use snafu::Snafu; use std::io; @@ -100,7 +109,7 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility = "pub(super)")] - pub(crate) enum Error { + pub enum Error { #[snafu(display("Failed to read '{}': {}", path.display(), source))] File { path: PathBuf, source: io::Error }, @@ -111,5 +120,5 @@ mod error { }, } } -pub(crate) use error::Error; -type Result = std::result::Result; +pub use error::Error; +pub type Result = std::result::Result; diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index fd2d0559..9bacf3d4 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -11,6 +11,7 @@ async-trait = "0.1.36" chrono = "0.4" clap = "2.33" coldsnap = "0.1" +pubsys-config = { path = "../pubsys-config/" } futures = "0.3.5" indicatif = "0.15.0" lazy_static = "1.4" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 4e65edf0..a8ba71fa 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -7,7 +7,7 @@ pub(crate) mod wait; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; use crate::aws::{client::build_client, region_from_string}; -use crate::config::{AwsConfig, InfraConfig}; +use pubsys_config::{AwsConfig, InfraConfig}; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; @@ -483,7 +483,7 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { - source: crate::config::Error, + source: pubsys_config::Error, }, #[snafu(display("Failed to create file '{}': {}", path.display(), source))] diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 072434d1..604a9dba 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -1,5 +1,5 @@ use crate::aws::client::build_client; -use crate::config::AwsConfig; +use pubsys_config::AwsConfig; use log::info; use rusoto_core::Region; use rusoto_ec2::{DescribeImagesRequest, Ec2, Ec2Client}; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index e9055f00..8576424a 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -1,4 +1,4 @@ -use crate::config::AwsConfig; +use pubsys_config::AwsConfig; use async_trait::async_trait; use rusoto_core::{request::DispatchSignedRequest, HttpClient, Region}; use rusoto_credential::{ diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index bc19dd44..ccea97ed 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -1,4 +1,4 @@ -use crate::config::AwsConfig; +use pubsys_config::AwsConfig; use rusoto_core::Region; use snafu::ResultExt; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 05ac9067..ad74bef7 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -4,7 +4,7 @@ use crate::aws::client::build_client; use crate::aws::{parse_arch, region_from_string}; use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; -use crate::config::InfraConfig; +use pubsys_config::InfraConfig; use 
crate::Args; use log::{info, trace}; use rusoto_core::Region; @@ -231,7 +231,7 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { - source: crate::config::Error, + source: pubsys_config::Error, }, #[snafu(display("Found no parameters in source version {}", version))] diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 164a0bf9..8bef24dd 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -5,7 +5,7 @@ use crate::aws::ami::wait::{self, wait_for_ami}; use crate::aws::ami::Image; use crate::aws::client::build_client; use crate::aws::region_from_string; -use crate::config::InfraConfig; +use pubsys_config::InfraConfig; use crate::Args; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; @@ -519,7 +519,7 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { - source: crate::config::Error, + source: pubsys_config::Error, }, #[snafu(display("Failed to describe images in {}: {}", region, source))] diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index b0377db6..d9c45c74 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -5,7 +5,7 @@ pub(crate) mod ssm; pub(crate) mod template; use crate::aws::{ami::Image, client::build_client, parse_arch, region_from_string}; -use crate::config::{AwsConfig, InfraConfig}; +use pubsys_config::{AwsConfig, InfraConfig}; use crate::Args; use log::{info, trace}; use rusoto_core::Region; @@ -304,7 +304,7 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { - source: crate::config::Error, + source: pubsys_config::Error, }, #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 1aa83906..d69463cb 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -21,13 +21,9 @@ Configuration comes from: #![deny(rust_2018_idioms)] mod aws; -mod config; mod repo; -use chrono::Duration; -use parse_datetime::parse_offset; use semver::Version; -use serde::{Deserialize, Deserializer}; use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; use snafu::ResultExt; use std::path::PathBuf; @@ -117,15 +113,6 @@ pub(crate) fn friendly_version( Version::parse(version_str) } -/// Deserializes a Duration in the form of "in X hours/days/weeks" -pub(crate) fn deserialize_offset<'de, D>(deserializer: D) -> std::result::Result -where - D: Deserializer<'de>, -{ - let s: &str = Deserialize::deserialize(deserializer)?; - parse_offset(s).map_err(serde::de::Error::custom) -} - mod error { use snafu::Snafu; diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 408f4d52..8fff1f93 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -2,12 +2,12 @@ mod transport; -use crate::config::{InfraConfig, RepoExpirationPolicy, SigningKeyConfig}; use crate::{friendly_version, Args}; use chrono::{DateTime, Utc}; use lazy_static::lazy_static; use log::{debug, info, trace}; use parse_datetime::parse_datetime; +use pubsys_config::{InfraConfig, RepoExpirationPolicy, SigningKeyConfig}; use semver::Version; use snafu::{ensure, OptionExt, ResultExt}; use std::convert::TryInto; @@ -557,7 +557,7 @@ mod error { }, #[snafu(display("Error reading config: {}", source))] - Config { source: crate::config::Error }, + Config { source: pubsys_config::Error }, #[snafu(display("Failed to create directory '{}': 
{}", path.display(), source))] CreateDir { path: PathBuf, source: io::Error }, From fb39653146e1df075c1f8ca0469b9857210066d1 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 24 Sep 2020 18:36:15 +0000 Subject: [PATCH 0354/1356] Add pubsys-setup to ease creation of root role and signing key If the user doesn't define a root role and key themselves in Infra.toml, we'll generate them so it's easier to get started building images and repos. The user should update Infra.toml with more permanent resources and locations, but this makes it much easier to get started handling your own builds. Note: this changes the format of Infra.toml to make root roles and keys specific to a repo. Roles and keys are necessarily tied to a repo, so this is simpler than having to specify PUBLISH_REPO *and* PUBLISH_KEY (and manually changing the root.json file, which wasn't configurable before). Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- .gitignore | 2 + tools/.gitignore | 3 +- tools/Cargo.lock | 24 ++ tools/Cargo.toml | 1 + tools/buildsys/src/builder.rs | 8 +- tools/pubsys-config/src/lib.rs | 39 +++- tools/pubsys-setup/Cargo.toml | 21 ++ tools/pubsys-setup/src/main.rs | 380 ++++++++++++++++++++++++++++++++ tools/pubsys/Infra.toml.example | 43 ++-- tools/pubsys/src/repo.rs | 91 ++++---- 10 files changed, 546 insertions(+), 66 deletions(-) create mode 100644 tools/pubsys-setup/Cargo.toml create mode 100644 tools/pubsys-setup/src/main.rs diff --git a/.gitignore b/.gitignore index f152b238..e2e553ee 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ /html /Infra.toml /*.pem +/keys +/roles diff --git a/tools/.gitignore b/tools/.gitignore index 9f76dddb..d3ceb7fc 100644 --- a/tools/.gitignore +++ b/tools/.gitignore @@ -1,4 +1,3 @@ -/bin/buildsys -/bin/pubsys +/bin /.crates.toml /.crates2.json diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 59be0091..e9ede8ab 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1364,6 +1364,24 @@ dependencies = [ "url", ] +[[package]] +name = "pubsys-setup" +version = "0.1.0" +dependencies = [ + "hex", + "log", + "pubsys-config", + "reqwest", + "sha2 0.9.1", + "shell-words", + "simplelog", + "snafu", + "structopt", + "tempfile", + "toml", + "url", +] + [[package]] name = "quote" version = "1.0.7" @@ -1984,6 +2002,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "shell-words" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fa3938c99da4914afedd13bf3d79bcb6c277d1b2c398d23257a304d9e1b074" + [[package]] name = "shlex" version = "0.1.1" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 3264be5f..c4502c3c 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -3,4 +3,5 @@ members = [ "buildsys", "pubsys", "pubsys-config", + "pubsys-setup", ] diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 5778af43..999a6411 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -48,15 +48,21 @@ impl PackageBuilder { // themselves in the package's spec file. let var = "BUILDSYS_VARIANT"; let variant = env::var(var).context(error::Environment { var })?; + // Same for repo, which is used to determine the correct root.json, which is only included + // in the os package. 
+ let var = "PUBLISH_REPO"; + let repo = env::var(var).context(error::Environment { var })?; let target = "package"; let build_args = format!( "--build-arg PACKAGE={package} \ --build-arg ARCH={arch} \ - --build-arg VARIANT={variant}", + --build-arg VARIANT={variant} \ + --build-arg REPO={repo}", package = package, arch = arch, variant = variant, + repo = repo, ); let tag = format!( "buildsys-pkg-{package}-{arch}", diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 3544f5d6..712d3b78 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -5,6 +5,7 @@ use parse_datetime::parse_offset; use serde::{Deserialize, Deserializer}; use snafu::ResultExt; use std::collections::{HashMap, VecDeque}; +use std::convert::TryFrom; use std::fs; use std::path::{Path, PathBuf}; use url::Url; @@ -13,8 +14,6 @@ use url::Url; #[derive(Debug, Deserialize)] pub struct InfraConfig { // Repo subcommand config - pub root_role_path: Option, - pub signing_keys: Option>, pub repo: Option>, // Config for AWS specific subcommands @@ -56,16 +55,48 @@ pub struct AwsRegionConfig { // These variant names are lowercase because they have to match the text in Infra.toml, and it's // more common for TOML config to be lowercase. #[allow(non_camel_case_types)] -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub enum SigningKeyConfig { file { path: PathBuf }, kms { key_id: String }, ssm { parameter: String }, } -/// Location of existing published repo +impl TryFrom for Url { + type Error = (); + fn try_from(key: SigningKeyConfig) -> std::result::Result { + match key { + SigningKeyConfig::file { path } => Url::from_file_path(path), + // We don't support passing profiles to tough in the name of the key/parameter, so for + // KMS and SSM we prepend a slash if there isn't one present. 
+ SigningKeyConfig::kms { key_id } => { + let key_id = if key_id.starts_with('/') { + key_id.to_string() + } else { + format!("/{}", key_id) + }; + Url::parse(&format!("aws-kms://{}", key_id)).map_err(|_| ()) + } + SigningKeyConfig::ssm { parameter } => { + let parameter = if parameter.starts_with('/') { + parameter.to_string() + } else { + format!("/{}", parameter) + }; + Url::parse(&format!("aws-ssm://{}", parameter)).map_err(|_| ()) + } + } + } +} + +/// Represents a Bottlerocket repo's location and the metadata needed to update the repo #[derive(Debug, Deserialize)] pub struct RepoConfig { + pub root_role_url: Option, + pub root_role_sha512: Option, + + pub signing_keys: Option, + pub metadata_base_url: Option, pub targets_url: Option, } diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml new file mode 100644 index 00000000..9df5fa75 --- /dev/null +++ b/tools/pubsys-setup/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "pubsys-setup" +version = "0.1.0" +authors = ["Zac Mrowicki ", "Tom Kirchner "] +license = "Apache-2.0 OR MIT" +edition = "2018" +publish = false + +[dependencies] +hex = "0.4.0" +log = "0.4" +pubsys-config = { path = "../pubsys-config/" } +reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } +sha2 = "0.9" +shell-words = "1.0" +simplelog = "0.8" +snafu = "0.6" +structopt = { version = "0.3", default-features = false } +tempfile = "3.1" +toml = "0.5" +url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs new file mode 100644 index 00000000..aec53e67 --- /dev/null +++ b/tools/pubsys-setup/src/main.rs @@ -0,0 +1,380 @@ +/*! +`pubsys setup` helps you get started with the credentials you need to make Bottlerocket images and +the repos you use to update them. Specifically, it can create a new key and role, or download an +existing role. +*/ + +#![deny(rust_2018_idioms)] + +use log::{debug, info, trace, warn}; +use pubsys_config::InfraConfig; +use sha2::{Digest, Sha512}; +use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::convert::TryFrom; +use std::fs; +use std::os::unix::fs::PermissionsExt; +use std::path::PathBuf; +use std::process::{self, Command}; +use structopt::StructOpt; +use tempfile::NamedTempFile; +use url::Url; + +/// Helps you get started with credentials to make Bottlerocket images and repos. +#[derive(Debug, StructOpt)] +struct Args { + #[structopt(global = true, long, default_value = "INFO")] + /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE + log_level: LevelFilter, + + #[structopt(long, parse(from_os_str))] + /// Path to Infra.toml + infra_config_path: PathBuf, + + #[structopt(long)] + /// Use this named repo from Infra.toml + repo: String, + + #[structopt(long, parse(from_os_str))] + /// Path to root.json + root_role_path: PathBuf, + #[structopt(long, parse(from_os_str))] + /// If we have to generate a local key, store it here + default_key_path: PathBuf, + + #[structopt(long)] + /// Allow setup to continue if we have a root role but no key for it + allow_missing_key: bool, +} + +/// The tuftool macro wraps Command to simplify calls to tuftool. +macro_rules! tuftool { + // We use variadic arguments to wrap a format! call so the user doesn't need to call format! + // each time. `tuftool root` always requires the path to root.json so there's always at least + // one. 
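As an aside, the TryFrom<SigningKeyConfig> for Url conversion above turns each key location into a URL form (file://, aws-kms://, or aws-ssm://) that downstream tooling can consume. A hedged sketch of the expected behavior, using an invented key ID:

```
use std::convert::TryFrom;
use url::Url;
// assuming: use pubsys_config::SigningKeyConfig;

fn main() {
    // Invented key ID for illustration only.
    let config = SigningKeyConfig::kms { key_id: "abc-def-123".to_string() };
    let key_url = Url::try_from(config).expect("could not build a key URL");
    // With the leading slash prepended, this serializes as "aws-kms:///abc-def-123".
    println!("{}", key_url);
}
```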
+ ($format_str:expr, $($format_arg:expr),*) => { + let arg_str = format!($format_str, $($format_arg),*); + trace!("tuftool arg string: {}", arg_str); + let args = shell_words::split(&arg_str).context(error::CommandSplit { command: &arg_str })?; + trace!("tuftool split args: {:#?}", args); + + let status = Command::new("tuftool") + .args(args) + .status() + .context(error::TuftoolSpawn)?; + + ensure!(status.success(), error::TuftoolResult { + command: arg_str, + code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) + }); + } +} + +/// Main entry point for tuftool setup. +fn run() -> Result<()> { + // Parse and store the args passed to the program + let args = Args::from_args(); + + // TerminalMode::Mixed will send errors to stderr and anything less to stdout. + TermLogger::init(args.log_level, LogConfig::default(), TerminalMode::Mixed) + .context(error::Logger)?; + + // Make /roles and /keys directories, if they don't exist, so we can write generated files. + let role_dir = args.root_role_path.parent().context(error::Path { + path: &args.root_role_path, + thing: "root role", + })?; + let key_dir = args.default_key_path.parent().context(error::Path { + path: &args.default_key_path, + thing: "key", + })?; + fs::create_dir_all(role_dir).context(error::Mkdir { path: role_dir })?; + fs::create_dir_all(key_dir).context(error::Mkdir { path: key_dir })?; + + // Main branching logic for deciding whether to create role/key, use what we have, or error. + match find_root_role_and_key(&args)? { + (Some(_root_role_path), Some(_key_url)) => Ok(()), + (Some(_root_role_path), None) => { + ensure!( + args.allow_missing_key, + error::MissingKey { repo: args.repo } + ); + Ok(()) + } + // User is missing something, so we generate at least a root.json and maybe a key. + (None, maybe_key_url) => { + if maybe_key_url.is_some() { + info!("Didn't find root role in Infra.toml, generating..."); + } else { + info!("Didn't find root role or signing key in Infra.toml, generating..."); + } + + let temp_root_role = + NamedTempFile::new_in(&role_dir).context(error::TempFileCreate { + purpose: "root role", + })?; + let temp_root_role_path = temp_root_role.path().display(); + + // Make tuftool calls to create an initial root.json with basic parameters. + tuftool!("root init '{}'", temp_root_role_path); + + tuftool!("root expire '{}' 'in 52 weeks'", temp_root_role_path); + + tuftool!("root set-threshold '{}' root 1", temp_root_role_path); + tuftool!("root set-threshold '{}' snapshot 1", temp_root_role_path); + tuftool!("root set-threshold '{}' targets 1", temp_root_role_path); + tuftool!("root set-threshold '{}' timestamp 1", temp_root_role_path); + + let key_url = if let Some(key_url) = maybe_key_url { + // If the user has a key, add it to each role. + tuftool!("root add-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", + temp_root_role_path, key_url); + key_url + } else { + // If the user has no key, build one and add it to each role. + tuftool!("root gen-rsa-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", + temp_root_role_path, args.default_key_path.display()); + warn!( + "Created a key at {} - note that for production use, you should \ + use a key stored in a trusted service like KMS or SSM", + args.default_key_path.display() + ); + + Url::from_file_path(&args.default_key_path) + .ok() + .context(error::FileUrl { + path: args.default_key_path, + })? + }; + + // Sign the role with the given key. 
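As an aside, each tuftool! call above reduces to formatting an argument string, splitting it with shell_words, and running the tuftool binary while checking its exit status. A rough hand-expanded sketch of one call, with the macro's snafu error handling collapsed into expect/assert:

```
use std::process::Command;

// Hand expansion of: tuftool!("root expire '{}' 'in 52 weeks'", root_role_path);
fn expire_root_role(root_role_path: &str) {
    let arg_str = format!("root expire '{}' 'in 52 weeks'", root_role_path);
    let args = shell_words::split(&arg_str).expect("failed to split command");
    let status = Command::new("tuftool")
        .args(args)
        .status()
        .expect("failed to start tuftool");
    assert!(status.success(), "tuftool exited nonzero");
}
```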
+ tuftool!("root sign '{}' -k '{}'", temp_root_role_path, key_url); + + temp_root_role + .persist_noclobber(&args.root_role_path) + .context(error::TempFilePersist { + path: &args.root_role_path, + })?; + + warn!( + "Created a root role at {} - note that for production use, you should create \ + a role with a shorter expiration and higher thresholds", + args.root_role_path.display() + ); + + // Root role files don't need to be secret. + fs::set_permissions(&args.root_role_path, fs::Permissions::from_mode(0o644)).context( + error::SetMode { + path: &args.root_role_path, + }, + )?; + + Ok(()) + } + } +} + +/// Searches Infra.toml and expected local paths for a root role and key for the requested repo. +fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option)> { + let (mut root_role_path, mut key_url) = (None, None); + + if args.infra_config_path.exists() { + info!( + "Found infra config at path: {}", + args.infra_config_path.display() + ); + + let infra_config = + InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + + let repo_config = infra_config + .repo + .as_ref() + .context(error::MissingConfig { + missing: "repo section", + })? + .get(&args.repo) + .context(error::MissingConfig { + missing: format!("definition for repo {}", &args.repo), + })?; + + // If they have a root role URL and checksum defined, we can download it. + if let (Some(url), Some(sha512)) = + (&repo_config.root_role_url, &repo_config.root_role_sha512) + { + // If it's already been downloaded, just confirm the checksum. + if args.root_role_path.exists() { + let root_role_data = + fs::read_to_string(&args.root_role_path).context(error::ReadFile { + path: &args.root_role_path, + })?; + let mut d = Sha512::new(); + d.update(&root_role_data); + let digest = hex::encode(d.finalize()); + + ensure!( + &digest == sha512, + error::Hash { + expected: sha512, + got: digest, + thing: args.root_role_path.to_string_lossy() + } + ); + debug!( + "Using existing downloaded root role at {}", + args.root_role_path.display() + ); + } else { + // Download the root role by URL and verify its checksum before writing it. + let root_role_data = reqwest::blocking::get(url.clone()) + .with_context(|| error::GetUrl { url: url.clone() })? + .text() + .with_context(|| error::GetUrl { url: url.clone() })?; + + let mut d = Sha512::new(); + d.update(&root_role_data); + let digest = hex::encode(d.finalize()); + + ensure!( + &digest == sha512, + error::Hash { + expected: sha512, + got: digest, + thing: url.to_string() + } + ); + + // Write root role to expected path on disk. + fs::write(&args.root_role_path, &root_role_data).context(error::WriteFile { + path: &args.root_role_path, + })?; + debug!("Downloaded root role to {}", args.root_role_path.display()); + } + + root_role_path = Some(&args.root_role_path); + } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() { + // Must specify both URL and checksum. + error::RootRoleConfig.fail()?; + } + + if let Some(key_config) = &repo_config.signing_keys { + key_url = Some( + Url::try_from(key_config.clone()) + .ok() + .context(error::SigningKeyUrl { repo: &args.repo })?, + ); + } + } else { + info!( + "No infra config at '{}' - using local roles/keys", + args.infra_config_path.display() + ); + } + + // If they don't have an Infra.toml or didn't define a root role / key there, check for them in + // expected local paths. 
+ if root_role_path.is_none() && args.root_role_path.exists() { + root_role_path = Some(&args.root_role_path); + } + if key_url.is_none() && args.default_key_path.exists() { + key_url = Some(Url::from_file_path(&args.default_key_path).ok().context( + error::FileUrl { + path: &args.default_key_path, + }, + )?); + } + + Ok((root_role_path, key_url)) +} + +// Returning a Result from main makes it print a Debug representation of the error, but with Snafu +// we have nice Display representations of the error, so we wrap "main" (run) and print any error. +// https://github.com/shepmaster/snafu/issues/110 +fn main() { + if let Err(e) = run() { + eprintln!("{}", e); + process::exit(1); + } +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + use url::Url; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(super) enum Error { + #[snafu(display("Error splitting shell command - {} - input: {}", source, command))] + CommandSplit { + command: String, + source: shell_words::ParseError, + }, + + #[snafu(display("Error reading config: {}", source))] + Config { source: pubsys_config::Error }, + + #[snafu(display("Path not valid as a URL: {}", path.display()))] + FileUrl { path: PathBuf }, + + #[snafu(display("Failed to fetch URL '{}': {}", url, source))] + GetUrl { url: Url, source: reqwest::Error }, + + #[snafu(display("Hash mismatch for '{}', got {} but expected {}", thing, got, expected))] + Hash { + expected: String, + got: String, + thing: String, + }, + + #[snafu(display("Logger setup error: {}", source))] + Logger { source: simplelog::TermLogError }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, + + #[snafu(display("'{}' repo has root role but no key. You wouldn't be able to update a repo without the matching key. 
To continue, pass '-e ALLOW_MISSING_KEY=true'", repo))] + MissingKey { repo: String }, + + #[snafu(display("Failed to create '{}': {}", path.display(), source))] + Mkdir { path: PathBuf, source: io::Error }, + + #[snafu(display("Invalid path '{}' for {}", path.display(), thing))] + Path { path: PathBuf, thing: String }, + + #[snafu(display("Failed to read '{}': {}", path.display(), source))] + ReadFile { path: PathBuf, source: io::Error }, + + #[snafu(display( + "Must specify both URL and SHA512 of root role in Infra.toml, found only one" + ))] + RootRoleConfig, + + #[snafu(display("Failed to set permissions on {}: {}", path.display(), source))] + SetMode { path: PathBuf, source: io::Error }, + + #[snafu(display("Failed to create temp file for {}: {}", purpose, source))] + TempFileCreate { purpose: String, source: io::Error }, + + #[snafu(display("Failed to move temp file to {}: {}", path.display(), source))] + TempFilePersist { + path: PathBuf, + source: tempfile::PersistError, + }, + + #[snafu(display("Returned {}: tuftool {}", code, command))] + TuftoolResult { code: String, command: String }, + + #[snafu(display("Failed to start tuftool: {}", source))] + TuftoolSpawn { source: io::Error }, + + #[snafu(display("Unable to build URL from signing key for repo '{}'", repo))] + SigningKeyUrl { repo: String }, + + #[snafu(display("Failed to write '{}': {}", path.display(), source))] + WriteFile { path: PathBuf, source: io::Error }, + } +} +type Result = std::result::Result; diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 55f1cdb1..b22692c4 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -2,30 +2,35 @@ # creates repos when you call `cargo make repo`. Save a copy as `Infra.toml` # at the root of the repo, then edit the settings below to match your use case. -# Path to your root role JSON file. -root_role_path = "/home/user/root.json" +# You can have any number of repos defined and build a specific one by running like this: +# cargo make repo -e PUBLISH_REPO=myrepo +[repo.default] +# URL to your root role JSON file; can be a file:// URL for local files. If +# you don't specify one here, a file will be generated for you under /roles. +# For production use, you should store them somewhere safer. +root_role_url = "https://example.com/root.json" +# SHA512 checksum of your root role JSON file. +root_role_sha512 = "0123456789abcdef" + +# For reference, this is the Bottlerocket root role: +#root_role_url = "https://cache.bottlerocket.aws/root.json" +#root_role_sha512 = "90393204232a1ad6b0a45528b1f7df1a3e37493b1e05b1c149f081849a292c8dafb4ea5f7ee17bcc664e35f66e37e4cfa4aae9de7a2a28aa31ae6ac3d9bea4d5" -# You would normally create repo signing keys using `tuftool root gen-rsa-key` -# as part of the initial setup of your TUF roles. pubsys assumes a single -# publication key that lives in the snapshot, targets, and timestamp roles. -# Here you specify where that key lives so we can sign the created repo. -# You can specify multiple keys, if you like, and select one by name: -# cargo make repo -e PUBLISH_KEY=mysigningkey -# (Don't keep your keys in the repo!) -# You can keep your private key in a file or an SSM parameter; pick one: +# pubsys assumes a single publication key that signs the snapshot, targets, +# and timestamp roles. Here you specify where that key lives so we can sign +# the created repo. If you don't specify one here, a key will be generated for +# you under /keys. 
For production use, you should use a key stored in a +# trusted service like KMS or SSM. # (Need inline table syntax until this is fixed: https://github.com/alexcrichton/toml-rs/issues/225) -signing_keys = { default = { file = { path = "/home/user/key.pem" } } } -# signing_keys = { default = { ssm = { parameter = "/my/parameter" } } } +signing_keys = { file = { path = "/home/user/key.pem" } } +#signing_keys = { kms = { key_id = "abc-def-123" } } +#signing_keys = { ssm = { parameter = "/my/parameter" } } -# You can have any number of repos defined and build a specific one by running like this: -# cargo make repo -e PUBLISH_REPO=myrepo -# If the URLs are uncommented, they will be pulled and used as a starting +# If these URLs are uncommented, the repo will be pulled and used as a starting # point, and your images (and related files) will be added as a new update in # the created repo. Otherwise, we build a new repo from scratch. - -[repo.default] -# metadata_base_url = "https://example.com/metadata/" -# targets_url = "https://example.com/targets/" +metadata_base_url = "https://example.com/metadata/" +targets_url = "https://example.com/targets/" [aws] # The list of regions in which you want to publish AMIs. We register an AMI in diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 8fff1f93..3e4813a8 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -7,7 +7,7 @@ use chrono::{DateTime, Utc}; use lazy_static::lazy_static; use log::{debug, info, trace}; use parse_datetime::parse_datetime; -use pubsys_config::{InfraConfig, RepoExpirationPolicy, SigningKeyConfig}; +use pubsys_config::{InfraConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig}; use semver::Version; use snafu::{ensure, OptionExt, ResultExt}; use std::convert::TryInto; @@ -75,22 +75,24 @@ pub(crate) struct RepoArgs { /// Path to file that defines when repo metadata should expire repo_expiration_policy_path: PathBuf, - // Policies that pubsys passes on to other tools + // Configuration that pubsys passes on to other tools #[structopt(long, parse(from_os_str))] /// Path to Release.toml release_config_path: PathBuf, #[structopt(long, parse(from_os_str))] /// Path to file that defines when this update will become available wave_policy_path: PathBuf, + #[structopt(long, parse(from_os_str))] + /// Path to root.json for this repo + root_role_path: PathBuf, + #[structopt(long, parse(from_os_str))] + /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined + default_key_path: PathBuf, #[structopt(long, parse(try_from_str = parse_datetime))] /// When the waves and expiration timer will start; RFC3339 date or "in X hours/days/weeks" release_start_time: Option>, - #[structopt(long)] - /// Use this named key from Infra.toml - signing_key: String, - #[structopt(long, parse(from_os_str))] /// Where to store the created repo outdir: PathBuf, @@ -203,7 +205,11 @@ where path: manifest_path.as_ref(), })?; debug!("Adding target for manifest.json"); - editor.add_target("manifest.json", manifest_target).context(error::AddTarget { path: "manifest.json" })?; + editor + .add_target("manifest.json", manifest_target) + .context(error::AddTarget { + path: "manifest.json", + })?; // Add expirations =^..^= =^..^= =^..^= =^..^= @@ -249,19 +255,8 @@ where /// targets URLs defined, returns those URLs, otherwise None. 
fn repo_urls<'a>( repo_args: &RepoArgs, - infra_config: &'a InfraConfig, + repo_config: &'a RepoConfig, ) -> Result> { - let repo_config = infra_config - .repo - .as_ref() - .context(error::MissingConfig { - missing: "repo section", - })? - .get(&repo_args.repo) - .context(error::MissingConfig { - missing: format!("definition for repo {}", &repo_args.repo), - })?; - // Check if both URLs are set if let Some(metadata_base_url) = repo_config.metadata_base_url.as_ref() { if let Some(targets_url) = repo_config.targets_url.as_ref() { @@ -373,20 +368,32 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { ); let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; trace!("Parsed infra config: {:?}", infra_config); - let root_role_path = infra_config - .root_role_path + + let repo_config = infra_config + .repo .as_ref() .context(error::MissingConfig { - missing: "root_role_path", + missing: "repo section", + })? + .get(&repo_args.repo) + .context(error::MissingConfig { + missing: format!("definition for repo {}", &repo_args.repo), })?; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh - let maybe_urls = repo_urls(&repo_args, &infra_config)?; + let maybe_urls = repo_urls(&repo_args, &repo_config)?; let workdir = tempdir().context(error::TempDir)?; let transport = RepoTransport::default(); - let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() { + let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() + { info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(root_role_path, &transport, workdir.path(), &metadata_url, &targets_url)? { + match load_editor_and_manifest( + &repo_args.root_role_path, + &transport, + workdir.path(), + &metadata_url, + &targets_url, + )? { Some((editor, manifest)) => (editor, manifest), None => { info!( @@ -394,7 +401,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { metadata_url ); ( - RepositoryEditor::new(root_role_path).context(error::NewEditor)?, + RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditor)?, Manifest::default(), ) } @@ -402,7 +409,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { } else { info!("Did not find metadata and target URLs in infra config, creating a new repository"); ( - RepositoryEditor::new(root_role_path).context(error::NewEditor)?, + RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditor)?, Manifest::default(), ) }; @@ -430,30 +437,34 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Sign repo =^..^= =^..^= =^..^= =^..^= - let signing_key_config = infra_config - .signing_keys - .as_ref() - .context(error::MissingConfig { - missing: "signing_keys", - })? - .get(&repo_args.signing_key) - .context(error::MissingConfig { - missing: format!("profile {} in signing_keys", &repo_args.signing_key), - })?; + // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the + // generated local key. 
+ let signing_key_config = repo_config.signing_keys.as_ref(); let key_source: Box = match signing_key_config { - SigningKeyConfig::file { path } => Box::new(LocalKeySource { path: path.clone() }), - SigningKeyConfig::kms { key_id } => Box::new(KmsKeySource { + Some(SigningKeyConfig::file { path }) => Box::new(LocalKeySource { path: path.clone() }), + Some(SigningKeyConfig::kms { key_id }) => Box::new(KmsKeySource { profile: None, key_id: key_id.clone(), client: None, signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, }), - SigningKeyConfig::ssm { parameter } => Box::new(SsmKeySource { + Some(SigningKeyConfig::ssm { parameter }) => Box::new(SsmKeySource { profile: None, parameter_name: parameter.clone(), key_id: None, }), + None => { + ensure!( + repo_args.default_key_path.exists(), + error::MissingConfig { + missing: "signing_keys in repo config, and we found no local key", + } + ); + Box::new(LocalKeySource { + path: repo_args.default_key_path.clone(), + }) + } }; let signed_repo = editor.sign(&[key_source]).context(error::RepoSign)?; From f723f39344782e379ba5d850c9eaec50c188401c Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 24 Sep 2020 20:35:12 +0000 Subject: [PATCH 0355/1356] pubsys-config: disallow unknown fields Without this, a user could have an old-format Infra.toml files with root role and key defined at the top level, and pubsys wouldn't error, so the user might think they're being used. With this change, we give a clear error so the user knows what to update. Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys-config/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 712d3b78..d30588f3 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -12,6 +12,7 @@ use url::Url; /// Configuration needed to load and create repos #[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct InfraConfig { // Repo subcommand config pub repo: Option>, @@ -34,6 +35,7 @@ impl InfraConfig { /// AWS-specific infrastructure configuration #[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] pub struct AwsConfig { #[serde(default)] pub regions: VecDeque, @@ -46,6 +48,7 @@ pub struct AwsConfig { /// AWS region-specific configuration #[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct AwsRegionConfig { pub role: Option, pub endpoint: Option, @@ -56,6 +59,7 @@ pub struct AwsRegionConfig { // more common for TOML config to be lowercase. 
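As an aside, the effect of deny_unknown_fields is easiest to see with an old-format Infra.toml: a top-level root_role_path key, which the new format no longer accepts, now fails deserialization instead of being silently ignored. A minimal sketch, with an illustrative TOML snippet:

```
// assuming: use pubsys_config::InfraConfig;

fn main() {
    // Old-format config with a field that no longer exists on InfraConfig.
    let old_format = r#"
        root_role_path = "/home/user/root.json"
    "#;
    let parsed: Result<InfraConfig, toml::de::Error> = toml::from_str(old_format);
    // With deny_unknown_fields, this is a clear error rather than a silent no-op.
    assert!(parsed.is_err());
}
```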
#[allow(non_camel_case_types)] #[derive(Clone, Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub enum SigningKeyConfig { file { path: PathBuf }, kms { key_id: String }, @@ -91,6 +95,7 @@ impl TryFrom for Url { /// Represents a Bottlerocket repo's location and the metadata needed to update the repo #[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct RepoConfig { pub root_role_url: Option, pub root_role_sha512: Option, @@ -103,6 +108,7 @@ pub struct RepoConfig { /// How long it takes for each metadata type to expire #[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct RepoExpirationPolicy { #[serde(deserialize_with = "deserialize_offset")] pub snapshot_expiration: Duration, From 7f01c06a9e6a27f535928fe10b08454ef325528b Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 5 Oct 2020 18:25:15 +0000 Subject: [PATCH 0356/1356] pubsys-setup: don't require repo section if Infra.toml exists The user could have set up Infra.toml for other purposes, for example just building images and AMIs, and they don't care about repos. In this case, we can continue and generate role/key for them, as if there's no Infra.toml. Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- tools/pubsys-setup/src/main.rs | 141 ++++++++++++++++----------------- 1 file changed, 70 insertions(+), 71 deletions(-) diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index aec53e67..b8518924 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -188,81 +188,83 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; trace!("Parsed infra config: {:?}", infra_config); - let repo_config = infra_config + // Check whether the user has the relevant repo defined in their Infra.toml. + if let Some(repo_config) = infra_config .repo .as_ref() - .context(error::MissingConfig { - missing: "repo section", - })? - .get(&args.repo) - .context(error::MissingConfig { - missing: format!("definition for repo {}", &args.repo), - })?; - - // If they have a root role URL and checksum defined, we can download it. - if let (Some(url), Some(sha512)) = - (&repo_config.root_role_url, &repo_config.root_role_sha512) + .and_then(|repo_section| repo_section.get(&args.repo)) { - // If it's already been downloaded, just confirm the checksum. - if args.root_role_path.exists() { - let root_role_data = - fs::read_to_string(&args.root_role_path).context(error::ReadFile { + // If they have a root role URL and checksum defined, we can download it. + if let (Some(url), Some(sha512)) = + (&repo_config.root_role_url, &repo_config.root_role_sha512) + { + // If it's already been downloaded, just confirm the checksum. + if args.root_role_path.exists() { + let root_role_data = + fs::read_to_string(&args.root_role_path).context(error::ReadFile { + path: &args.root_role_path, + })?; + let mut d = Sha512::new(); + d.update(&root_role_data); + let digest = hex::encode(d.finalize()); + + ensure!( + &digest == sha512, + error::Hash { + expected: sha512, + got: digest, + thing: args.root_role_path.to_string_lossy() + } + ); + debug!( + "Using existing downloaded root role at {}", + args.root_role_path.display() + ); + } else { + // Download the root role by URL and verify its checksum before writing it. + let root_role_data = reqwest::blocking::get(url.clone()) + .with_context(|| error::GetUrl { url: url.clone() })? 
+ .text() + .with_context(|| error::GetUrl { url: url.clone() })?; + + let mut d = Sha512::new(); + d.update(&root_role_data); + let digest = hex::encode(d.finalize()); + + ensure!( + &digest == sha512, + error::Hash { + expected: sha512, + got: digest, + thing: url.to_string() + } + ); + + // Write root role to expected path on disk. + fs::write(&args.root_role_path, &root_role_data).context(error::WriteFile { path: &args.root_role_path, })?; - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::Hash { - expected: sha512, - got: digest, - thing: args.root_role_path.to_string_lossy() - } - ); - debug!( - "Using existing downloaded root role at {}", - args.root_role_path.display() - ); - } else { - // Download the root role by URL and verify its checksum before writing it. - let root_role_data = reqwest::blocking::get(url.clone()) - .with_context(|| error::GetUrl { url: url.clone() })? - .text() - .with_context(|| error::GetUrl { url: url.clone() })?; - - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::Hash { - expected: sha512, - got: digest, - thing: url.to_string() - } - ); - - // Write root role to expected path on disk. - fs::write(&args.root_role_path, &root_role_data).context(error::WriteFile { - path: &args.root_role_path, - })?; - debug!("Downloaded root role to {}", args.root_role_path.display()); + debug!("Downloaded root role to {}", args.root_role_path.display()); + } + + root_role_path = Some(&args.root_role_path); + } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() + { + // Must specify both URL and checksum. + error::RootRoleConfig.fail()?; } - root_role_path = Some(&args.root_role_path); - } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() { - // Must specify both URL and checksum. - error::RootRoleConfig.fail()?; - } - - if let Some(key_config) = &repo_config.signing_keys { - key_url = Some( - Url::try_from(key_config.clone()) - .ok() - .context(error::SigningKeyUrl { repo: &args.repo })?, + if let Some(key_config) = &repo_config.signing_keys { + key_url = Some( + Url::try_from(key_config.clone()) + .ok() + .context(error::SigningKeyUrl { repo: &args.repo })?, + ); + } + } else { + info!( + "No repo config in '{}' - using local roles/keys", + args.infra_config_path.display() ); } } else { @@ -332,9 +334,6 @@ mod error { #[snafu(display("Logger setup error: {}", source))] Logger { source: simplelog::TermLogError }, - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - #[snafu(display("'{}' repo has root role but no key. You wouldn't be able to update a repo without the matching key. 
To continue, pass '-e ALLOW_MISSING_KEY=true'", repo))] MissingKey { repo: String }, From 554f9cab025977b81f53daa1a8edf80a32f41001 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 5 Oct 2020 12:08:53 -0700 Subject: [PATCH 0357/1356] Update kernel to 5.4.50-25.83 --- ...t-packet-fix-overflow-in-tpacket_rcv.patch | 47 ------------------- packages/kernel/Cargo.toml | 4 +- packages/kernel/kernel.spec | 5 +- 3 files changed, 4 insertions(+), 52 deletions(-) delete mode 100644 packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch diff --git a/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch b/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch deleted file mode 100644 index 33257f2f..00000000 --- a/packages/kernel/0002-net-packet-fix-overflow-in-tpacket_rcv.patch +++ /dev/null @@ -1,47 +0,0 @@ -From eea7a6a08ef3acf437c6ce8a28694c3659542569 Mon Sep 17 00:00:00 2001 -From: Or Cohen -Date: Sun, 30 Aug 2020 20:04:51 +0300 -Subject: [PATCH 2/2] net/packet: fix overflow in tpacket_rcv - -Using tp_reserve to calculate netoff can overflow as -tp_reserve is unsigned int and netoff is unsigned short. - -This may lead to macoff receving a smaller value then -sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr -is set, an out-of-bounds write will occur when -calling virtio_net_hdr_from_skb. - -The bug is fixed by converting netoff to unsigned int -and checking if it exceeds USHRT_MAX. - -Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt") -Signed-off-by: Or Cohen ---- - net/packet/af_packet.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index 1d63ab3a878a..56084a16d0f9 100644 ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -2167,7 +2167,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, - int skb_len = skb->len; - unsigned int snaplen, res; - unsigned long status = TP_STATUS_USER; -- unsigned short macoff, netoff, hdrlen; -+ unsigned short macoff, hdrlen; -+ unsigned int netoff; - struct sk_buff *copy_skb = NULL; - struct timespec ts; - __u32 ts_status; -@@ -2236,6 +2237,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, - } - macoff = netoff - maclen; - } -+ if (netoff > USHRT_MAX) { -+ atomic_inc(&po->tp_drops); -+ goto drop_n_restore; -+ } - if (po->tp_version <= TPACKET_V2) { - if (macoff + snaplen > po->rx_ring.frame_size) { - if (po->copy_thresh && diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index b68bc635..b93b4ac9 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/9e3beaecef0b030d83fb215be7ca67c01009cfec52fe9b12eb4b24fdb46eebce/kernel-5.4.50-25.83.amzn2.src.rpm" -sha512 = "edc81ee7acdb9f34da2ca1a9ecef42a8c8daab01b7bc0fb130c04d53278091856fa9c65b740f41839a5d65b374f6953d553e6076c94aea578469ef0181014a76" +url = "https://cdn.amazonlinux.com/blobstore/36ea759a11e6e364ab8b2bf857c03cdbf53d33e348e785ed6767b87f8ac12c27/kernel-5.4.58-32.125.amzn2.src.rpm" +sha512 = "c7c2fdcb752cc6ddc6410ac9195e5443b66e2e6354f4299786df6aa303aad64b3a4aa86bec4f1d8f0ac93280353ca2cc989e67417e974aa1a1f38013b01c9e6f" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 089e8c24..a1b2212f 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,16 +1,15 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.50 +Version: 5.4.58 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/9e3beaecef0b030d83fb215be7ca67c01009cfec52fe9b12eb4b24fdb46eebce/kernel-5.4.50-25.83.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/36ea759a11e6e364ab8b2bf857c03cdbf53d33e348e785ed6767b87f8ac12c27/kernel-5.4.58-32.125.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch -Patch0002: 0002-net-packet-fix-overflow-in-tpacket_rcv.patch BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 661e0028d1f4a43a9a7cd7c9e40bf3f38813a224 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 5 Oct 2020 18:47:12 -0700 Subject: [PATCH 0358/1356] pubsys: cargo fmt changes Ran cargo fmt to please Ferris. --- tools/pubsys/src/aws/ami/mod.rs | 21 +++++++++------ tools/pubsys/src/aws/ami/register.rs | 36 ++++++++++--------------- tools/pubsys/src/aws/ami/wait.rs | 11 ++++---- tools/pubsys/src/aws/client.rs | 2 +- tools/pubsys/src/aws/mod.rs | 11 ++++---- tools/pubsys/src/aws/promote_ssm/mod.rs | 20 +++++++++----- tools/pubsys/src/aws/publish_ami/mod.rs | 2 +- tools/pubsys/src/aws/ssm/mod.rs | 24 ++++++++++++----- tools/pubsys/src/aws/ssm/ssm.rs | 4 +-- 9 files changed, 72 insertions(+), 59 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index a8ba71fa..77ba332e 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -7,11 +7,11 @@ pub(crate) mod wait; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; use crate::aws::{client::build_client, region_from_string}; -use pubsys_config::{AwsConfig, InfraConfig}; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; use log::{error, info, trace}; +use pubsys_config::{AwsConfig, InfraConfig}; use register::{get_ami_id, register_image, RegisteredIds}; use rusoto_core::{Region, RusotoError}; use rusoto_ebs::EbsClient; @@ -166,13 +166,18 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> }; (found_ids, true) } else { - let new_ids = register_image(ami_args, base_region.name(), base_ebs_client, &base_ec2_client) - .await - .context(error::RegisterImage { - name: &ami_args.name, - arch: &ami_args.arch, - region: base_region.name(), - })?; + let new_ids = register_image( + ami_args, + base_region.name(), + base_ebs_client, + &base_ec2_client, + ) + .await + .context(error::RegisterImage { + name: &ami_args.name, + arch: &ami_args.arch, + region: base_region.name(), + })?; info!( 
"Registered AMI '{}' in {}: {}", ami_args.name, diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 0b69f319..9cd02c02 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -37,30 +37,22 @@ async fn _register_image( region ); let uploader = SnapshotUploader::new(ebs_client); - let root_snapshot = snapshot_from_image( - &ami_args.root_image, - &uploader, - None, - ami_args.no_progress, - ) - .await - .context(error::Snapshot { - path: &ami_args.root_image, - region, - })?; + let root_snapshot = + snapshot_from_image(&ami_args.root_image, &uploader, None, ami_args.no_progress) + .await + .context(error::Snapshot { + path: &ami_args.root_image, + region, + })?; cleanup_snapshot_ids.push(root_snapshot.clone()); - let data_snapshot = snapshot_from_image( - &ami_args.data_image, - &uploader, - None, - ami_args.no_progress, - ) - .await - .context(error::Snapshot { - path: &ami_args.root_image, - region, - })?; + let data_snapshot = + snapshot_from_image(&ami_args.data_image, &uploader, None, ami_args.no_progress) + .await + .context(error::Snapshot { + path: &ami_args.root_image, + region, + })?; cleanup_snapshot_ids.push(data_snapshot.clone()); info!( diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 604a9dba..61c9ffb4 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -1,6 +1,6 @@ use crate::aws::client::build_client; -use pubsys_config::AwsConfig; use log::info; +use pubsys_config::AwsConfig; use rusoto_core::Region; use rusoto_ec2::{DescribeImagesRequest, Ec2, Ec2Client}; use snafu::{ensure, ResultExt}; @@ -40,10 +40,11 @@ pub(crate) async fn wait_for_ami( }; // Use a new client each time so we have more confidence that different endpoints can see // the new AMI. 
- let ec2_client = build_client::(®ion, &sts_region, &aws).context(error::Client { - client_type: "EC2", - region: region.name(), - })?; + let ec2_client = + build_client::(®ion, &sts_region, &aws).context(error::Client { + client_type: "EC2", + region: region.name(), + })?; let describe_response = ec2_client .describe_images(describe_request) diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 8576424a..588885a5 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -1,5 +1,5 @@ -use pubsys_config::AwsConfig; use async_trait::async_trait; +use pubsys_config::AwsConfig; use rusoto_core::{request::DispatchSignedRequest, HttpClient, Region}; use rusoto_credential::{ AutoRefreshingProvider, AwsCredentials, CredentialsError, DefaultCredentialsProvider, diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index ccea97ed..bb81132c 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -28,7 +28,11 @@ pub(crate) fn parse_arch(input: &str) -> Result { match input { "x86_64" | "amd64" => Ok("x86_64".to_string()), "arm64" | "aarch64" => Ok("arm64".to_string()), - _ => error::ParseArch { input, msg: "unknown architecture" }.fail(), + _ => error::ParseArch { + input, + msg: "unknown architecture", + } + .fail(), } } @@ -39,10 +43,7 @@ mod error { #[snafu(visibility = "pub(super)")] pub(crate) enum Error { #[snafu(display("Failed to parse arch '{}': {}", input, msg))] - ParseArch { - input: String, - msg: String - }, + ParseArch { input: String, msg: String }, #[snafu(display("Failed to parse region '{}': {}", name, source))] ParseRegion { diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index ad74bef7..4c96335b 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -2,11 +2,11 @@ //! 
SSM parameters from one version to another use crate::aws::client::build_client; -use crate::aws::{parse_arch, region_from_string}; use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; -use pubsys_config::InfraConfig; +use crate::aws::{parse_arch, region_from_string}; use crate::Args; use log::{info, trace}; +use pubsys_config::InfraConfig; use rusoto_core::Region; use rusoto_ssm::SsmClient; use snafu::{ensure, ResultExt}; @@ -71,15 +71,21 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) .collect::>>()?; - ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + ensure!( + !regions.is_empty(), + error::MissingConfig { + missing: "aws.regions" + } + ); let base_region = ®ions[0]; let mut ssm_clients = HashMap::with_capacity(regions.len()); for region in ®ions { - let ssm_client = build_client::(region, &base_region, &aws).context(error::Client { - client_type: "SSM", - region: region.name(), - })?; + let ssm_client = + build_client::(region, &base_region, &aws).context(error::Client { + client_type: "SSM", + region: region.name(), + })?; ssm_clients.insert(region.clone(), ssm_client); } diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 8bef24dd..baf0e765 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -5,11 +5,11 @@ use crate::aws::ami::wait::{self, wait_for_ami}; use crate::aws::ami::Image; use crate::aws::client::build_client; use crate::aws::region_from_string; -use pubsys_config::InfraConfig; use crate::Args; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; use log::{debug, error, info, trace}; +use pubsys_config::InfraConfig; use rusoto_core::{Region, RusotoError}; use rusoto_ec2::{ DescribeImagesRequest, Ec2, Ec2Client, ModifyImageAttributeRequest, diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index d9c45c74..e9430be6 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -5,9 +5,9 @@ pub(crate) mod ssm; pub(crate) mod template; use crate::aws::{ami::Image, client::build_client, parse_arch, region_from_string}; -use pubsys_config::{AwsConfig, InfraConfig}; use crate::Args; use log::{info, trace}; +use pubsys_config::{AwsConfig, InfraConfig}; use rusoto_core::Region; use rusoto_ssm::SsmClient; use serde::Serialize; @@ -71,17 +71,23 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { } else { aws.regions.clone().into() }; - ensure!(!regions.is_empty(), error::MissingConfig { missing: "aws.regions" }); + ensure!( + !regions.is_empty(), + error::MissingConfig { + missing: "aws.regions" + } + ); let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; let amis = parse_ami_input(®ions, &ssm_args, &aws)?; let mut ssm_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { - let ssm_client = build_client::(®ion, &base_region, &aws).context(error::Client { - client_type: "SSM", - region: region.name(), - })?; + let ssm_client = + build_client::(®ion, &base_region, &aws).context(error::Client { + client_type: "SSM", + region: region.name(), + })?; ssm_clients.insert(region.clone(), ssm_client); } @@ -185,7 +191,11 @@ pub(crate) struct BuildContext<'a> { type SsmParameters = HashMap; /// Parse the AMI input file -fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs, aws: 
&AwsConfig) -> Result> { +fn parse_ami_input( + regions: &[String], + ssm_args: &SsmArgs, + aws: &AwsConfig, +) -> Result> { info!("Using AMI data from path: {}", ssm_args.ami_input.display()); let file = File::open(&ssm_args.ami_input).context(error::File { op: "open", diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 9469e638..11ae7b74 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -365,9 +365,7 @@ mod error { "SSM requests throttled too many times, went beyond our max interval {:?}", max_interval ))] - Throttled { - max_interval: Duration, - }, + Throttled { max_interval: Duration }, #[snafu(display("Failed to validate all changes; see above."))] ValidateParameters, From e112655bea81b1f7d848d27708e51c9c17044169 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 29 Sep 2020 16:35:22 +0000 Subject: [PATCH 0359/1356] Add PUBLISHING.md guide explaining pubsys and related tools Co-authored-by: Zac Mrowicki Co-authored-by: Tom Kirchner --- BUILDING.md | 41 ++++++++++++++++++--------------- README.md | 17 ++++++++++---- tools/pubsys/Infra.toml.example | 2 +- 3 files changed, 36 insertions(+), 24 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index ef020ff1..c6a6603a 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -64,7 +64,10 @@ cargo make ``` This will build an image for the default variant, `aws-k8s-1.17`. -All packages will be built in turn, and then compiled into an `img` file in the `build/` directory. +All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. + +The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. +If you're planning on [publishing your build](PUBLISHING.md), you may want to change the version. To build an image for a different variant, run: @@ -72,36 +75,38 @@ To build an image for a different variant, run: cargo make -e BUILDSYS_VARIANT=my-variant-here ``` +To build an image for a different architecture, run: + +``` +cargo make -e BUILDSYS_ARCH=my-arch-here +``` + +(You can use variant and arch arguments together, too.) + ### Register an AMI To use the image in Amazon EC2, we need to register the image as an AMI. -The `bin/amiize.sh` script does this for you. - -The script has some assumptions about your setup, in particular that you: - * have [aws-cli](https://aws.amazon.com/cli/) set up, and that its default profile can create and control EC2 resources - * have [coldsnap](https://github.com/awslabs/coldsnap/) installed to upload snapshots - * have a few other common tools installed, like `jq` and `du` -First, decompress the images. -(Note: these filenames assume an `x86_64` architecture and `aws-k8s-1.17` [variant](README.md).) +For a simple start, pick an [EC2 region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions), then run: ``` -lz4 -d build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img.lz4 && \ -lz4 -d build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img.lz4 +cargo make ami -e PUBLISH_REGIONS=your-region-here ``` -Next, register an AMI: +Your new AMI ID will be printed after it's registered. 
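+
+If you'd like to double-check the result, one option is to list the AMIs owned by your account with the AWS CLI.
+This is a hypothetical sketch: substitute your own region, and adjust the name filter if you changed the image name or version.
+
+```
+aws ec2 describe-images --owners self --region your-region-here \
+  --filters "Name=name,Values=bottlerocket-*"
+```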
+ +If you built your image for a different architecture or variant, just use the same arguments here: ``` -bin/amiize.sh --name YOUR-AMI-NAME-HERE \ - --arch x86_64 \ - --region us-west-2 \ - --root-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64.img \ - --data-image build/images/x86_64-aws-k8s-1.17/latest/bottlerocket-aws-k8s-1.17-x86_64-data.img +cargo make ami -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-here ``` -Your new AMI ID will be printed at the end. +(There's a lot more detail on building and managing AMIs in the [PUBLISHING](PUBLISHING.md) guide.) ## Use your image See the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for Amazon ECS](QUICKSTART-ECS.md) for information on running Bottlerocket images. + +## Publish your image + +See the [PUBLISHING](PUBLISHING.md) guide for information on deploying Bottlerocket images and repositories. diff --git a/README.md b/README.md index 194dbaa3..5acbee04 100644 --- a/README.md +++ b/README.md @@ -57,11 +57,6 @@ Our supported architectures include `x86_64` and `aarch64` (written as `arm64` i :walking: :running: -To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). -It describes: -* how to build an image -* how to register an EC2 AMI from an image - Bottlerocket is best used with a container orchestrator. To get started with Kubernetes, please see [QUICKSTART-EKS](QUICKSTART-EKS.md). To get started with Amazon ECS, please see [QUICKSTART-ECS](QUICKSTART-ECS.md). @@ -69,6 +64,18 @@ These guides describe: * how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers * how to launch a Bottlerocket instance in EC2 +To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). +It describes: +* how to build an image +* how to register an EC2 AMI from an image + +To publish your built Bottlerocket images, please see [PUBLISHING](PUBLISHING.md). +It describes: +* how to make TUF repos including your image +* how to copy your AMI across regions +* how to mark your AMIs public or grant access to specific accounts +* how to make your AMIs discoverable using [SSM parameters](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) + ## Exploration To improve security, there's no SSH server in a Bottlerocket image, and not even a shell. diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index b22692c4..ce163694 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -29,7 +29,7 @@ signing_keys = { file = { path = "/home/user/key.pem" } } # If these URLs are uncommented, the repo will be pulled and used as a starting # point, and your images (and related files) will be added as a new update in # the created repo. Otherwise, we build a new repo from scratch. 
-metadata_base_url = "https://example.com/metadata/" +metadata_base_url = "https://example.com/" targets_url = "https://example.com/targets/" [aws] From f9d8f8a3585d6c4197564c178cb3d1a71771fa6b Mon Sep 17 00:00:00 2001 From: Sanika Shah Date: Tue, 6 Oct 2020 10:47:28 -0700 Subject: [PATCH 0360/1356] Add aws-k8s-1.18 variant with Kubernetes 1.18 Co-authored-by: Tom Kirchner --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aeb97fca..6c6503cb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-ecs-1] + variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: From adbf79912cf86d912d4e71884c1d3cb2ffaa34f7 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Fri, 9 Oct 2020 20:00:29 +0000 Subject: [PATCH 0361/1356] actions: Force Rust 1.46.0 temporarily Rust 1.47.0 is causing the build to fail in CI. This forces 1.46.0. --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6c6503cb..91fc4156 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -18,6 +18,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 + - run: rustup toolchain install 1.46.0 && rustup default 1.46.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests From 484a74be4065753fee71c3d7e3acbc8feb4116dd Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 9 Oct 2020 23:03:30 +0000 Subject: [PATCH 0362/1356] Revert "actions: Force Rust 1.46.0 temporarily" This reverts commit 7fc8620fc3cc180a8669f07012b33dfb92740aca. --- .github/workflows/build.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 91fc4156..6c6503cb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -18,7 +18,6 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.46.0 && rustup default 1.46.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo install --version 0.6.6 cargo-deny --no-default-features - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests From 4018e31081dc100c29db7b8c4fbc1fc6feb8fca0 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 15 Oct 2020 14:23:28 -0700 Subject: [PATCH 0363/1356] Allow setting Linux kernel parameters (sysctl) via settings --- GLOSSARY.md | 1 + README.md | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/GLOSSARY.md b/GLOSSARY.md index 060c75f1..105ac9c1 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -4,6 +4,7 @@ * [**bork**](sources/api/bork): A setting generator called by sundog to generate the random seed for updog, determining where the host falls in the update order. * [**buildsys**](tools/buildsys): A build tool that runs package and image builds inside containers. cargo-make starts the build of each package, each of which calls buildsys, which in turn starts a Docker-based build using the SDK image. +* [**corndog**](sources/api/corndog): A program that sets kernel sysctl values based on API settings. 
* [**early-boot-config**](sources/api/early-boot-config): A program run at boot to read platform-specific data, such as EC2 user data, and send requested configuration to the API. * **gptprio:** A structure of bits in GPT partition headers that specifies priority, tries remaining, and whether the partition booted successfully before. signpost sets these and GRUB uses them to determine which partition set to boot. diff --git a/README.md b/README.md index 5acbee04..4a9a5616 100644 --- a/README.md +++ b/README.md @@ -348,6 +348,18 @@ These settings can be changed at any time. * `settings.ntp.time-servers`: A list of NTP servers used to set and verify the system time. +#### Kernel settings + +* `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. + Remember to quote keys (since they often contain ".") and to quote all values. + * Example user data for setting up sysctl: + ``` + [settings.kernel.sysctl] + "user.max_user_namespaces" = "16384" + "vm.max_map_count" = "262144" + ``` + + #### Host containers settings * `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). * `settings.host-containers.admin.enabled`: Whether the admin container is enabled. From b7761df04a1d83330d18c62a9b2ce364e655c337 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 16 Oct 2020 10:54:11 -0700 Subject: [PATCH 0364/1356] pubsys: don't require Infra.toml to get started If the user has no Infra.toml, we can continue with defaults most of the time. For making an AMI, only passing PUBLISH_REGIONS is required. For making a repo, we can use a default configuration and write it under 'default'. SSM still requires Infra.toml. --- tools/pubsys-config/src/lib.rs | 19 ++++++++++++--- tools/pubsys/src/aws/ami/mod.rs | 7 +++--- tools/pubsys/src/aws/publish_ami/mod.rs | 7 +++--- tools/pubsys/src/repo.rs | 31 +++++++++++++++---------- 4 files changed, 43 insertions(+), 21 deletions(-) diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index d30588f3..c4e16989 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -11,7 +11,7 @@ use std::path::{Path, PathBuf}; use url::Url; /// Configuration needed to load and create repos -#[derive(Debug, Deserialize)] +#[derive(Debug, Default, Deserialize)] #[serde(deny_unknown_fields)] pub struct InfraConfig { // Repo subcommand config @@ -23,7 +23,7 @@ pub struct InfraConfig { impl InfraConfig { /// Deserializes an InfraConfig from a given path - pub fn from_path
<P>(path: P) -> Result + pub fn from_path<P>
(path: P) -> Result where P: AsRef, { @@ -31,6 +31,19 @@ impl InfraConfig { let infra_config_str = fs::read_to_string(path).context(error::File { path })?; toml::from_str(&infra_config_str).context(error::InvalidToml { path }) } + + /// Deserializes an InfraConfig from a given path, if it exists, otherwise builds a default + /// config + pub fn from_path_or_default
<P>
(path: P) -> Result + where + P: AsRef, + { + if path.as_ref().exists() { + Self::from_path(path) + } else { + Ok(Self::default()) + } + } } /// AWS-specific infrastructure configuration @@ -94,7 +107,7 @@ impl TryFrom for Url { } /// Represents a Bottlerocket repo's location and the metadata needed to update the repo -#[derive(Debug, Deserialize)] +#[derive(Debug, Default, Deserialize)] #[serde(deny_unknown_fields)] pub struct RepoConfig { pub root_role_url: Option, diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 77ba332e..3dcb0885 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -92,11 +92,12 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let mut amis = HashMap::new(); info!( - "Using infra config from path: {}", + "Checking for infra config at path: {}", args.infra_config_path.display() ); - let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; - trace!("Parsed infra config: {:?}", infra_config); + let infra_config = + InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(|| Default::default()); diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index baf0e765..c3ff5719 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -86,11 +86,12 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { ); info!( - "Using infra config from path: {}", + "Checking for infra config at path: {}", args.infra_config_path.display() ); - let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; - trace!("Parsed infra config: {:?}", infra_config); + let infra_config = + InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 3e4813a8..fdc3fd58 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -363,22 +363,29 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Build repo =^..^= =^..^= =^..^= =^..^= info!( - "Using infra config from path: {}", + "Checking for infra config at path: {}", args.infra_config_path.display() ); - let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; - trace!("Parsed infra config: {:?}", infra_config); - - let repo_config = infra_config + let infra_config = + InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + trace!("Using infra config: {:?}", infra_config); + + // If the user has the requested (or "default") repo defined in their Infra.toml, use it, + // otherwise use a default config. + let default_repo_config = RepoConfig::default(); + let repo_config = if let Some(repo_config) = infra_config .repo .as_ref() - .context(error::MissingConfig { - missing: "repo section", - })? 
- .get(&repo_args.repo) - .context(error::MissingConfig { - missing: format!("definition for repo {}", &repo_args.repo), - })?; + .and_then(|repo_section| repo_section.get(&repo_args.repo)) + .map(|repo| { + info!("Using repo '{}' from Infra.toml", repo_args.repo); + repo + }) { + repo_config + } else { + info!("Didn't find repo '{}' in Infra.toml, using default configuration", repo_args.repo); + &default_repo_config + }; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh let maybe_urls = repo_urls(&repo_args, &repo_config)?; From a443778f5f0fc9b2c39d4e5957d95c2d83702348 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 16 Oct 2020 15:03:27 -0700 Subject: [PATCH 0365/1356] pubsys: fix typo in SSM error message --- tools/pubsys/src/aws/ssm/ssm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 11ae7b74..0d408793 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -355,7 +355,7 @@ mod error { missing: String, }, - #[snafu(display("Failed to {} of {} parameters; see above", failure_count, total_count))] + #[snafu(display("Failed to set {} of {} parameters; see above", failure_count, total_count))] SetParameters { failure_count: usize, total_count: usize, From 0e3f3d12d379322618740863c3718ff6adea4090 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 6 Oct 2020 15:38:20 -0700 Subject: [PATCH 0366/1356] pubsys: add `validate-repo` for validating TUF repositories Adds a new pubsys subcommand `validate-repo` for validating TUF repositories. Adds a new cargo make task `validate-repo` that invokes this new subcommand. --- tools/pubsys-setup/src/main.rs | 2 +- tools/pubsys/src/main.rs | 15 ++ tools/pubsys/src/repo.rs | 19 ++- tools/pubsys/src/repo/transport.rs | 2 +- tools/pubsys/src/repo/validate_repo/mod.rs | 178 +++++++++++++++++++++ 5 files changed, 206 insertions(+), 10 deletions(-) create mode 100644 tools/pubsys/src/repo/validate_repo/mod.rs diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index b8518924..d663fb74 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -32,7 +32,7 @@ struct Args { infra_config_path: PathBuf, #[structopt(long)] - /// Use this named repo from Infra.toml + /// Use this named repo infrastructure from Infra.toml repo: String, #[structopt(long, parse(from_os_str))] diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index d69463cb..9f58ba25 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -3,6 +3,7 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch +* validating repos by loading them and retrieving their targets * registering and copying EC2 AMIs * Marking EC2 AMIs public (or private again) * setting SSM parameters based on built AMIs @@ -41,6 +42,14 @@ fn run() -> Result<()> { match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), + SubCommand::ValidateRepo(ref validate_repo_args) => { + let mut rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { + repo::validate_repo::run(&args, &validate_repo_args) + .await + .context(error::ValidateRepo) + }) + } SubCommand::Ami(ref ami_args) => { let mut rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) @@ -94,6 +103,7 @@ struct Args { #[derive(Debug, StructOpt)] enum 
SubCommand { Repo(repo::RepoArgs), + ValidateRepo(repo::validate_repo::ValidateRepoArgs), Ami(aws::ami::AmiArgs), PublishAmi(aws::publish_ami::PublishArgs), @@ -138,6 +148,11 @@ mod error { #[snafu(display("Failed to build repo: {}", source))] Repo { source: crate::repo::Error }, + #[snafu(display("Failed to validate repository: {}", source))] + ValidateRepo { + source: crate::repo::validate_repo::Error, + }, + #[snafu(display("Failed to create async runtime: {}", source))] Runtime { source: std::io::Error }, diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index fdc3fd58..f6e40134 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -1,6 +1,7 @@ //! The repo module owns the 'repo' subcommand and controls the process of building a repository. mod transport; +pub(crate) mod validate_repo; use crate::{friendly_version, Args}; use chrono::{DateTime, Utc}; @@ -39,7 +40,7 @@ lazy_static! { pub(crate) struct RepoArgs { // Metadata about the update #[structopt(long)] - /// Use this named repo from Infra.toml + /// Use this named repo infrastructure from Infra.toml repo: String, #[structopt(long)] /// The architecture of the repo and the update being added @@ -254,8 +255,9 @@ where /// If the infra config has a repo section defined for the given repo, and it has metadata base and /// targets URLs defined, returns those URLs, otherwise None. fn repo_urls<'a>( - repo_args: &RepoArgs, repo_config: &'a RepoConfig, + variant: &str, + arch: &str, ) -> Result> { // Check if both URLs are set if let Some(metadata_base_url) = repo_config.metadata_base_url.as_ref() { @@ -265,10 +267,8 @@ fn repo_urls<'a>( } else { "/" }; - let metadata_url_str = format!( - "{}{}{}/{}", - metadata_base_url, base_slash, repo_args.variant, repo_args.arch - ); + let metadata_url_str = + format!("{}{}{}/{}", metadata_base_url, base_slash, variant, arch); let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrl { input: &metadata_url_str, })?; @@ -351,7 +351,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { .join(&repo_args.arch); let targets_out_dir = repo_args.outdir.join("targets"); - // If the given metadata directory exists, throw an error. We dont want to overwrite a user's + // If the given metadata directory exists, throw an error. We don't want to overwrite a user's // existing repository. (The targets directory is shared, so it's fine if that exists.) 
ensure!( !Path::exists(&metadata_out_dir), @@ -388,7 +388,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { }; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh - let maybe_urls = repo_urls(&repo_args, &repo_config)?; + let maybe_urls = repo_urls(&repo_config, &repo_args.variant, &repo_args.arch)?; let workdir = tempdir().context(error::TempDir)?; let transport = RepoTransport::default(); let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() @@ -611,6 +611,9 @@ mod error { #[snafu(display("Infra.toml is missing {}", missing))] MissingConfig { missing: String }, + #[snafu(display("Repo URLs not specified for repo '{}'", repo))] + MissingRepoUrls { repo: String }, + #[snafu(display("Failed to create new repo editor: {}", source))] NewEditor { source: tough::error::Error }, diff --git a/tools/pubsys/src/repo/transport.rs b/tools/pubsys/src/repo/transport.rs index 40b590e0..e889dc48 100644 --- a/tools/pubsys/src/repo/transport.rs +++ b/tools/pubsys/src/repo/transport.rs @@ -23,7 +23,7 @@ pub(crate) struct RepoTransport { } impl Transport for RepoTransport { - type Stream = Box; + type Stream = Box; type Error = error::Error; fn fetch(&self, url: Url) -> std::result::Result { diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs new file mode 100644 index 00000000..b2b0d04b --- /dev/null +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -0,0 +1,178 @@ +//! The validate_repo module owns the 'validate-repo' subcommand and provides methods for validating +//! a given TUF repository by attempting to load the repository and download its targets. + +use super::RepoTransport; +use crate::repo::{error as repo_error, repo_urls}; +use crate::Args; +use futures::future::join_all; +use log::{info, trace}; +use pubsys_config::InfraConfig; +use snafu::{OptionExt, ResultExt}; +use std::fs::File; +use std::io; +use std::path::PathBuf; +use structopt::StructOpt; +use tempfile::tempdir; +use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use url::Url; + +/// Validates a set of TUF repositories +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct ValidateRepoArgs { + #[structopt(long)] + /// Use this named repo infrastructure from Infra.toml + repo: String, + + #[structopt(long)] + /// The architecture of the repo being validated + arch: String, + #[structopt(long)] + /// The variant of the repo being validated + variant: String, + + #[structopt(long, parse(from_os_str))] + /// Path to root.json for this repo + root_role_path: PathBuf, + + #[structopt(long)] + /// Specifies whether to validate all listed targets by attempting to download them + validate_targets: bool, +} + +/// Retrieves listed targets and attempt to download them for validation purposes +async fn retrieve_targets( + repo: &Repository<'_, T>, +) -> Result<(), Error> +where + T: tough::Transport, + ::Stream: std::marker::Send, +{ + let targets = &repo.targets().signed.targets; + + let mut tasks = Vec::new(); + for target in targets + .keys() + .cloned() + { + let target = target.to_string(); + let mut reader = repo + .read_target(&target) + .with_context(|| repo_error::ReadTarget { + target: target.to_string(), + })? 
+ .with_context(|| error::TargetMissing { + target: target.to_string(), + })?; + info!("Downloading target: {}", target); + tasks.push(tokio::spawn(async move { + // tough's `Read` implementation validates the target as it's being downloaded + io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { + target: target.to_string(), + }) + })); + } + let results = join_all(tasks).await; + for result in results { + result.context(error::Join)??; + } + + Ok(()) +} + +async fn validate_repo( + transport: &RepoTransport, + root_role_path: &PathBuf, + metadata_url: Url, + targets_url: &Url, + validate_targets: bool, +) -> Result<(), Error> { + // Create a temporary directory where the TUF client can store metadata + let workdir = tempdir().context(repo_error::TempDir)?; + let settings = Settings { + root: File::open(root_role_path).context(repo_error::File { + path: root_role_path, + })?, + datastore: workdir.path(), + metadata_base_url: metadata_url.as_str(), + targets_base_url: targets_url.as_str(), + limits: Limits::default(), + expiration_enforcement: ExpirationEnforcement::Safe, + }; + + // Load the repository + let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + metadata_base_url: metadata_url.clone(), + })?; + info!("Loaded TUF repo: {}", metadata_url); + if validate_targets { + // Try retrieving listed targets + retrieve_targets(&repo).await?; + } + + Ok(()) +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = + InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + let repo_config = infra_config + .repo + .as_ref() + .context(repo_error::MissingConfig { + missing: "repo section", + })? + .get(&validate_repo_args.repo) + .context(repo_error::MissingConfig { + missing: format!("definition for repo {}", &validate_repo_args.repo), + })?; + + let transport = RepoTransport::default(); + let repo_urls = repo_urls( + &repo_config, + &validate_repo_args.variant, + &validate_repo_args.arch, + )? 
+ .context(repo_error::MissingRepoUrls { + repo: &validate_repo_args.repo, + })?; + validate_repo( + &transport, + &validate_repo_args.root_role_path, + repo_urls.0, + repo_urls.1, + validate_repo_args.validate_targets, + ) + .await +} + +mod error { + use snafu::Snafu; + use std::io; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Invalid percentage specified: {} is greater than 100", percentage))] + InvalidPercentage { percentage: u8 }, + + #[snafu(context(false), display("{}", source))] + Repo { source: crate::repo::Error }, + + #[snafu(display("Failed to download and write target '{}': {}", target, source))] + TargetDownload { target: String, source: io::Error }, + + #[snafu(display("Missing target: {}", target))] + TargetMissing { target: String }, + + #[snafu(display("Failed to spawn task for fetching target: {}", source))] + Join { source: tokio::task::JoinError }, + } +} +pub(crate) use error::Error; From 4b902c41b5f23acf90c319740a86677b264cce0d Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 6 Oct 2020 15:39:36 -0700 Subject: [PATCH 0367/1356] pubsys: add `check-repo-expirations` to check metadata expirations Adds new pubsys subcommand `check-repo-expirations` that checks for repository metadata expirations within a specified timeframe Adds a new Makefile.toml task that invokes `check-repo-expirations` --- tools/pubsys/src/main.rs | 11 + tools/pubsys/src/repo.rs | 1 + .../pubsys/src/repo/check_expirations/mod.rs | 197 ++++++++++++++++++ 3 files changed, 209 insertions(+) create mode 100644 tools/pubsys/src/repo/check_expirations/mod.rs diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 9f58ba25..83052d6d 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -4,6 +4,7 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch * validating repos by loading them and retrieving their targets +* checking for repository metadata expirations within specified number of days * registering and copying EC2 AMIs * Marking EC2 AMIs public (or private again) * setting SSM parameters based on built AMIs @@ -50,6 +51,10 @@ fn run() -> Result<()> { .context(error::ValidateRepo) }) } + SubCommand::CheckRepoExpirations(ref check_expirations_args) => { + repo::check_expirations::run(&args, &check_expirations_args) + .context(error::CheckExpirations) + } SubCommand::Ami(ref ami_args) => { let mut rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) @@ -104,6 +109,7 @@ struct Args { enum SubCommand { Repo(repo::RepoArgs), ValidateRepo(repo::validate_repo::ValidateRepoArgs), + CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs), Ami(aws::ami::AmiArgs), PublishAmi(aws::publish_ami::PublishArgs), @@ -153,6 +159,11 @@ mod error { source: crate::repo::validate_repo::Error, }, + #[snafu(display("Check expirations error: {}", source))] + CheckExpirations { + source: crate::repo::check_expirations::Error, + }, + #[snafu(display("Failed to create async runtime: {}", source))] Runtime { source: std::io::Error }, diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index f6e40134..a7c4ba60 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -1,5 +1,6 @@ //! The repo module owns the 'repo' subcommand and controls the process of building a repository. 
+pub(crate) mod check_expirations; mod transport; pub(crate) mod validate_repo; diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs new file mode 100644 index 00000000..cdd9e9a9 --- /dev/null +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -0,0 +1,197 @@ +//! The check_expirations module owns the 'check-repo-expirations' subcommand and provide methods for +//! checking the metadata expirations of a given TUF repository. + +use super::RepoTransport; +use crate::repo::{error as repo_error, repo_urls}; +use crate::Args; +use chrono::{DateTime, Utc}; +use log::{error, info, trace, warn}; +use parse_datetime::parse_datetime; +use pubsys_config::InfraConfig; +use snafu::{OptionExt, ResultExt}; +use std::collections::HashMap; +use std::fs::File; +use std::path::PathBuf; +use structopt::StructOpt; +use tempfile::tempdir; +use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use url::Url; + +/// Checks for metadata expirations for a set of TUF repositories +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct CheckExpirationsArgs { + #[structopt(long)] + /// Use this named repo infrastructure from Infra.toml + repo: String, + + #[structopt(long)] + /// The architecture of the repo being checked for expirations + arch: String, + #[structopt(long)] + /// The variant of the repo being checked for expirations + variant: String, + + #[structopt(long, parse(from_os_str))] + /// Path to root.json for this repo + root_role_path: PathBuf, + + #[structopt(long, parse(try_from_str = parse_datetime))] + /// Finds metadata files expiring between now and a specified time; RFC3339 date or "in X hours/days/weeks" + expiration_limit: DateTime, +} + +/// Checks for upcoming role expirations, gathering them in a map of role to expiration datetime. 
+fn find_upcoming_metadata_expiration( + repo: &Repository<'_, T>, + end_date: DateTime, +) -> HashMap> +where + T: tough::Transport, +{ + let mut expirations = HashMap::new(); + info!( + "Looking for metadata expirations happening from now to {}", + end_date + ); + if repo.root().signed.expires <= end_date { + expirations.insert(tough::schema::RoleType::Root, repo.root().signed.expires); + } + if repo.snapshot().signed.expires <= end_date { + expirations.insert( + tough::schema::RoleType::Snapshot, + repo.snapshot().signed.expires, + ); + } + if repo.targets().signed.expires <= end_date { + expirations.insert( + tough::schema::RoleType::Targets, + repo.targets().signed.expires, + ); + } + if repo.timestamp().signed.expires <= end_date { + expirations.insert( + tough::schema::RoleType::Timestamp, + repo.timestamp().signed.expires, + ); + } + + expirations +} + +fn check_expirations( + transport: &RepoTransport, + root_role_path: &PathBuf, + metadata_url: &Url, + targets_url: &Url, + expiration_limit: DateTime, +) -> Result<()> { + // Create a temporary directory where the TUF client can store metadata + let workdir = tempdir().context(repo_error::TempDir)?; + let settings = Settings { + root: File::open(root_role_path).context(repo_error::File { + path: root_role_path, + })?, + datastore: workdir.path(), + metadata_base_url: metadata_url.as_str(), + targets_base_url: targets_url.as_str(), + limits: Limits::default(), + // We're gonna check the expiration ourselves + expiration_enforcement: ExpirationEnforcement::Unsafe, + }; + + // Load the repository + let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + metadata_base_url: metadata_url.clone(), + })?; + info!("Loaded TUF repo:\t{}", metadata_url); + + info!("Root expiration:\t{}", repo.root().signed.expires); + info!("Snapshot expiration:\t{}", repo.snapshot().signed.expires); + info!("Targets expiration:\t{}", repo.targets().signed.expires); + info!("Timestamp expiration:\t{}", repo.timestamp().signed.expires); + // Check for upcoming metadata expirations if a timeframe is specified + let upcoming_expirations = find_upcoming_metadata_expiration(&repo, expiration_limit); + if !upcoming_expirations.is_empty() { + let now = Utc::now(); + for (role, expiration_date) in upcoming_expirations { + if expiration_date < now { + error!( + "Repo '{}': '{}' expired on {}", + metadata_url, role, expiration_date + ) + } else { + warn!( + "Repo '{}': '{}' expiring in {} at {}", + metadata_url, + role, + expiration_date - now, + expiration_date + ) + } + } + return Err(Error::RepoExpirations { + metadata_url: metadata_url.clone(), + }); + } + + Ok(()) +} + +/// Common entrypoint from main() +pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> Result<()> { + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = + InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + let repo_config = infra_config + .repo + .as_ref() + .context(repo_error::MissingConfig { + missing: "repo section", + })? + .get(&check_expirations_args.repo) + .with_context(|| repo_error::MissingConfig { + missing: format!("definition for repo {}", &check_expirations_args.repo), + })?; + + let transport = RepoTransport::default(); + let repo_urls = repo_urls( + &repo_config, + &check_expirations_args.variant, + &check_expirations_args.arch, + )? 
+ .context(repo_error::MissingRepoUrls { + repo: &check_expirations_args.repo, + })?; + check_expirations( + &transport, + &check_expirations_args.root_role_path, + &repo_urls.0, + repo_urls.1, + check_expirations_args.expiration_limit, + )?; + + Ok(()) +} + +mod error { + use snafu::Snafu; + use url::Url; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(context(false), display("{}", source))] + Repo { source: crate::repo::Error }, + + #[snafu(display("Found expiring/expired metadata in '{}'", metadata_url))] + RepoExpirations { metadata_url: Url }, + } +} +pub(crate) use error::Error; + +type Result = std::result::Result; From 212fd62fc692d0f2e4899d4d61c63831992a811c Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 24 Sep 2020 15:39:27 -0700 Subject: [PATCH 0368/1356] pubsys: add `refresh-repo` to refresh and resign TUF repositories Adds a new subcommand `refresh-repo` for refreshing and resigning non-root metadata files of TUF repositories. Add new Makefile.toml task `refresh-repo` for refreshing the expiration dates of TUF repositories' metadata files --- tools/pubsys/src/main.rs | 10 ++ tools/pubsys/src/repo.rs | 95 +++++++--- tools/pubsys/src/repo/refresh_repo/mod.rs | 210 ++++++++++++++++++++++ 3 files changed, 291 insertions(+), 24 deletions(-) create mode 100644 tools/pubsys/src/repo/refresh_repo/mod.rs diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 83052d6d..62252b5e 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -5,6 +5,7 @@ Currently implemented: * building repos, whether starting from an existing repo or from scratch * validating repos by loading them and retrieving their targets * checking for repository metadata expirations within specified number of days +* refreshing and re-signing repos' non-root metadata files * registering and copying EC2 AMIs * Marking EC2 AMIs public (or private again) * setting SSM parameters based on built AMIs @@ -55,6 +56,9 @@ fn run() -> Result<()> { repo::check_expirations::run(&args, &check_expirations_args) .context(error::CheckExpirations) } + SubCommand::RefreshRepo(ref refresh_repo_args) => { + repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepo) + } SubCommand::Ami(ref ami_args) => { let mut rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) @@ -110,6 +114,7 @@ enum SubCommand { Repo(repo::RepoArgs), ValidateRepo(repo::validate_repo::ValidateRepoArgs), CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs), + RefreshRepo(repo::refresh_repo::RefreshRepoArgs), Ami(aws::ami::AmiArgs), PublishAmi(aws::publish_ami::PublishArgs), @@ -164,6 +169,11 @@ mod error { source: crate::repo::check_expirations::Error, }, + #[snafu(display("Failed to refresh repository metadata: {}", source))] + RefreshRepo { + source: crate::repo::refresh_repo::Error, + }, + #[snafu(display("Failed to create async runtime: {}", source))] Runtime { source: std::io::Error }, diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index a7c4ba60..159dde96 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -1,6 +1,7 @@ //! The repo module owns the 'repo' subcommand and controls the process of building a repository. 
pub(crate) mod check_expirations; +pub(crate) mod refresh_repo; mod transport; pub(crate) mod validate_repo; @@ -184,6 +185,46 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> Ok(()) } +/// Set expirations of all non-root role metadata based on a given `RepoExpirationPolicy` and an +/// expiration start time +fn set_expirations<'a>( + editor: &mut RepositoryEditor<'a, RepoTransport>, + expiration_policy: &RepoExpirationPolicy, + expiration_start_time: DateTime, +) -> Result<()> { + let snapshot_expiration = expiration_start_time + expiration_policy.snapshot_expiration; + let targets_expiration = expiration_start_time + expiration_policy.targets_expiration; + let timestamp_expiration = expiration_start_time + expiration_policy.timestamp_expiration; + info!( + "Setting non-root metadata expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}", + snapshot_expiration, targets_expiration, timestamp_expiration + ); + editor + .snapshot_expires(snapshot_expiration) + .targets_expires(targets_expiration) + .context(error::SetTargetsExpiration { + expiration: targets_expiration, + })? + .timestamp_expires(timestamp_expiration); + + Ok(()) +} + +/// Set versions of all role metadata; the version will be the UNIX timestamp of the current time. +fn set_versions<'a>(editor: &mut RepositoryEditor<'a, RepoTransport>) -> Result<()> { + let seconds = Utc::now().timestamp(); + let unsigned_seconds = seconds.try_into().expect("System clock before 1970??"); + let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??"); + debug!("Repo version: {}", version); + editor + .snapshot_version(version) + .targets_version(version) + .context(error::SetTargetsVersion { version })? + .timestamp_version(version); + + Ok(()) +} + /// Adds targets, expirations, and version to the RepositoryEditor fn update_editor<'a, P>( repo_args: &'a RepoArgs, @@ -344,6 +385,24 @@ where } } +/// Gets the corresponding `KeySource` according to the signing key config from Infra.toml +fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Box { + match signing_key_config { + SigningKeyConfig::file { path } => Box::new(LocalKeySource { path: path.clone() }), + SigningKeyConfig::kms { key_id } => Box::new(KmsKeySource { + profile: None, + key_id: key_id.clone(), + client: None, + signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, + }), + SigningKeyConfig::ssm { parameter } => Box::new(SsmKeySource { + profile: None, + parameter_name: parameter.clone(), + key_id: None, + }), + } +} + /// Common entrypoint from main() pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let metadata_out_dir = repo_args @@ -449,30 +508,18 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // generated local key. 
let signing_key_config = repo_config.signing_keys.as_ref(); - let key_source: Box = match signing_key_config { - Some(SigningKeyConfig::file { path }) => Box::new(LocalKeySource { path: path.clone() }), - Some(SigningKeyConfig::kms { key_id }) => Box::new(KmsKeySource { - profile: None, - key_id: key_id.clone(), - client: None, - signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, - }), - Some(SigningKeyConfig::ssm { parameter }) => Box::new(SsmKeySource { - profile: None, - parameter_name: parameter.clone(), - key_id: None, - }), - None => { - ensure!( - repo_args.default_key_path.exists(), - error::MissingConfig { - missing: "signing_keys in repo config, and we found no local key", - } - ); - Box::new(LocalKeySource { - path: repo_args.default_key_path.clone(), - }) - } + let key_source = if let Some(signing_key_config) = signing_key_config { + get_signing_key_source(signing_key_config) + } else { + ensure!( + repo_args.default_key_path.exists(), + error::MissingConfig { + missing: "signing_keys in repo config, and we found no local key", + } + ); + Box::new(LocalKeySource { + path: repo_args.default_key_path.clone(), + }) }; let signed_repo = editor.sign(&[key_source]).context(error::RepoSign)?; diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs new file mode 100644 index 00000000..80844422 --- /dev/null +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -0,0 +1,210 @@ +//! The refresh_repo module owns the 'refresh-repo' subcommand and provide methods for +//! refreshing and re-signing the metadata files of a given TUF repository. + +use super::RepoTransport; +use crate::repo::{ + error as repo_error, get_signing_key_source, repo_urls, set_expirations, set_versions, +}; +use crate::Args; +use chrono::{DateTime, Utc}; +use lazy_static::lazy_static; +use log::{info, trace}; +use pubsys_config::{InfraConfig, RepoExpirationPolicy}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::fs; +use std::fs::File; +use std::path::{Path, PathBuf}; +use structopt::StructOpt; +use tempfile::tempdir; +use tough::editor::RepositoryEditor; +use tough::key_source::KeySource; +use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use url::Url; + +lazy_static! { + static ref EXPIRATION_START_TIME: DateTime = Utc::now(); +} + +/// Refreshes and re-sign TUF repositories' non-root metadata files with new expiration dates +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct RefreshRepoArgs { + #[structopt(long)] + /// Use this named repo infrastructure from Infra.toml + repo: String, + + #[structopt(long)] + /// The architecture of the repo being refreshed and re-signed + arch: String, + #[structopt(long)] + /// The variant of the repo being refreshed and re-signed + variant: String, + + #[structopt(long, parse(from_os_str))] + /// Path to root.json for this repo + root_role_path: PathBuf, + + #[structopt(long, parse(from_os_str))] + /// Path to file that defines when repo non-root metadata should expire + repo_expiration_policy_path: PathBuf, + + #[structopt(long, parse(from_os_str))] + /// Where to store the refresh/re-signed repository (just the metadata files) + outdir: PathBuf, + + #[structopt(long)] + /// If this flag is set, repositories will succeed in loading and be refreshed even if they have + /// expired metadata files. 
+ unsafe_refresh: bool, +} + +fn refresh_repo( + transport: &RepoTransport, + root_role_path: &PathBuf, + metadata_out_dir: &PathBuf, + metadata_url: &Url, + targets_url: &Url, + key_source: Box, + expiration: &RepoExpirationPolicy, + unsafe_refresh: bool, +) -> Result<(), Error> { + // If the given metadata directory exists, throw an error. We don't want to overwrite a user's + // existing repository. + ensure!( + !Path::exists(&metadata_out_dir), + repo_error::RepoExists { + path: metadata_out_dir + } + ); + + // Create a temporary directory where the TUF client can store metadata + let workdir = tempdir().context(repo_error::TempDir)?; + let settings = Settings { + root: File::open(root_role_path).context(repo_error::File { + path: root_role_path, + })?, + datastore: workdir.path(), + metadata_base_url: metadata_url.as_str(), + targets_base_url: targets_url.as_str(), + limits: Limits::default(), + expiration_enforcement: if unsafe_refresh { + ExpirationEnforcement::Unsafe + } else { + ExpirationEnforcement::Safe + }, + }; + + // Load the repository and get the repo editor for it + let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + metadata_base_url: metadata_url.clone(), + })?; + let mut repo_editor = + RepositoryEditor::from_repo(&root_role_path, repo).context(repo_error::EditorFromRepo)?; + info!("Loaded TUF repo: {}", metadata_url); + + // Refresh the expiration dates of all non-root metadata files + set_expirations(&mut repo_editor, &expiration, *EXPIRATION_START_TIME)?; + + // Refresh the versions of all non-root metadata files + set_versions(&mut repo_editor)?; + + // Sign the repository + let signed_repo = repo_editor + .sign(&[key_source]) + .context(repo_error::RepoSign)?; + + // Write out the metadata files for the repository + info!("Writing repo metadata to: {}", metadata_out_dir.display()); + fs::create_dir_all(&metadata_out_dir).context(repo_error::CreateDir { + path: &metadata_out_dir, + })?; + signed_repo + .write(&metadata_out_dir) + .context(repo_error::RepoWrite { + path: &metadata_out_dir, + })?; + + Ok(()) +} + +/// Common entrypoint from main() +pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<(), Error> { + info!( + "Using infra config from path: {}", + args.infra_config_path.display() + ); + let infra_config = + InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + trace!("Parsed infra config: {:?}", infra_config); + + let repo_config = infra_config + .repo + .as_ref() + .context(repo_error::MissingConfig { + missing: "repo section", + })? + .get(&refresh_repo_args.repo) + .context(repo_error::MissingConfig { + missing: format!("definition for repo {}", &refresh_repo_args.repo), + })?; + + // Get signing key config from repository configuration + let signing_key_config = + repo_config + .signing_keys + .as_ref() + .context(repo_error::MissingConfig { + missing: "signing_keys", + })?; + let key_source = get_signing_key_source(signing_key_config); + + // Get the expiration policy + info!( + "Using repo expiration policy from path: {}", + refresh_repo_args.repo_expiration_policy_path.display() + ); + let expiration = + RepoExpirationPolicy::from_path(&refresh_repo_args.repo_expiration_policy_path) + .context(repo_error::Config)?; + + let transport = RepoTransport::default(); + let repo_urls = repo_urls( + &repo_config, + &refresh_repo_args.variant, + &refresh_repo_args.arch, + )? 
+ .context(repo_error::MissingRepoUrls { + repo: &refresh_repo_args.repo, + })?; + refresh_repo( + &transport, + &refresh_repo_args.root_role_path, + &refresh_repo_args + .outdir + .join(&refresh_repo_args.variant) + .join(&refresh_repo_args.arch), + &repo_urls.0, + repo_urls.1, + key_source, + &expiration, + refresh_repo_args.unsafe_refresh, + )?; + + Ok(()) +} + +mod error { + use snafu::Snafu; + use url::Url; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(context(false), display("{}", source))] + Repo { source: crate::repo::Error }, + + #[snafu(display("Failed to refresh & re-sign metadata for: {:#?}", list_of_urls))] + RepoRefresh { list_of_urls: Vec }, + } +} +pub(crate) use error::Error; From 83df8a9de966131f84efd32073d20b941c8e2fd6 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 20 Oct 2020 20:06:31 +0000 Subject: [PATCH 0369/1356] pubsys: Canonicalize architecture for ec2 when building AMIs When building AMIs, we need to canonicalize architectures to something that EC2 understands. This change uses an existing function to canonicalize the architecture when running `pubsys ami`. --- tools/pubsys/src/aws/ami/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 3dcb0885..3985e899 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -6,7 +6,7 @@ mod snapshot; pub(crate) mod wait; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; -use crate::aws::{client::build_client, region_from_string}; +use crate::aws::{client::build_client, parse_arch, region_from_string}; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; @@ -48,7 +48,7 @@ pub(crate) struct AmiArgs { data_volume_size: i64, /// The architecture of the machine image - #[structopt(short = "a", long)] + #[structopt(short = "a", long, parse(try_from_str = parse_arch))] arch: String, /// The desired AMI name From 0781c597ea5af87cd4f0ed3009eb6a355c9ee5dd Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 22 Oct 2020 16:54:07 -0700 Subject: [PATCH 0370/1356] Fix typo and whitespace in documentation --- BUILDING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index c6a6603a..aac98f4e 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -14,11 +14,11 @@ The build process artifacts and resulting images can consume in excess of 80GB i #### Linux -The build system requires certain operating system packages to be installed. +The build system requires certain operating system packages to be installed. Ensure the following OS packages are installed: -##### Ubuntu +##### Ubuntu ``` apt install build-essential libssl-dev pkg-config From 0f30ff8596dd96eafa183bcb99f1ad6908d7d82b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 14 Oct 2020 23:37:12 +0000 Subject: [PATCH 0371/1356] create links for ephemeral storage devices Some EC2 instance types come with disks that are physically connected to the host. Kubernetes can use these as local persistent volumes in conjunction with an external static provisioner. To make it easier to configure the static provisioner, we now create symlinks to known ephemeral disk devices in /dev/disk/ephemeral. Incorrectly identifying a device as an ephemeral when it is not could cause irrecoverable data loss. 
Hence we take an "allowlist" approach where only specific device types are included, and check for known partition types as an additional safeguard. --- GLOSSARY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/GLOSSARY.md b/GLOSSARY.md index 105ac9c1..2bb46b85 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -8,6 +8,7 @@ * [**early-boot-config**](sources/api/early-boot-config): A program run at boot to read platform-specific data, such as EC2 user data, and send requested configuration to the API. * **gptprio:** A structure of bits in GPT partition headers that specifies priority, tries remaining, and whether the partition booted successfully before. signpost sets these and GRUB uses them to determine which partition set to boot. +* [**ghostdog**](sources/ghostdog): A program used to manage ephemeral disks. * [**growpart**](sources/growpart): A program used to expand disk partitions upon boot. * **host containers**: Containers that run in a separate instance of containerd than "user" containers spawned by an orchestrator (e.g. Kubernetes). Used for system maintenance and connectivity. From 1413cd39a100caf069d87b163d0c6aefaea9b453 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 30 Oct 2020 22:32:40 +0000 Subject: [PATCH 0372/1356] BUILDING: add missing setup information --- BUILDING.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index aac98f4e..03c44ed6 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -21,13 +21,13 @@ Ensure the following OS packages are installed: ##### Ubuntu ``` -apt install build-essential libssl-dev pkg-config +apt install build-essential openssl libssl-dev pkg-config liblz4-tool ``` ##### Fedora ``` -yum install make automake gcc openssl-devel pkg-config +yum install make automake gcc openssl openssl-devel pkg-config lz4 perl-FindBin perl-lib ``` @@ -55,6 +55,10 @@ Builds rely on Docker's integrated BuildKit support, which has received many fix You'll need to have Docker installed and running, with your user account added to the `docker` group. Docker's [post-installation steps for Linux](https://docs.docker.com/install/linux/linux-postinstall/) will walk you through that. +> Note: If you're on a newer Linux distribution using the unified cgroup hierarchy with cgroups v2, you may need to disable it to work with current versions of runc. +> You'll know this is the case if you see an error like `docker: Error response from daemon: OCI runtime create failed: this version of runc doesn't work on cgroups v2: unknown.` +> Set the kernel parameter `systemd.unified_cgroup_hierarchy=0` in your boot configuration (e.g. GRUB) and reboot. + ### Build process To build an image, run: From acccc64f123e3362e11359795b493f40e493b0f4 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 2 Nov 2020 18:17:59 +0000 Subject: [PATCH 0373/1356] pubsys-setup: allow file URLs for role --- tools/pubsys-setup/src/main.rs | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index d663fb74..cb5a9253 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -142,7 +142,7 @@ fn run() -> Result<()> { Url::from_file_path(&args.default_key_path) .ok() - .context(error::FileUrl { + .context(error::FileToUrl { path: args.default_key_path, })? 
}; @@ -222,10 +222,19 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) ); } else { // Download the root role by URL and verify its checksum before writing it. - let root_role_data = reqwest::blocking::get(url.clone()) - .with_context(|| error::GetUrl { url: url.clone() })? - .text() - .with_context(|| error::GetUrl { url: url.clone() })?; + let root_role_data = if url.scheme() == "file" { + // reqwest won't fetch a file URL, so just read the file. + let path = url + .to_file_path() + .ok() + .with_context(|| error::UrlToFile { url: url.clone() })?; + fs::read_to_string(&path).context(error::ReadFile { path: &path })? + } else { + reqwest::blocking::get(url.clone()) + .with_context(|| error::GetUrl { url: url.clone() })? + .text() + .with_context(|| error::GetUrl { url: url.clone() })? + }; let mut d = Sha512::new(); d.update(&root_role_data); @@ -281,7 +290,7 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) } if key_url.is_none() && args.default_key_path.exists() { key_url = Some(Url::from_file_path(&args.default_key_path).ok().context( - error::FileUrl { + error::FileToUrl { path: &args.default_key_path, }, )?); @@ -319,7 +328,7 @@ mod error { Config { source: pubsys_config::Error }, #[snafu(display("Path not valid as a URL: {}", path.display()))] - FileUrl { path: PathBuf }, + FileToUrl { path: PathBuf }, #[snafu(display("Failed to fetch URL '{}': {}", url, source))] GetUrl { url: Url, source: reqwest::Error }, @@ -354,6 +363,9 @@ mod error { #[snafu(display("Failed to set permissions on {}: {}", path.display(), source))] SetMode { path: PathBuf, source: io::Error }, + #[snafu(display("Unable to build URL from signing key for repo '{}'", repo))] + SigningKeyUrl { repo: String }, + #[snafu(display("Failed to create temp file for {}: {}", purpose, source))] TempFileCreate { purpose: String, source: io::Error }, @@ -369,8 +381,8 @@ mod error { #[snafu(display("Failed to start tuftool: {}", source))] TuftoolSpawn { source: io::Error }, - #[snafu(display("Unable to build URL from signing key for repo '{}'", repo))] - SigningKeyUrl { repo: String }, + #[snafu(display("URL not valid as a path: {}", url))] + UrlToFile { url: Url }, #[snafu(display("Failed to write '{}': {}", path.display(), source))] WriteFile { path: PathBuf, source: io::Error }, From db21beec45174f8267fdd1cf3d5df78f91448dd0 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 3 Nov 2020 00:43:14 +0000 Subject: [PATCH 0374/1356] kernel: update to 5.4.68 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index b93b4ac9..cab9d3ec 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/36ea759a11e6e364ab8b2bf857c03cdbf53d33e348e785ed6767b87f8ac12c27/kernel-5.4.58-32.125.amzn2.src.rpm" -sha512 = "c7c2fdcb752cc6ddc6410ac9195e5443b66e2e6354f4299786df6aa303aad64b3a4aa86bec4f1d8f0ac93280353ca2cc989e67417e974aa1a1f38013b01c9e6f" +url = "https://cdn.amazonlinux.com/blobstore/195cb5ec07623ef7be724bd99bd49eda17ebe7ad82d752d0e39b096de5411500/kernel-5.4.68-34.125.amzn2.src.rpm" +sha512 = "83833e09f14d8fbabae723051b4f24e9b658502d7286918146f12e26783985161194e8b94eeceea03812738495be87563c9e36738f22ae2a5f93c43b08ab3c52" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index a1b2212f..8e8bc818 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.58 +Version: 5.4.68 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/36ea759a11e6e364ab8b2bf857c03cdbf53d33e348e785ed6767b87f8ac12c27/kernel-5.4.58-32.125.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/195cb5ec07623ef7be724bd99bd49eda17ebe7ad82d752d0e39b096de5411500/kernel-5.4.68-34.125.amzn2.src.rpm Source100: config-bottlerocket Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch BuildRequires: bc From 03a11530459e229f3970ce171b812186b0baa319 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 5 Nov 2020 19:48:11 +0000 Subject: [PATCH 0375/1356] Update Rust dependencies in tools/ --- tools/Cargo.lock | 549 ++++++++++++++++++++++------------------ tools/pubsys/Cargo.toml | 2 +- 2 files changed, 307 insertions(+), 244 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index e9ede8ab..c5f8d674 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. 
[[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" dependencies = [ "gimli", ] @@ -17,9 +17,9 @@ checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] @@ -33,17 +33,11 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" - [[package]] name = "argh" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca1877e24cecacd700d469066e0160c4f8497cc5635367163f50c8beec820154" +checksum = "91792f088f87cdc7a2cfb1d617fa5ea18d7f1dc22ef0e1b5f82f3157cdc522be" dependencies = [ "argh_derive", "argh_shared", @@ -51,9 +45,9 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e742194e0f43fc932bcb801708c2b279d3ec8f527e3acda05a6a9f342c5ef764" +checksum = "c4eb0c0c120ad477412dc95a4ce31e38f2113e46bd13511253f79196ca68b067" dependencies = [ "argh_shared", "heck", @@ -64,9 +58,9 @@ dependencies = [ [[package]] name = "argh_shared" -version = "0.1.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1ba68f4276a778591e36a0c348a269888f3a177c8d2054969389e3b59611ff5" +checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" [[package]] name = "arrayref" @@ -76,15 +70,15 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "async-trait" -version = "0.1.36" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" dependencies = [ "proc-macro2", "quote", @@ -104,18 +98,18 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "2baad346b2d4e94a24347adeee9c7a93f412ee94b9cc26e5b59dea23848e9f28" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object", @@ -124,9 +118,9 @@ dependencies = [ [[package]] name 
= "base-x" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" +checksum = "c2734baf8ed08920ccecce1b48a2dfce4ac74a973144add031163bd21a1c5dab" [[package]] name = "base64" @@ -148,9 +142,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec", @@ -189,9 +183,9 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" dependencies = [ "memchr", ] @@ -207,7 +201,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2 0.9.1", + "sha2 0.9.2", "snafu", "toml", "url", @@ -255,9 +249,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.58" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" [[package]] name = "cfg-if" @@ -265,23 +259,31 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ + "libc", "num-integer", "num-traits", "serde", - "time 0.1.43", + "time 0.1.44", + "winapi 0.3.9", ] [[package]] name = "clap" -version = "2.33.2" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10040cdf04294b565d9e0319955430099ec3813a64c952b86a41200ad714ae48" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term", "atty", @@ -308,7 +310,7 @@ dependencies = [ "rusoto_ebs", "rusoto_ec2", "rusoto_signature 0.45.0", - "sha2 0.9.1", + "sha2 0.9.2", "snafu", "tempfile", "tokio", @@ -316,21 +318,26 @@ dependencies = [ [[package]] name = "console" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b1aacfaffdbff75be81c15a399b4bedf78aaefe840e8af1d299ac2ade885d2" +checksum = "a50aab2529019abfabfa93f1e6c41ef392f91fbf179b347a7e96abb524884a08" dependencies = [ "encode_unicode", "lazy_static", "libc", "regex", "terminal_size", - "termios", "unicode-width", "winapi 0.3.9", "winapi-util", ] +[[package]] +name = "const_fn" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" + [[package]] name = 
"constant_time_eq" version = "0.1.5" @@ -361,11 +368,11 @@ checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -375,7 +382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", ] @@ -396,7 +403,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.4", - "subtle 2.2.3", + "subtle 2.3.0", ] [[package]] @@ -432,7 +439,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "dirs-sys", ] @@ -485,11 +492,11 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -537,9 +544,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" dependencies = [ "futures-channel", "futures-core", @@ -552,9 +559,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" dependencies = [ "futures-core", "futures-sink", @@ -562,15 +569,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" +checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" dependencies = [ "futures-core", "futures-task", @@ -579,15 +586,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" [[package]] name = 
"futures-macro" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -597,24 +604,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" dependencies = [ "futures-channel", "futures-core", @@ -623,7 +630,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project 1.0.1", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -651,26 +658,26 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "gimli" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" [[package]] name = "globset" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" +checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" dependencies = [ "aho-corasick", "bstr", @@ -681,9 +688,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ "bytes", "fnv", @@ -696,16 +703,14 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "tracing-futures", ] [[package]] name = "hashbrown" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" -dependencies = [ - "autocfg", -] +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" @@ -718,9 +723,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" dependencies = [ "libc", ] @@ -778,11 +783,17 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + [[package]] name = "hyper" -version = "0.13.7" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ "bytes", "futures-channel", @@ -792,10 +803,10 @@ dependencies = [ "http", "http-body", "httparse", + "httpdate", "itoa", - "pin-project", + "pin-project 1.0.1", "socket2", - "time 0.1.43", "tokio", "tower-service", "tracing", @@ -830,9 +841,9 @@ dependencies = [ "futures-util", "hyper", "log", - "rustls 0.18.0", + "rustls 0.18.1", "tokio", - "tokio-rustls 0.14.0", + "tokio-rustls 0.14.1", "webpki", ] @@ -862,9 +873,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ "autocfg", "hashbrown", @@ -905,9 +916,9 @@ checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "js-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" +checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" dependencies = [ "wasm-bindgen", ] @@ -930,9 +941,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.74" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" [[package]] name = "log" @@ -940,7 +951,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -957,9 +968,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "mime" @@ -979,11 +990,12 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" dependencies = [ "adler", + "autocfg", ] [[package]] @@ -992,7 +1004,7 @@ version 
= "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", @@ -1070,11 +1082,11 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -1087,9 +1099,9 @@ checksum = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", "num-traits", @@ -1097,9 +1109,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] @@ -1122,9 +1134,9 @@ checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" [[package]] name = "object" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" [[package]] name = "olpc-cjson" @@ -1139,9 +1151,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" [[package]] name = "opaque-debug" @@ -1162,7 +1174,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 0.1.10", "foreign-types", "lazy_static", "libc", @@ -1224,20 +1236,49 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + +[[package]] +name = "pin-project" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" +dependencies = [ + "pin-project-internal 0.4.27", +] + [[package]] name = "pin-project" -version = "0.4.23" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" +checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" dependencies = [ - "pin-project-internal", + "pin-project-internal 1.0.1", +] + 
+[[package]] +name = "pin-project-internal" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "pin-project-internal" -version = "0.4.23" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" +checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" dependencies = [ "proc-macro2", "quote", @@ -1246,9 +1287,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-utils" @@ -1258,15 +1299,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro-error" @@ -1294,9 +1335,9 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" @@ -1306,9 +1347,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.19" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" dependencies = [ "unicode-xid", ] @@ -1335,7 +1376,7 @@ dependencies = [ "rusoto_signature 0.45.0", "rusoto_ssm 0.45.0", "rusoto_sts", - "semver 0.10.0", + "semver 0.11.0", "serde", "serde_json", "simplelog", @@ -1372,7 +1413,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2 0.9.1", + "sha2 0.9.2", "shell-words", "simplelog", "snafu", @@ -1440,9 +1481,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom", "redox_syscall", @@ -1451,9 +1492,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.9" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = 
"38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "aho-corasick", "memchr", @@ -1463,9 +1504,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "remove_dir_all" @@ -1478,9 +1519,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6" +checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" dependencies = [ "base64 0.12.3", "bytes", @@ -1499,11 +1540,11 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project-lite", - "rustls 0.18.0", + "rustls 0.18.1", "serde", "serde_urlencoded", "tokio", - "tokio-rustls 0.14.0", + "tokio-rustls 0.14.1", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1545,7 +1586,7 @@ dependencies = [ "log", "md5", "percent-encoding", - "pin-project", + "pin-project 0.4.27", "rusoto_credential 0.44.0", "rusoto_signature 0.44.0", "rustc_version", @@ -1574,7 +1615,7 @@ dependencies = [ "log", "md5", "percent-encoding", - "pin-project", + "pin-project 0.4.27", "rusoto_credential 0.45.0", "rusoto_signature 0.45.0", "rustc_version", @@ -1595,7 +1636,7 @@ dependencies = [ "dirs", "futures", "hyper", - "pin-project", + "pin-project 0.4.27", "regex", "serde", "serde_json", @@ -1615,7 +1656,7 @@ dependencies = [ "dirs", "futures", "hyper", - "pin-project", + "pin-project 0.4.27", "regex", "serde", "serde_json", @@ -1683,12 +1724,12 @@ dependencies = [ "log", "md5", "percent-encoding", - "pin-project", + "pin-project 0.4.27", "rusoto_credential 0.44.0", "rustc_version", "serde", "sha2 0.8.2", - "time 0.2.16", + "time 0.2.22", "tokio", ] @@ -1708,12 +1749,12 @@ dependencies = [ "log", "md5", "percent-encoding", - "pin-project", + "pin-project 0.4.27", "rusoto_credential 0.45.0", "rustc_version", "serde", - "sha2 0.9.1", - "time 0.2.16", + "sha2 0.9.2", + "time 0.2.22", "tokio", ] @@ -1763,11 +1804,11 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" dependencies = [ - "base64 0.11.0", + "base64 0.12.3", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1775,9 +1816,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" [[package]] name = "rustc_version" @@ -1803,9 +1844,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log", @@ -1890,16 +1931,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] name = "semver" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.1", "serde", ] @@ -1909,20 +1950,29 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +[[package]] +name = "semver-parser" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ef146c2ad5e5f4b037cd6ce2ebb775401729b19a82040c1beac9d36c7d1428" +dependencies = [ + "pest", +] + [[package]] name = "serde" -version = "1.0.114" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2", "quote", @@ -1931,9 +1981,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.57" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c" +checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ "itoa", "ryu", @@ -1981,12 +2031,12 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 1.0.0", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", @@ -2016,11 +2066,10 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" +checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" dependencies = [ - "arc-swap", "libc", ] @@ -2043,9 +2092,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "snafu" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f5aed652511f5c9123cf2afbe9c244c29db6effa2abb05c866e965c82405ce" +checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09" dependencies = [ "backtrace", "doc-comment", @@ -2054,9 +2103,9 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf8f7d5720104a9df0f7076a8682024e958bba0fe9848767bb44f251f3648e9" +checksum = 
"7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" dependencies = [ "proc-macro2", "quote", @@ -2065,11 +2114,11 @@ dependencies = [ [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "redox_syscall", "winapi 0.3.9", @@ -2083,9 +2132,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" dependencies = [ "version_check", ] @@ -2147,9 +2196,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.16" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5472fb24d7e80ae84a7801b7978f95a19ec32cb1876faea59ab711eb901976" +checksum = "126d630294ec449fae0b16f964e35bf3c74f940da9dca17ee9b905f7b3112eb8" dependencies = [ "clap", "lazy_static", @@ -2158,9 +2207,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0eb37335aeeebe51be42e2dc07f031163fbabfa6ac67d7ea68b5c2f68d5f99" +checksum = "65e51c492f9e23a220534971ff5afc14037289de430e3c83f9daf6a1b6ae91e8" dependencies = [ "heck", "proc-macro-error", @@ -2177,15 +2226,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" [[package]] name = "syn" -version = "1.0.38" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" dependencies = [ "proc-macro2", "quote", @@ -2198,7 +2247,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand", "redox_syscall", @@ -2225,15 +2274,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "termios" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0fcee7b24a25675de40d5bb4de6e41b0df07bc9856295e7e2b3a3600c400c2" -dependencies = [ - "libc", -] - [[package]] name = "textwrap" version = "0.11.0" @@ -2254,21 +2294,22 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.16" 
+version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a51cadc5b1eec673a685ff7c33192ff7b7603d0b75446fb354939ee615acb15" +checksum = "55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" dependencies = [ - "cfg-if", + "const_fn", "libc", "standback", "stdweb", @@ -2279,9 +2320,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae9b6e9f095bc105e183e3cd493d72579be3181ad4004fceb01adbe9eecab2d" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" dependencies = [ "proc-macro-hack", "time-macros-impl", @@ -2312,9 +2353,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" [[package]] name = "tokio" @@ -2365,12 +2406,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", - "rustls 0.18.0", + "rustls 0.18.1", "tokio", "webpki", ] @@ -2401,9 +2442,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" dependencies = [ "serde", ] @@ -2494,24 +2535,35 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.18" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" +checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "log", + "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.13" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d593f98af59ebc017c0648f0117525db358745a8894a8d684e185ba3f45954f9" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-futures" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +dependencies = [ + "pin-project 0.4.27", + "tracing", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -2524,6 +2576,12 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" + [[package]] name = "unicase" version = "2.6.0" @@ -2581,9 +2639,8 @@ version = "0.1.0" dependencies = [ "chrono", "parse-datetime", - "rand", "regex", - "semver 0.10.0", + "semver 0.11.0", 
"serde", "serde_json", "serde_plain", @@ -2649,13 +2706,19 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "serde", "serde_json", "wasm-bindgen-macro", @@ -2663,9 +2726,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" dependencies = [ "bumpalo", "lazy_static", @@ -2678,11 +2741,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f8d235a77f880bcef268d379810ea6c0af2eacfa90b1ad5af731776e0c4699" +checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "js-sys", "wasm-bindgen", "web-sys", @@ -2690,9 +2753,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2700,9 +2763,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ "proc-macro2", "quote", @@ -2713,15 +2776,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" +checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" [[package]] name = "web-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" +checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" dependencies = [ "js-sys", "wasm-bindgen", @@ -2816,6 +2879,6 @@ checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" [[package]] name = "zeroize" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" diff --git 
a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 9bacf3d4..af00f253 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -28,7 +28,7 @@ rusoto_ssm = "0.45.0" rusoto_sts = "0.45.0" simplelog = "0.8" snafu = "0.6" -semver = "0.10.0" +semver = "0.11.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } From d9bfd41ec6ac59b849f331b621321bfb28ee37e8 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 11 Nov 2020 15:39:59 -0800 Subject: [PATCH 0376/1356] pubsys: resolve rustls in coldsnap and tough Update coldsnap to v0.2.0 and set it to use rustls. This is a necessary prerequisite of any tough-related updates. When we update tough, then coldsnap and rough will both resolve to rusoto 0.45 and their feature selections will conflict. By bumping coldsnap and setting it to rustls, we are now able to update tough, tough-kms, or tough-ssm. This commit also explicitly sets the rusoto crates to use rustls. This was also found to be necessary when newer versions of tough* are introduced. --- tools/Cargo.lock | 109 ++++------------------------------------ tools/pubsys/Cargo.toml | 12 ++--- 2 files changed, 16 insertions(+), 105 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c5f8d674..425d74b5 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -134,6 +134,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + [[package]] name = "bitflags" version = "1.2.1" @@ -296,12 +302,12 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89fd66a760caaf8647b3e60a068a03f4adee0091338c69b05a46d263e9efa3bd" +checksum = "4e1073ef4f7d65b3df89ce61595ff7fcad286b1d82ff646c360547ac75df1901" dependencies = [ "argh", - "base64 0.12.3", + "base64 0.13.0", "bytes", "futures", "indicatif", @@ -511,21 +517,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "fuchsia-zircon" version = "0.3.3" @@ -847,19 +838,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "hyper-tls" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-tls", -] - [[package]] name = "idna" version = "0.2.0" @@ -1062,24 +1040,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "native-tls" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "net2" version = "0.2.35" @@ -1167,39 +1127,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" -dependencies = [ - "bitflags", - "cfg-if 0.1.10", - "foreign-types", - "lazy_static", - "libc", - "openssl-sys", -] - [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "openssl-sys" -version = "0.9.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "os_pipe" version = "0.9.2" @@ -1297,12 +1230,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkg-config" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - [[package]] name = "ppv-lite86" version = "0.2.10" @@ -1610,7 +1537,7 @@ dependencies = [ "futures", "http", "hyper", - "hyper-tls", + "hyper-rustls 0.20.0", "lazy_static", "log", "md5", @@ -2416,16 +2343,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-util" version = "0.3.1" @@ -2661,12 +2578,6 @@ dependencies = [ "serde", ] -[[package]] -name = "vcpkg" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" - [[package]] name = "vec_map" version = "0.8.2" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index af00f253..7611c245 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -10,7 +10,7 @@ publish = false async-trait = "0.1.36" chrono = "0.4" clap = "2.33" -coldsnap = "0.1" +coldsnap = { version = "0.2", default-features = false, features = ["rusoto-rustls"]} pubsys-config = { path = "../pubsys-config/" } futures = "0.3.5" indicatif = "0.15.0" @@ -19,13 +19,13 @@ log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } # Need to bring in reqwest with a TLS feature so tough can support TLS repos. 
reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } -rusoto_core = "0.45.0" +rusoto_core = { version = "0.45.0", default-features = false, features = ["rustls"] } rusoto_credential = "0.45.0" -rusoto_ebs = "0.45.0" -rusoto_ec2 = "0.45.0" +rusoto_ebs = { version = "0.45.0", default-features = false, features = ["rustls"] } +rusoto_ec2 = { version = "0.45.0", default-features = false, features = ["rustls"] } rusoto_signature = "0.45.0" -rusoto_ssm = "0.45.0" -rusoto_sts = "0.45.0" +rusoto_ssm = { version = "0.45.0", default-features = false, features = ["rustls"] } +rusoto_sts = { version = "0.45.0", default-features = false, features = ["rustls"] } simplelog = "0.8" snafu = "0.6" semver = "0.11.0" From 38e242dc720a0d45cbf8a9391e7919e5a4267620 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 11 Nov 2020 15:43:08 -0800 Subject: [PATCH 0377/1356] pubsys: update tough-kms to fix kms signing errors This commit fixes an occasional issue where a KMS-signed repo could not be loaded. The fix is in tough-kms v0.1.1. --- tools/Cargo.lock | 14 +++++++------- tools/pubsys/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 425d74b5..a14d2b66 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1623,14 +1623,14 @@ dependencies = [ [[package]] name = "rusoto_kms" -version = "0.44.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c5a083f44d08db76d4deedd7527bb215dd008fa08f4b1d8ca40071522bdbcb7" +checksum = "111b99b940b1b02f5a98a5fcc96467a24ab899c43c1caff60d4a863342798c6e" dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core 0.44.0", + "rusoto_core 0.45.0", "serde", "serde_json", ] @@ -2413,15 +2413,15 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d5273998150708e8639464e4fe21eba7afbf7372f57f4d7ae0dabffd3d8dab" +checksum = "cfb9491d4cc35dafcb72c69254c27da97bad4d5796305ffe9974ed1f470ffc39" dependencies = [ "bytes", "pem", "ring", - "rusoto_core 0.44.0", - "rusoto_credential 0.44.0", + "rusoto_core 0.45.0", + "rusoto_credential 0.45.0", "rusoto_kms", "snafu", "tokio", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 7611c245..8c147bfe 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -36,7 +36,7 @@ tinytemplate = "1.1" tokio = { version = "0.2.21", features = ["time"] } toml = "0.5" tough = { version = "0.9", features = ["http"] } -tough-kms = "0.1" +tough-kms = "0.1.1" tough-ssm = "0.4" update_metadata = { path = "../../sources/updater/update_metadata/" } url = { version = "2.1.0", features = ["serde"] } From 4abb0b188319f7e7a1bc8fbd4f19c2b9076ab7c4 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 18 Nov 2020 22:47:46 +0000 Subject: [PATCH 0378/1356] kernel: enable support for lockdown mode, disabled by default --- packages/kernel/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index 1494d44e..331b57e3 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -47,3 +47,6 @@ CONFIG_IKHEADERS=y # BTF debug info at /sys/kernel/btf/vmlinux CONFIG_DEBUG_INFO_BTF=y + +# Enable support for the kernel lockdown security module. 
+CONFIG_SECURITY_LOCKDOWN_LSM=y From 7fbb7f265b24c9f48224c14bf931e8887690de56 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 18 Nov 2020 22:58:48 +0000 Subject: [PATCH 0379/1356] Add a kernel lockdown setting and corndog helper --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 4a9a5616..ab431a13 100644 --- a/README.md +++ b/README.md @@ -350,6 +350,10 @@ These settings can be changed at any time. #### Kernel settings +* `settings.kernel.lockdown`: This allows further restrictions on what the Linux kernel will allow, for example preventing the loading of unsigned modules. + May be set to "none" (the default), "integrity", or "confidentiality". + **Important note:** this setting cannot be lowered (toward 'none') at runtime. + You must reboot for a change to a lower level to take effect. * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. * Example user data for setting up sysctl: From 57a2535823ff57b8b88284be9963f58801438ee9 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 18 Nov 2020 17:43:12 +0000 Subject: [PATCH 0380/1356] kernel: enable zstd compression for squashfs We use squashfs archives for files that must be included, but which are rarely or never accessed on most running systems. zstd offers compression ratios similar to xz, and decompression speeds like lz4. This saves space while keeping reads fast. Signed-off-by: Ben Cressey --- packages/kernel/config-bottlerocket | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index 331b57e3..bb953ee4 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -38,6 +38,12 @@ CONFIG_SECURITY_SELINUX_DEVELOP=n # rather than the protection requested by userspace. CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 +# Enable support for the kernel lockdown security module. +CONFIG_SECURITY_LOCKDOWN_LSM=y + +# Enable zstd compression for squashfs. +CONFIG_SQUASHFS_ZSTD=y + # enable /proc/config.gz CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -47,6 +53,3 @@ CONFIG_IKHEADERS=y # BTF debug info at /sys/kernel/btf/vmlinux CONFIG_DEBUG_INFO_BTF=y - -# Enable support for the kernel lockdown security module. -CONFIG_SECURITY_LOCKDOWN_LSM=y From d509619667904527357366fa14af64790da6b60d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 18 Nov 2020 17:53:39 +0000 Subject: [PATCH 0381/1356] kernel: overhaul kernel development sources Previously, we included host programs like `objtool` which are built with the default `gcc` compiler and not our cross-compiler toolchain. This works as long as the running system matches our build host, but would break if we began building x86_64 images on an aarch64 system. The reverse is not true today, but only because `objtool` is not yet required for the arm64 target. Ideally, we'd be able to cross-compile these host programs, but that isn't supported by the kernel's build system, and would be hard to implement. For example, `fixdep` is both a tool we'd want to ship, meaning it would need to be cross-compiled, and a tool that's used to build `objtool`, meaning it couldn't be cross-compiled and still run on the build host. Instead we push the problem out to the downstream consumer, who can be relied on to have a compiler that can build native versions of the host programs. 
This requires shipping all the headers, tools, and scripts needed to run `make prepare`. For compatibility with solutions like DKMS, which do not expect to run anything but the module build, we add a minimal prepare target to this path so that the host programs will be automatically rebuilt. We also make some edits and exclude some files to avoid dependencies on bison, flex, and OpenSSL. Signed-off-by: Ben Cressey --- ...-prepare-target-for-external-modules.patch | 50 ++++++++++ packages/kernel/kernel.spec | 98 ++++++++++++++----- 2 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch diff --git a/packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch new file mode 100644 index 00000000..80142928 --- /dev/null +++ b/packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch @@ -0,0 +1,50 @@ +From 6e4fa756a327a510f8713d60dc257aaeed5e33d7 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Fri, 13 Nov 2020 23:37:11 +0000 +Subject: [PATCH] Makefile: add prepare target for external modules + +We need to ensure that native versions of programs like `objtool` are +built before trying to build out-of-tree modules, or else the build +will fail. + +Unlike other distributions, we cannot include these programs in our +kernel-devel archive, because we rely on cross-compilation: these are +"host" programs and may not match the architecture of the target. + +Ideally, out-of-tree builds would run `make prepare` first, so that +these programs could be compiled in the normal fashion. We ship all +the files needed for this to work. However, this requirement is +specific to our use case, and DKMS does not support it. + +Adding a minimal prepare target to the dependency graph causes the +programs to be built automatically and improves compatibility with +existing solutions. + +Signed-off-by: Ben Cressey +--- + Makefile | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/Makefile b/Makefile +index 29948bc4a0d2..2f766911437c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1613,6 +1613,15 @@ $(objtree)/Module.symvers: + echo " is missing; modules will have no dependencies and modversions."; \ + echo ) + ++PHONY += modules_prepare ++modules_prepare: $(objtool_target) ++ $(Q)$(MAKE) $(build)=scripts/basic ++ $(Q)$(MAKE) $(build)=scripts/dtc ++ $(Q)$(MAKE) $(build)=scripts/mod ++ $(Q)$(MAKE) $(build)=scripts ++ ++prepare: modules_prepare ++ + build-dirs := $(KBUILD_EXTMOD) + PHONY += modules + modules: descend $(objtree)/Module.symvers +-- +2.21.0 + diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 8e8bc818..f2910479 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -9,7 +9,13 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/195cb5ec07623ef7be724bd99bd49eda17ebe7ad82d752d0e39b096de5411500/kernel-5.4.68-34.125.amzn2.src.rpm Source100: config-bottlerocket + +# Make Lustre FSx work with a newer GCC. Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch + +# Help out-of-tree module builds run `make prepare` automatically. 
+Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname @@ -84,35 +90,83 @@ find %{buildroot}%{_cross_prefix} \ \( -name .install -o -name .check -o \ -name ..install.cmd -o -name ..check.cmd \) -delete -# files for external module compilation +# For out-of-tree kmod builds, we need to support the following targets: +# make scripts -> make prepare -> make modules +# +# This requires enough of the kernel tree to build host programs under the +# "scripts" and "tools" directories. + +# Any existing ELF objects will not work properly if we're cross-compiling for +# a different architecture, so get rid of them to avoid confusing errors. +find arch scripts tools -type f -executable \ + -exec sh -c "head -c4 {} | grep -q ELF && rm {}" \; + +# We don't need to include these files. +find -type f \( -name \*.cmd -o -name \*.gitignore \) -delete + +# Avoid an OpenSSL dependency by stubbing out options for module signing and +# trusted keyrings, so `sign-file` and `extract-cert` won't be built. External +# kernel modules do not have access to the keys they would need to make use of +# these tools. +sed -i \ + -e 's,$(CONFIG_MODULE_SIG_FORMAT),n,g' \ + -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ + scripts/Makefile + ( - find * -name Kbuild\* -type f -print \ - -o -name Kconfig\* -type f -print \ - -o -name Makefile\* -type f -print \ - -o -name module.lds -type f -print \ - -o -name Platform -type f -print - find arch/*/include/ include/ -type f -o -type l - find scripts/ -executable -type f - find scripts/ ! \( -name Makefile\* -o -name Kbuild\* \) -type f + find * \ + -type f \ + \( -name Build\* -o -name Kbuild\* -o -name Kconfig\* -o -name Makefile\* \) \ + -print + + find arch/%{_cross_karch}/ \ + -type f \ + \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ + -print + + find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print + echo arch/%{_cross_karch}/kernel/asm-offsets.s + echo lib/vdso/gettimeofday.c + + for d in \ + arch/%{_cross_karch}/tools \ + arch/%{_cross_karch}/kernel/vdso ; do + [ -d "${d}" ] && find "${d}/" -type f -print + done + + find include -type f -print + find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print + + find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o -print + echo tools/build/fixdep.c + find tools/lib/subcmd -type f -print + find tools/lib/{ctype,string,str_error_r}.c + + echo kernel/bounds.c + echo kernel/time/timeconst.bc + echo security/selinux/include/classmap.h + echo security/selinux/include/initial_sid_to_string.h + echo .config echo Module.symvers echo System.map ) | sort -u > kernel_devel_files -# remove x86 intermediate files like generated/asm/.syscalls_32.h.cmd -sed -i '/asm\/.*\.cmd$/d' kernel_devel_files - -## Create squashfs of kernel-devel files (ie. /usr/src/kernels/) -mkdir src_squashfs -for file in $(cat kernel_devel_files); do - install -D ${file} src_squashfs/%{version}/${file} -done -# if we have it, include objtool (not all arches support it yet) -if [ "%{_cross_karch}" == "x86" ]; then - install -D tools/objtool/objtool src_squashfs/%{version}/tools/objtool/objtool -fi +# Create squashfs of kernel-devel files (ie. /usr/src/kernels/). +# +# -no-exports: +# The filesystem does not need to be exported via NFS. +# +# -all-root: +# Make all files owned by root rather than the build user. 
+# +# -comp zstd: +# zstd offers compression ratios like xz and decompression speeds like lz4. +SQUASHFS_OPTS="-no-exports -all-root -comp zstd" +mkdir -p src_squashfs/%{version} +tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} +mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} -mksquashfs src_squashfs kernel-devel.squashfs install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs install -d %{buildroot}%{kernel_sourcedir} From 211936ee4487b877ba58e28c5ec71ea997b88e94 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 18 Nov 2020 18:18:58 +0000 Subject: [PATCH 0382/1356] kernel: package archived development sources The squashfs filesystem is meant to be used on a running host, while a tarball is easier to work with when assembling a combined archive that also includes our toolchain. Signed-off-by: Ben Cressey --- packages/kernel/kernel.spec | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index f2910479..4e7393e6 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -35,6 +35,12 @@ Requires: %{_cross_os}filesystem %description devel %{summary}. +%package archive +Summary: Archived Linux kernel source for module building + +%description archive +%{summary}. + %package modules Summary: Modules for the Linux kernel @@ -167,7 +173,15 @@ mkdir -p src_squashfs/%{version} tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} +# Create a tarball of the same files, for use outside the running system. +# In theory we could extract these files with `unsquashfs`, but we do not want +# to require it to be installed on the build host, and it errors out when run +# inside Docker unless the limit for open files is lowered. +tar cf kernel-devel.tar src_squashfs/%{version} --transform='s|src_squashfs/%{version}|kernel-devel|' +xz -T0 kernel-devel.tar + install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs +install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz install -d %{buildroot}%{kernel_sourcedir} # Replace the incorrect links from modules_install. These will be bound @@ -216,4 +230,7 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %dir %{kernel_sourcedir} %{_cross_datadir}/bottlerocket/kernel-devel.squashfs +%files archive +%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + %changelog From 15dbdbaceb46d621448b968150e444d939936fc5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 16 Nov 2020 23:06:29 +0000 Subject: [PATCH 0383/1356] build: set options for license squashfs Apply the same options we use for the kernel-devel squashfs. 
Signed-off-by: Ben Cressey --- tools/rpm2img | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index d74575ad..d553e2a0 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -81,7 +81,10 @@ sgdisk --clear \ rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ -mksquashfs "${ROOT_MOUNT}"/usr/share/licenses "${ROOT_MOUNT}"/usr/share/bottlerocket/licenses.squashfs +mksquashfs \ + "${ROOT_MOUNT}"/usr/share/licenses \ + "${ROOT_MOUNT}"/usr/share/bottlerocket/licenses.squashfs \ + -no-exports -all-root -comp zstd rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* if [[ "${ARCH}" == "x86_64" ]]; then From ece45e0a8834f2b737ca7a3b06bfe9853caf66b3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 26 Oct 2020 18:11:09 +0000 Subject: [PATCH 0384/1356] build: add target for kmod kit To support compiling out-of-tree modules ahead of time, rather than on a running Bottlerocket host, we need to provide two things: the kernel development sources, such as headers and Makefiles; and the toolchain we use to build our kernel. Our toolchain is built separately as part of our cross-compiling SDK, and it's possible, if unlikely, that we would ship two releases with the same kernel version built with a different GCC. It's also possible that variants will use different kernels, so we cannot have just one development kit per release. This is not yet supported, but we need the ecosystem to anticipate the requirement for a per-variant, per-architecture kit. The build target combines the archives from the toolchain matching the SDK we used to build the kernel, and kernel development sources from the most recent build. This produces a single artifact that can be uploaded for later retrieval by a consumer that knows the variant, architecture, and version that they are targeting. Signed-off-by: Ben Cressey --- .dockerignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.dockerignore b/.dockerignore index 8a235523..7c5790e1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,6 +2,7 @@ /.gomodcache /build/**/*.img /build/**/*.lz4 +/build/**/*.xz /build/**/*.tar /build/**/*-debuginfo-*.rpm /build/**/*-debugsource-*.rpm From d4479ebddea45661855cada3d115584041930aef Mon Sep 17 00:00:00 2001 From: Pranav Date: Sun, 6 Dec 2020 16:16:53 +0530 Subject: [PATCH 0385/1356] tools: Fix message in the partition size check condition --- tools/rpm2img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index d553e2a0..ad40a22a 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -148,7 +148,7 @@ veritysetup_output="$(veritysetup format \ "${ROOT_IMAGE}" "${VERITY_IMAGE}" \ | tee /dev/stderr)" if ! stat -c %s "${VERITY_IMAGE}" | grep -q '^8388608$'; then - "verity partition is larger than expected (4M)" + echo "verity partition is larger than expected (8M)" exit 1 fi VERITY_DATA_4K_BLOCKS="$(grep '^Data blocks:' <<<$veritysetup_output | awk '{ print $NF }')" From 47eae022bdf7e98d3408299d76123b48c4743244 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 16 Dec 2020 03:35:38 +0000 Subject: [PATCH 0386/1356] Add host-container user-data setting You can set settings.host-containers.NAME.user-data to a valid base64-encoded string to have the (decoded) data placed in a file accessible to the host container at /.bottlerocket/host-containers/NAME/user-data. 
--- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index ab431a13..af53f338 100644 --- a/README.md +++ b/README.md @@ -378,6 +378,9 @@ These settings can be changed at any time. Beyond just changing the settings above to affect the `admin` and `control` containers, you can add and remove host containers entirely. As long as you define the three fields above -- `source` with a URI, and `enabled` and `superpowered` with true/false -- you can add host containers with an API call or user data. +You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data`. +(It was inspired by instance user data, but is entirely separate; it can be any data your host container feels like interpreting.) + Here's an example of adding a custom host container with API calls: ``` apiclient -u /settings -X PATCH -d '{"host-containers": {"custom": {"source": "MY-CONTAINER-URI", "enabled": true, "superpowered": false}}}' From 987479504a1aefde3dcfcca06e63cafcefbed3bd Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 11 Nov 2020 00:06:23 -0800 Subject: [PATCH 0387/1356] settings.network: add new proxy settings Add new settings for configuring HTTPS_PROXY/https_proxy and NO_PROXY/no_proxy environment variables. These environment variables will only be exposed to a limited set of services that need them. --- README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.md b/README.md index af53f338..ee20bac1 100644 --- a/README.md +++ b/README.md @@ -344,6 +344,27 @@ These settings can be changed at any time. * `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. * `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. For testing purposes, you can set this to `true` to ignore those waves and update immediately. +#### Network settings + +##### Proxy settings + +These settings will configure the proxying behavior of the following services: +* For all variants: + * [containerd.service](packages/containerd/containerd.service) + * [host-containerd.service](packages/host-ctr/host-containerd.service) +* For Kubernetes variants: + * [kubelet.service](packages/kubernetes-1.18/kubelet.service) +* For the ECS variant: + * [docker.service](packages/docker-engine/docker.service) + * [ecs.service](packages/ecs-agent/ecs.service) + +* `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. +* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. + +The no-proxy list will automatically include entries for localhost. + +If you're running a Kubernetes variant, the no-proxy list will automatically include the Kubernetes API server endpoint and other commonly used Kubernetes DNS suffixes to facilitate intra-cluster networking. + #### Time settings * `settings.ntp.time-servers`: A list of NTP servers used to set and verify the system time. 
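As a rough sketch of how the proxy settings documented in the patch above could be exercised (the host name, port, and no-proxy entries below are placeholders, not values taken from the patch), TOML user data along these lines would set both values, assuming the same user-data format used for other settings in this README:

```
# Hypothetical values for illustration only.
[settings.network]
https-proxy = "proxy.example.com:8080"
no-proxy = ["internal.example.com"]
```

The same pair of settings could presumably also be patched at runtime through the API, in the same way the host-container example elsewhere in the README updates settings.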
From 811e252ce85bda12e3c226489e735d55b3f1ee47 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 5 Jan 2021 00:42:38 +0000 Subject: [PATCH 0388/1356] kernel: update to 5.4.80 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index cab9d3ec..dd2c7a86 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/195cb5ec07623ef7be724bd99bd49eda17ebe7ad82d752d0e39b096de5411500/kernel-5.4.68-34.125.amzn2.src.rpm" -sha512 = "83833e09f14d8fbabae723051b4f24e9b658502d7286918146f12e26783985161194e8b94eeceea03812738495be87563c9e36738f22ae2a5f93c43b08ab3c52" +url = "https://cdn.amazonlinux.com/blobstore/5923078ac40834106f279fb42b9b177fea5c8136725a231e353772dbae9bce93/kernel-5.4.80-40.140.amzn2.src.rpm" +sha512 = "b09194da42aabe041992ed654dcca86bb245093e62b9a2c7a542fb6b6343f397cc96dd7d8734b258a01566a42027dc96ef80ebcf593222e50c722ec3ca74eff0" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 4e7393e6..ec69ff59 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.68 +Version: 5.4.80 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/195cb5ec07623ef7be724bd99bd49eda17ebe7ad82d752d0e39b096de5411500/kernel-5.4.68-34.125.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/5923078ac40834106f279fb42b9b177fea5c8136725a231e353772dbae9bce93/kernel-5.4.80-40.140.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From b6627072ffe09cf76181d9f040c3954bd3a688e7 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 15 Dec 2020 18:07:56 -0800 Subject: [PATCH 0389/1356] Add aws-k8s-1.19 variant with Kubernetes 1.19 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6c6503cb..19cf86d0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-ecs-1] + variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: From a2c039e4d5d041127b5b499bceb8812130e65442 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 7 Jan 2021 14:28:32 -0800 Subject: [PATCH 0390/1356] BUILDING: Add note about CPU performance --- BUILDING.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/BUILDING.md b/BUILDING.md index 03c44ed6..6fddad01 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -12,6 +12,11 @@ You can skip to the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setu The build process artifacts and resulting images can consume in excess of 80GB in the local directory. +The build process is also fairly demanding on your CPU, since we build all included software from scratch. +(The first time. Package builds are cached, and only changes are built afterward.) +The build scales well to 32+ cores. 
+The first time you build, the fastest machines can take about 12 minutes while slower machines with only a couple cores can take 3-4 hours. + #### Linux The build system requires certain operating system packages to be installed. From 0715cf4a4685bbc0d839cec65b06110e80e55478 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Thu, 7 Jan 2021 13:34:01 -0800 Subject: [PATCH 0391/1356] tools: update cargo dependencies --- tools/Cargo.lock | 464 ++++++++++++++++++++------------- tools/buildsys/Cargo.toml | 2 +- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys-setup/src/main.rs | 2 +- 4 files changed, 284 insertions(+), 186 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a14d2b66..56467b64 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -76,9 +76,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "async-trait" -version = "0.1.41" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ "proc-macro2", "quote", @@ -104,9 +104,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2baad346b2d4e94a24347adeee9c7a93f412ee94b9cc26e5b59dea23848e9f28" +checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -118,9 +118,9 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2734baf8ed08920ccecce1b48a2dfce4ac74a973144add031163bd21a1c5dab" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base64" @@ -203,7 +203,7 @@ dependencies = [ "duct", "hex", "nonzero_ext", - "rand", + "rand 0.8.1", "reqwest", "serde", "serde_plain", @@ -255,9 +255,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.61" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" +checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" [[package]] name = "cfg-if" @@ -324,9 +324,9 @@ dependencies = [ [[package]] name = "console" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50aab2529019abfabfa93f1e6c41ef392f91fbf179b347a7e96abb524884a08" +checksum = "7cc80946b3480f421c2f17ed1cb841753a371c7c5104f51d507e13f532c856aa" dependencies = [ "encode_unicode", "lazy_static", @@ -335,14 +335,13 @@ dependencies = [ "terminal_size", "unicode-width", "winapi 0.3.9", - "winapi-util", ] [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" 
+checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -383,12 +382,12 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "lazy_static", ] @@ -409,7 +408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.4", - "subtle 2.3.0", + "subtle 2.4.0", ] [[package]] @@ -474,9 +473,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "duct" @@ -517,6 +516,16 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "form_urlencoded" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +dependencies = [ + "matches", + "percent-encoding", +] + [[package]] name = "fuchsia-zircon" version = "0.3.3" @@ -535,9 +544,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" +checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", @@ -550,9 +559,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", @@ -560,15 +569,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-executor" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", @@ -577,15 +586,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = 
"futures-macro" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -595,24 +604,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" +checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ "futures-channel", "futures-core", @@ -621,7 +630,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.1", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -649,15 +658,26 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", +] + [[package]] name = "gimli" version = "0.23.0" @@ -705,9 +725,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -749,9 +769,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -796,7 +816,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.1", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -851,9 +871,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" 
dependencies = [ "autocfg", "hashbrown", @@ -888,15 +908,15 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.45" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" +checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" dependencies = [ "wasm-bindgen", ] @@ -919,9 +939,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "log" @@ -978,9 +998,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ "cfg-if 0.1.10", "fuchsia-zircon", @@ -989,7 +1009,7 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow 0.2.1", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", @@ -1003,7 +1023,7 @@ checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", - "miow 0.3.5", + "miow 0.3.6", "winapi 0.3.9", ] @@ -1020,9 +1040,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -1032,9 +1052,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", "winapi 0.3.9", @@ -1042,9 +1062,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.35" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1111,9 +1131,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "opaque-debug" @@ -1154,11 +1174,11 @@ dependencies = [ [[package]] name = "pem" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"59698ea79df9bf77104aefd39cc3ec990cb9693fb59c3b0a70ddf2646fdffb4b" +checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "once_cell", "regex", ] @@ -1189,11 +1209,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.1", + "pin-project-internal 1.0.3", ] [[package]] @@ -1209,9 +1229,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1224,6 +1244,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +[[package]] +name = "pin-project-lite" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" + [[package]] name = "pin-utils" version = "0.1.0" @@ -1306,7 +1332,7 @@ dependencies = [ "semver 0.11.0", "serde", "serde_json", - "simplelog", + "simplelog 0.8.0", "snafu", "structopt", "tempfile", @@ -1342,7 +1368,7 @@ dependencies = [ "reqwest", "sha2 0.9.2", "shell-words", - "simplelog", + "simplelog 0.9.0", "snafu", "structopt", "tempfile", @@ -1352,9 +1378,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1365,11 +1391,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.16", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.1", + "rand_hc 0.3.0", ] [[package]] @@ -1379,7 +1417,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.1", ] [[package]] @@ -1388,7 +1436,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.16", +] + 
+[[package]] +name = "rand_core" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" +dependencies = [ + "getrandom 0.2.1", ] [[package]] @@ -1397,7 +1454,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.1", ] [[package]] @@ -1412,7 +1478,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] @@ -1446,11 +1512,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.8" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", @@ -1466,10 +1532,10 @@ dependencies = [ "mime", "mime_guess", "percent-encoding", - "pin-project-lite", + "pin-project-lite 0.2.1", "rustls 0.18.1", "serde", - "serde_urlencoded", + "serde_urlencoded 0.7.0", "tokio", "tokio-rustls 0.14.1", "url", @@ -1482,9 +1548,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.15" +version = "0.16.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" dependencies = [ "cc", "libc", @@ -1617,7 +1683,7 @@ dependencies = [ "bytes", "futures", "rusoto_core 0.45.0", - "serde_urlencoded", + "serde_urlencoded 0.6.1", "xml-rs", ] @@ -1656,7 +1722,7 @@ dependencies = [ "rustc_version", "serde", "sha2 0.8.2", - "time 0.2.22", + "time 0.2.23", "tokio", ] @@ -1681,7 +1747,7 @@ dependencies = [ "rustc_version", "serde", "sha2 0.9.2", - "time 0.2.22", + "time 0.2.23", "tokio", ] @@ -1724,18 +1790,18 @@ dependencies = [ "chrono", "futures", "rusoto_core 0.45.0", - "serde_urlencoded", + "serde_urlencoded 0.6.1", "tempfile", "xml-rs", ] [[package]] name = "rust-argon2" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1867,7 +1933,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser 0.10.1", + "semver-parser 0.10.2", "serde", ] @@ -1879,27 +1945,27 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "semver-parser" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42ef146c2ad5e5f4b037cd6ce2ebb775401729b19a82040c1beac9d36c7d1428" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" dependencies = [ "pest", ] [[package]] name = "serde" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" dependencies = [ "proc-macro2", "quote", @@ -1908,9 +1974,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -1938,6 +2004,18 @@ dependencies = [ "url", ] +[[package]] +name = "serde_urlencoded" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "sha1" version = "0.6.0" @@ -1993,9 +2071,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2011,6 +2089,17 @@ dependencies = [ "termcolor", ] +[[package]] +name = "simplelog" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bc0ffd69814a9b251d43afcabf96dad1b29f5028378056257be9e3fecc9f720" +dependencies = [ + "chrono", + "log", + "termcolor", +] + [[package]] name = "slab" version = "0.4.2" @@ -2019,9 +2108,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "snafu" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09" +checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" dependencies = [ "backtrace", "doc-comment", @@ -2030,9 +2119,9 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" +checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" dependencies = [ "proc-macro2", "quote", @@ -2041,13 +2130,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.3.15" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 
1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2059,9 +2147,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.11" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2123,9 +2211,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126d630294ec449fae0b16f964e35bf3c74f940da9dca17ee9b905f7b3112eb8" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" dependencies = [ "clap", "lazy_static", @@ -2134,9 +2222,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e51c492f9e23a220534971ff5afc14037289de430e3c83f9daf6a1b6ae91e8" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" dependencies = [ "heck", "proc-macro-error", @@ -2153,15 +2241,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.48" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2176,7 +2264,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2184,18 +2272,18 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] [[package]] name = "terminal_size" -version = "0.1.13" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a14cd9f8c72704232f0bfc8455c0e861f0ad4eb60cc9ec8a170e231414c1e13" +checksum = "4bd2d183bd3fac5f5fe38ddbeb4dc9aec4a39a9d7d59e7491d900302da01cbe1" dependencies = [ "libc", "winapi 0.3.9", @@ -2232,9 +2320,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" +checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" dependencies = [ "const_fn", "libc", @@ -2270,9 +2358,9 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" +checksum = "a2ada8616fad06a2d0c455adc530de4ef57605a8120cc65da9653e0e9623ca74" dependencies = [ "serde", "serde_json", @@ -2280,15 +2368,24 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.4" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2301,7 +2398,7 @@ dependencies = [ "mio-named-pipes", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.11", "signal-hook-registry", "slab", "tokio-macros", @@ -2310,9 +2407,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -2353,15 +2450,15 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite", + "pin-project-lite 0.1.11", "tokio", ] [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2452,13 +2549,13 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log", - "pin-project-lite", + "pin-project-lite 0.2.1", "tracing-core", ] @@ -2519,18 +2616,18 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-width" @@ -2568,10 +2665,11 @@ dependencies = [ [[package]] name = "url" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" dependencies = [ + "form_urlencoded", "idna", "matches", "percent-encoding", @@ -2625,11 +2723,11 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" +checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -2637,9 +2735,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" +checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" dependencies = [ "bumpalo", "lazy_static", @@ -2652,11 +2750,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" +checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -2664,9 +2762,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" +checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2674,9 +2772,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" +checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" dependencies = [ "proc-macro2", "quote", @@ -2687,15 +2785,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.68" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" +checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" [[package]] name = "web-sys" -version = "0.3.45" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" +checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2703,9 +2801,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", @@ -2713,9 +2811,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.19.0" +version = "0.20.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" dependencies = [ "webpki", ] @@ -2790,6 +2888,6 @@ checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" [[package]] name = "zeroize" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 5be0f61f..51be8fd6 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -11,7 +11,7 @@ exclude = ["README.md"] [dependencies] duct = "0.13.0" hex = "0.4.0" -rand = { version = "0.7", default-features = false, features = ["std"] } +rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 9df5fa75..8da543cf 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -13,7 +13,7 @@ pubsys-config = { path = "../pubsys-config/" } reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } sha2 = "0.9" shell-words = "1.0" -simplelog = "0.8" +simplelog = "0.9" snafu = "0.6" structopt = { version = "0.3", default-features = false } tempfile = "3.1" diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index cb5a9253..ca00fd88 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -341,7 +341,7 @@ mod error { }, #[snafu(display("Logger setup error: {}", source))] - Logger { source: simplelog::TermLogError }, + Logger { source: log::SetLoggerError }, #[snafu(display("'{}' repo has root role but no key. You wouldn't be able to update a repo without the matching key. To continue, pass '-e ALLOW_MISSING_KEY=true'", repo))] MissingKey { repo: String }, From 8320417d2ce13c828a7cf8791aa842296927729f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 11 Jan 2021 11:46:04 -0800 Subject: [PATCH 0392/1356] Fix build.yml to ignore changes to all .md files That's a lot of CI to run for a PR to a markdown file. This PR should just say "hey, if it's all md files, just, y'know, don't do that." I can't think of any cases in which we'd want .md file changes to kick off CI runs, but if you can, feel free to reject this PR. Also, here's to the GitHub built-in editor for making sure the yaml is right. 
--- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 19cf86d0..e3657fd4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -2,6 +2,8 @@ name: Build on: pull_request: branches: [develop] + paths-ignore: + - '**.md' jobs: build: runs-on: [self-hosted, linux, x64] From 53dabdf249b4feb4bc58416edb8eac3e2007a47a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 12 Jan 2021 18:32:36 -0500 Subject: [PATCH 0393/1356] Update README to point to discussions --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ee20bac1..16699dd9 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,9 @@ You can look at [existing issues](https://github.com/bottlerocket-os/bottlerocke If not, you can select from a few templates and get some guidance on the type of information that would be most helpful. [Contact us with a new issue here.](https://github.com/bottlerocket-os/bottlerocket/issues/new/choose) -We don't have other communication channels set up quite yet, but don't worry about making an issue! +If you just have questions about Bottlerocket, please feel free to [start or join a discussion](https://github.com/bottlerocket-os/bottlerocket/discussions). + +We don't have other communication channels set up quite yet, but don't worry about making an issue or a discussion thread! You can let us know about things that seem difficult, or even ways you might like to help. ## Variants From a26117e881aa5b1694ef361ba61c319725929658 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 13 Jan 2021 00:32:10 +0000 Subject: [PATCH 0394/1356] kernel: enable early LSM support for lockdown This fixes an issue where the Lockdown LSM wasn't actually consulted for security decisions, because it does not appear in the CONFIG_LSM list. As an alternative, we could override CONFIG_LSM to add lockdown: CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity, selinux,smack,tomoyo,apparmor" However, then that list would need to be maintained when updating the kernel, or any new LSMs would be quietly ignored. Enabling early LSM support resolves the issue, and makes it possible for custom builds to specify the desired lockdown state on the kernel command line. Signed-off-by: Ben Cressey --- packages/kernel/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index bb953ee4..caa6f106 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -41,6 +41,10 @@ CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 # Enable support for the kernel lockdown security module. CONFIG_SECURITY_LOCKDOWN_LSM=y +# Enable lockdown early so that if the option is present on the +# kernel command line, it can be enforced. +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y + # Enable zstd compression for squashfs. 
CONFIG_SQUASHFS_ZSTD=y From 6454dbbeafeafe85f92da64f281e0683dde65941 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 21 Jan 2021 00:54:58 +0000 Subject: [PATCH 0395/1356] BUILDING: update to describe retrieval and use of kmod kit --- BUILDING.md | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/BUILDING.md b/BUILDING.md index 6fddad01..c9dee13b 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -119,3 +119,50 @@ See the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for ## Publish your image See the [PUBLISHING](PUBLISHING.md) guide for information on deploying Bottlerocket images and repositories. + +## Building out-of-tree kernel modules + +To further extend Bottlerocket, you may want to build extra kernel modules. +The specifics of building an out-of-tree module will vary by project, but the first step is to download the "kmod kit" that contains the kernel headers and toolchain you'll need to use. + +### Downloading the kmod kit + +kmod kits are included in the official Bottlerocket repos starting with Bottlerocket v1.0.6. +Let's say you want to download the kit for building x86_64 modules for v1.0.6 and variant aws-k8s-1.18. + +First, you need tuftool: +```bash +cargo install tuftool +``` + +Next, you need the Bottlerocket root role, which is used by tuftool to verify the kmod kit. +This will download and verify the root role itself: +```bash +curl -O "https://cache.bottlerocket.aws/root.json" +sha512sum -c <<<"90393204232a1ad6b0a45528b1f7df1a3e37493b1e05b1c149f081849a292c8dafb4ea5f7ee17bcc664e35f66e37e4cfa4aae9de7a2a28aa31ae6ac3d9bea4d5 root.json" +``` + +Next, set your desired parameters, and download the kmod kit: +```bash +ARCH=x86_64 +VERSION=v1.0.6 +VARIANT=aws-k8s-1.18 + +tuftool download . --target-name ${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz \ + --root ./root.json \ + --metadata-url "https://updates.bottlerocket.aws/2020-07-07/${VARIANT}/${ARCH}/" \ + --targets-url "https://updates.bottlerocket.aws/targets/" +``` + +### Using the kmod kit + +To use the kmod kit, extract it, and update your PATH to use its toolchain: +```bash +tar xf "${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz" + +export CROSS_COMPILE="${ARCH}-bottlerocket-linux-musl-" +export KERNELDIR="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/kernel-devel +export PATH="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/toolchain/usr/bin:${PATH}" +``` + +Now you can compile modules against the kernel headers in `${KERNELDIR}`. From 72170a0cfc3afffac735bbbd2a184121fb2f41ea Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 28 Oct 2020 18:54:03 -0700 Subject: [PATCH 0396/1356] tough: update to v0.10.0 This version of tough changes the interface of the Repository struct and its load function. The usage sites have been updated to use the new interface. updog: The query parameter mechanism needed to be changed to accommodate the fact that Repository now owns a Transport instead of holding a reference. This is done by holding an Arc pointer to the query params both inside and outside of the custom Transport. pubsys: A custom Transport is no-longer necessary (its purpose was to support both http and file transport, which DefaultTransport now does). Additionally, the new TransportError gives pubsys the information it needs to know whether a repo already exists or not. update_metadata and migrator: no significant changes. 
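To make the interface change concrete, the sketch below shows the tough 0.10 loading pattern roughly as it is applied in the diff that follows. The helper function, its name, and its parameters are illustrative only and are not part of this patch:

```rust
use std::fs::File;
use tough::{RepositoryLoader, TransportErrorKind};
use url::Url;

/// Hypothetical helper, not part of the patch: load a repo if it exists.
fn load_repo(
    root_role: File,
    metadata_url: &Url,
    targets_url: &Url,
) -> Result<Option<tough::Repository>, tough::error::Error> {
    // RepositoryLoader owns its Transport, so no custom RepoTransport or
    // temporary datastore directory is needed as it was with tough 0.9.
    let result =
        RepositoryLoader::new(root_role, metadata_url.clone(), targets_url.clone()).load();

    match result {
        Ok(repo) => Ok(Some(repo)),
        // The transport error kind lets callers distinguish "repo not found"
        // from other failures, replacing pubsys's custom Transport.
        Err(tough::error::Error::Transport { ref source, .. })
            if matches!(source.kind(), TransportErrorKind::FileNotFound) =>
        {
            Ok(None)
        }
        Err(e) => Err(e),
    }
}
```

With tough 0.9, the equivalent code had to build a `Settings` struct around a temporary datastore and pass a borrowed `Transport`; the loader-based API removes both requirements.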
--- tools/Cargo.lock | 308 +++--------------- tools/pubsys/Cargo.toml | 6 +- tools/pubsys/src/repo.rs | 68 ++-- .../pubsys/src/repo/check_expirations/mod.rs | 40 +-- tools/pubsys/src/repo/refresh_repo/mod.rs | 37 +-- tools/pubsys/src/repo/transport.rs | 63 ---- tools/pubsys/src/repo/validate_repo/mod.rs | 43 +-- 7 files changed, 127 insertions(+), 438 deletions(-) delete mode 100644 tools/pubsys/src/repo/transport.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 56467b64..c5abc440 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -157,34 +157,13 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.3", -] - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -207,7 +186,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2 0.9.2", + "sha2", "snafu", "toml", "url", @@ -220,18 +199,6 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - [[package]] name = "bytes" version = "0.5.6" @@ -311,12 +278,12 @@ dependencies = [ "bytes", "futures", "indicatif", - "rusoto_core 0.45.0", - "rusoto_credential 0.45.0", + "rusoto_core", + "rusoto_credential", "rusoto_ebs", "rusoto_ec2", - "rusoto_signature 0.45.0", - "sha2 0.9.2", + "rusoto_signature", + "sha2", "snafu", "tempfile", "tokio", @@ -391,24 +358,14 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.3", - "subtle 1.0.0", -] - [[package]] name = "crypto-mac" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -420,22 +377,13 @@ dependencies = [ "sct", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.3", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -489,6 +437,12 @@ dependencies = [ "shared_child", ] +[[package]] +name = "dyn-clone" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + [[package]] name = "encode_unicode" version = "0.3.6" @@ -504,12 +458,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fnv" version = "1.0.7" @@ -637,15 +585,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.4" @@ -747,24 +686,14 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - [[package]] name = "hmac" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", + "crypto-mac", + "digest", ] [[package]] @@ -1135,12 +1064,6 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -1322,12 +1245,12 @@ dependencies = [ "parse-datetime", "pubsys-config", "reqwest", - "rusoto_core 0.45.0", - "rusoto_credential 0.45.0", + "rusoto_core", + "rusoto_credential", "rusoto_ebs", "rusoto_ec2", - "rusoto_signature 0.45.0", - "rusoto_ssm 0.45.0", + "rusoto_signature", + "rusoto_ssm", "rusoto_sts", "semver 0.11.0", "serde", @@ -1339,7 +1262,7 @@ dependencies = [ "tinytemplate", "tokio", "toml", - "tough 0.9.0", + "tough", "tough-kms", "tough-ssm", "update_metadata", @@ -1366,7 +1289,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2 0.9.2", + "sha2", "shell-words", "simplelog 0.9.0", "snafu", @@ -1561,35 +1484,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "rusoto_core" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841ca8f73e7498ba39146ab43acea906bbbb807d92ec0b7ea4b6293d2621f80d" -dependencies = [ - "async-trait", - "base64 0.12.3", - "bytes", - "futures", - "hmac 0.7.1", - "http", - "hyper", - "hyper-rustls 0.20.0", - "lazy_static", - "log", - "md5", - "percent-encoding", - "pin-project 0.4.27", - "rusoto_credential 0.44.0", - "rusoto_signature 0.44.0", - "rustc_version", - "serde", - "serde_json", - "sha2 0.8.2", - "tokio", - "xml-rs", -] - [[package]] name = "rusoto_core" version = 
"0.45.0" @@ -1609,8 +1503,8 @@ dependencies = [ "md5", "percent-encoding", "pin-project 0.4.27", - "rusoto_credential 0.45.0", - "rusoto_signature 0.45.0", + "rusoto_credential", + "rusoto_signature", "rustc_version", "serde", "serde_json", @@ -1618,26 +1512,6 @@ dependencies = [ "xml-rs", ] -[[package]] -name = "rusoto_credential" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60669ddc1bdbb83ce225593649d36b4c5f6bf9db47cc1ab3e81281abffc853f4" -dependencies = [ - "async-trait", - "chrono", - "dirs", - "futures", - "hyper", - "pin-project 0.4.27", - "regex", - "serde", - "serde_json", - "shlex", - "tokio", - "zeroize", -] - [[package]] name = "rusoto_credential" version = "0.45.0" @@ -1667,7 +1541,7 @@ dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core 0.45.0", + "rusoto_core", "serde", "serde_derive", "serde_json", @@ -1682,7 +1556,7 @@ dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core 0.45.0", + "rusoto_core", "serde_urlencoded 0.6.1", "xml-rs", ] @@ -1696,36 +1570,11 @@ dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core 0.45.0", + "rusoto_core", "serde", "serde_json", ] -[[package]] -name = "rusoto_signature" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eddff187ac18c5a91d9ccda9353f30cf531620dce437c4db661dfe2e23b2029" -dependencies = [ - "base64 0.12.3", - "bytes", - "futures", - "hex", - "hmac 0.7.1", - "http", - "hyper", - "log", - "md5", - "percent-encoding", - "pin-project 0.4.27", - "rusoto_credential 0.44.0", - "rustc_version", - "serde", - "sha2 0.8.2", - "time 0.2.23", - "tokio", -] - [[package]] name = "rusoto_signature" version = "0.45.0" @@ -1736,35 +1585,21 @@ dependencies = [ "bytes", "futures", "hex", - "hmac 0.8.1", + "hmac", "http", "hyper", "log", "md5", "percent-encoding", "pin-project 0.4.27", - "rusoto_credential 0.45.0", + "rusoto_credential", "rustc_version", "serde", - "sha2 0.9.2", + "sha2", "time 0.2.23", "tokio", ] -[[package]] -name = "rusoto_ssm" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9224ad97be05dae1a0f6745252f3fa1430d6bea97c93f59e99edaeb7d70f5d" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core 0.44.0", - "serde", - "serde_json", -] - [[package]] name = "rusoto_ssm" version = "0.45.0" @@ -1774,7 +1609,7 @@ dependencies = [ "async-trait", "bytes", "futures", - "rusoto_core 0.45.0", + "rusoto_core", "serde", "serde_json", ] @@ -1789,7 +1624,7 @@ dependencies = [ "bytes", "chrono", "futures", - "rusoto_core 0.45.0", + "rusoto_core", "serde_urlencoded 0.6.1", "tempfile", "xml-rs", @@ -2022,29 +1857,17 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" dependencies = [ - "block-buffer 0.9.0", + "block-buffer", "cfg-if 1.0.0", "cpuid-bool", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", + "opaque-debug", ] [[package]] @@ -2233,12 +2056,6 
@@ dependencies = [ "syn", ] -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - [[package]] name = "subtle" version = "2.4.0" @@ -2465,33 +2282,12 @@ dependencies = [ [[package]] name = "tough" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b8d86994e9da2233fc30c54223bc448a15bdb782f8060c66107fc6b88619ba" -dependencies = [ - "chrono", - "globset", - "hex", - "log", - "olpc-cjson", - "pem", - "ring", - "serde", - "serde_json", - "serde_plain", - "snafu", - "untrusted", - "url", - "walkdir", -] - -[[package]] -name = "tough" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc05d902ccf136ba55d5e2c7222ddc1623f657e6add3f030e93c4dc5341bbdb7" +checksum = "7dc3534fa46badec98ac633028f47a3cea590e9c9a63d85bd15a0436f8b6eb94" dependencies = [ "chrono", + "dyn-clone", "globset", "hex", "log", @@ -2503,6 +2299,7 @@ dependencies = [ "serde_json", "serde_plain", "snafu", + "tempfile", "untrusted", "url", "walkdir", @@ -2510,35 +2307,34 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb9491d4cc35dafcb72c69254c27da97bad4d5796305ffe9974ed1f470ffc39" +checksum = "1aeb5ad37ac31ba5b10f4f53a7c1073a1a28a88dd4537c9abff8718148b95f37" dependencies = [ - "bytes", "pem", "ring", - "rusoto_core 0.45.0", - "rusoto_credential 0.45.0", + "rusoto_core", + "rusoto_credential", "rusoto_kms", "snafu", "tokio", - "tough 0.9.0", + "tough", ] [[package]] name = "tough-ssm" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63b7e51b42318a756ab7cbcf036f2adb23a8462d762519d5a3e50b886430ed23" +checksum = "42cb7ac27150db2f2321adb2ac05a9b2e1f8d9ad739262c2779f298083e96eea" dependencies = [ - "rusoto_core 0.44.0", - "rusoto_credential 0.44.0", - "rusoto_ssm 0.44.0", + "rusoto_core", + "rusoto_credential", + "rusoto_ssm", "serde", "serde_json", "snafu", "tokio", - "tough 0.9.0", + "tough", ] [[package]] @@ -2660,7 +2456,7 @@ dependencies = [ "serde_plain", "snafu", "toml", - "tough 0.8.0", + "tough", ] [[package]] diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 8c147bfe..94aeb5f5 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -35,9 +35,9 @@ structopt = { version = "0.3", default-features = false } tinytemplate = "1.1" tokio = { version = "0.2.21", features = ["time"] } toml = "0.5" -tough = { version = "0.9", features = ["http"] } -tough-kms = "0.1.1" -tough-ssm = "0.4" +tough = { version = "0.10", features = ["http"] } +tough-kms = "0.2" +tough-ssm = "0.5" update_metadata = { path = "../../sources/updater/update_metadata/" } url = { version = "2.1.0", features = ["serde"] } tempfile = "3.1" diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 159dde96..385b6ac1 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -2,7 +2,6 @@ pub(crate) mod check_expirations; pub(crate) mod refresh_repo; -mod transport; pub(crate) mod validate_repo; use crate::{friendly_version, Args}; @@ -18,17 +17,16 @@ use std::fs::{self, File}; use std::num::NonZeroU64; use std::path::{Path, PathBuf}; use structopt::StructOpt; -use tempfile::{tempdir, NamedTempFile}; +use tempfile::NamedTempFile; use tough::{ editor::signed::PathExists, 
editor::RepositoryEditor, key_source::{KeySource, LocalKeySource}, schema::Target, - ExpirationEnforcement, Limits, Repository, Settings, + RepositoryLoader, TransportErrorKind, }; use tough_kms::{KmsKeySource, KmsSigningAlgorithm}; use tough_ssm::SsmKeySource; -use transport::RepoTransport; use update_metadata::{Images, Manifest, Release, UpdateWaves}; use url::Url; @@ -187,8 +185,8 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> /// Set expirations of all non-root role metadata based on a given `RepoExpirationPolicy` and an /// expiration start time -fn set_expirations<'a>( - editor: &mut RepositoryEditor<'a, RepoTransport>, +fn set_expirations( + editor: &mut RepositoryEditor, expiration_policy: &RepoExpirationPolicy, expiration_start_time: DateTime, ) -> Result<()> { @@ -211,7 +209,7 @@ fn set_expirations<'a>( } /// Set versions of all role metadata; the version will be the UNIX timestamp of the current time. -fn set_versions<'a>(editor: &mut RepositoryEditor<'a, RepoTransport>) -> Result<()> { +fn set_versions(editor: &mut RepositoryEditor) -> Result<()> { let seconds = Utc::now().timestamp(); let unsigned_seconds = seconds.try_into().expect("System clock before 1970??"); let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??"); @@ -228,7 +226,7 @@ fn set_versions<'a>(editor: &mut RepositoryEditor<'a, RepoTransport>) -> Result< /// Adds targets, expirations, and version to the RepositoryEditor fn update_editor<'a, P>( repo_args: &'a RepoArgs, - editor: &mut RepositoryEditor<'a, RepoTransport>, + editor: &mut RepositoryEditor, targets: impl Iterator, manifest_path: P, ) -> Result<()> @@ -328,30 +326,25 @@ fn repo_urls<'a>( /// that the repo does not exist. fn load_editor_and_manifest<'a, P>( root_role_path: P, - transport: &'a RepoTransport, - datastore: &'a Path, metadata_url: &'a Url, targets_url: &'a Url, -) -> Result, Manifest)>> +) -> Result> where P: AsRef, { let root_role_path = root_role_path.as_ref(); - // Create a temporary directory where the TUF client can store metadata - let settings = Settings { - root: File::open(root_role_path).context(error::File { + // Try to load the repo... + let repo_load_result = RepositoryLoader::new( + File::open(root_role_path).context(error::File { path: root_role_path, })?, - datastore, - metadata_base_url: metadata_url.as_str(), - targets_base_url: targets_url.as_str(), - limits: Limits::default(), - expiration_enforcement: ExpirationEnforcement::Safe, - }; + metadata_url.clone(), + targets_url.clone(), + ) + .load(); - // Try to load the repo... - match Repository::load(transport, settings) { + match repo_load_result { // If we load it successfully, build an editor and manifest from it. Ok(repo) => { let reader = repo @@ -374,17 +367,26 @@ where // If we fail to load, but we only failed because the repo doesn't exist yet, then start // fresh by signalling that there is no known repo. Otherwise, fail hard. Err(e) => { - if transport.repo_not_found.get() { + if is_file_not_found_error(&e) { Ok(None) } else { Err(e).with_context(|| error::RepoLoad { metadata_base_url: metadata_url.clone(), - })? + }) } } } } +/// Inspects the `tough` error to see if it is a `Transport` error, and if so, is it `FileNotFound`. +fn is_file_not_found_error(e: &tough::error::Error) -> bool { + if let tough::error::Error::Transport { source, .. 
} = e { + matches!(source.kind(), TransportErrorKind::FileNotFound) + } else { + false + } +} + /// Gets the corresponding `KeySource` according to the signing key config from Infra.toml fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Box { match signing_key_config { @@ -443,24 +445,19 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { }) { repo_config } else { - info!("Didn't find repo '{}' in Infra.toml, using default configuration", repo_args.repo); + info!( + "Didn't find repo '{}' in Infra.toml, using default configuration", + repo_args.repo + ); &default_repo_config }; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh let maybe_urls = repo_urls(&repo_config, &repo_args.variant, &repo_args.arch)?; - let workdir = tempdir().context(error::TempDir)?; - let transport = RepoTransport::default(); let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() { info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest( - &repo_args.root_role_path, - &transport, - workdir.path(), - &metadata_url, - &targets_url, - )? { + match load_editor_and_manifest(&repo_args.root_role_path, &metadata_url, &targets_url)? { Some((editor, manifest)) => (editor, manifest), None => { info!( @@ -729,9 +726,6 @@ mod error { source: update_metadata::error::Error, }, - #[snafu(display("Failed to create tempdir: {}", source))] - TempDir { source: io::Error }, - #[snafu(display("Failed to create temporary file: {}", source))] TempFile { source: io::Error }, diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index cdd9e9a9..dd726054 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -1,7 +1,6 @@ //! The check_expirations module owns the 'check-repo-expirations' subcommand and provide methods for //! checking the metadata expirations of a given TUF repository. -use super::RepoTransport; use crate::repo::{error as repo_error, repo_urls}; use crate::Args; use chrono::{DateTime, Utc}; @@ -13,8 +12,7 @@ use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; use structopt::StructOpt; -use tempfile::tempdir; -use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use tough::{ExpirationEnforcement, Repository, RepositoryLoader}; use url::Url; /// Checks for metadata expirations for a set of TUF repositories @@ -42,13 +40,10 @@ pub(crate) struct CheckExpirationsArgs { } /// Checks for upcoming role expirations, gathering them in a map of role to expiration datetime. 
-fn find_upcoming_metadata_expiration( - repo: &Repository<'_, T>, +fn find_upcoming_metadata_expiration( + repo: &Repository, end_date: DateTime, -) -> HashMap> -where - T: tough::Transport, -{ +) -> HashMap> { let mut expirations = HashMap::new(); info!( "Looking for metadata expirations happening from now to {}", @@ -80,28 +75,23 @@ where } fn check_expirations( - transport: &RepoTransport, root_role_path: &PathBuf, metadata_url: &Url, targets_url: &Url, expiration_limit: DateTime, ) -> Result<()> { - // Create a temporary directory where the TUF client can store metadata - let workdir = tempdir().context(repo_error::TempDir)?; - let settings = Settings { - root: File::open(root_role_path).context(repo_error::File { + // Load the repository + let repo = RepositoryLoader::new( + File::open(root_role_path).context(repo_error::File { path: root_role_path, })?, - datastore: workdir.path(), - metadata_base_url: metadata_url.as_str(), - targets_base_url: targets_url.as_str(), - limits: Limits::default(), - // We're gonna check the expiration ourselves - expiration_enforcement: ExpirationEnforcement::Unsafe, - }; - - // Load the repository - let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + metadata_url.clone(), + targets_url.clone(), + ) + // We're gonna check the expiration ourselves + .expiration_enforcement(ExpirationEnforcement::Unsafe) + .load() + .context(repo_error::RepoLoad { metadata_base_url: metadata_url.clone(), })?; info!("Loaded TUF repo:\t{}", metadata_url); @@ -158,7 +148,6 @@ pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> missing: format!("definition for repo {}", &check_expirations_args.repo), })?; - let transport = RepoTransport::default(); let repo_urls = repo_urls( &repo_config, &check_expirations_args.variant, @@ -168,7 +157,6 @@ pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> repo: &check_expirations_args.repo, })?; check_expirations( - &transport, &check_expirations_args.root_role_path, &repo_urls.0, repo_urls.1, diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 80844422..a198ae93 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -1,7 +1,6 @@ //! The refresh_repo module owns the 'refresh-repo' subcommand and provide methods for //! refreshing and re-signing the metadata files of a given TUF repository. -use super::RepoTransport; use crate::repo::{ error as repo_error, get_signing_key_source, repo_urls, set_expirations, set_versions, }; @@ -15,10 +14,9 @@ use std::fs; use std::fs::File; use std::path::{Path, PathBuf}; use structopt::StructOpt; -use tempfile::tempdir; use tough::editor::RepositoryEditor; use tough::key_source::KeySource; -use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use tough::{ExpirationEnforcement, RepositoryLoader}; use url::Url; lazy_static! 
{ @@ -59,7 +57,6 @@ pub(crate) struct RefreshRepoArgs { } fn refresh_repo( - transport: &RepoTransport, root_role_path: &PathBuf, metadata_out_dir: &PathBuf, metadata_url: &Url, @@ -77,25 +74,23 @@ fn refresh_repo( } ); - // Create a temporary directory where the TUF client can store metadata - let workdir = tempdir().context(repo_error::TempDir)?; - let settings = Settings { - root: File::open(root_role_path).context(repo_error::File { - path: root_role_path, - })?, - datastore: workdir.path(), - metadata_base_url: metadata_url.as_str(), - targets_base_url: targets_url.as_str(), - limits: Limits::default(), - expiration_enforcement: if unsafe_refresh { - ExpirationEnforcement::Unsafe - } else { - ExpirationEnforcement::Safe - }, + let expiration_enforcement = if unsafe_refresh { + ExpirationEnforcement::Unsafe + } else { + ExpirationEnforcement::Safe }; // Load the repository and get the repo editor for it - let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + let repo = RepositoryLoader::new( + File::open(root_role_path).context(repo_error::File { + path: root_role_path, + })?, + metadata_url.clone(), + targets_url.clone(), + ) + .expiration_enforcement(expiration_enforcement) + .load() + .context(repo_error::RepoLoad { metadata_base_url: metadata_url.clone(), })?; let mut repo_editor = @@ -167,7 +162,6 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() RepoExpirationPolicy::from_path(&refresh_repo_args.repo_expiration_policy_path) .context(repo_error::Config)?; - let transport = RepoTransport::default(); let repo_urls = repo_urls( &repo_config, &refresh_repo_args.variant, @@ -177,7 +171,6 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() repo: &refresh_repo_args.repo, })?; refresh_repo( - &transport, &refresh_repo_args.root_role_path, &refresh_repo_args .outdir diff --git a/tools/pubsys/src/repo/transport.rs b/tools/pubsys/src/repo/transport.rs deleted file mode 100644 index e889dc48..00000000 --- a/tools/pubsys/src/repo/transport.rs +++ /dev/null @@ -1,63 +0,0 @@ -use super::error; -use std::cell::Cell; -use std::io::Read; -use tough::{FilesystemTransport, HttpTransport, Transport}; -use url::Url; - -/// RepoTransport delegates to FilesystemTransport or HttpTransport based on the url scheme. If we -/// detect that the repo isn't found we return a special error so we can start a new repo. -#[derive(Debug, Default, Clone)] -pub(crate) struct RepoTransport { - // If we fail to fetch the repo, we need a way of conveying whether it happened because the - // repo doesn't exist or because we failed to fetch/load a repo that does exist. This - // information can be used to determine whether we want to start a new repo from scratch or to - // fail early, for example. - // - // tough uses a trait object to represent the source error inside its Error::Transport variant, - // so we can't check our own, inner error type to determine which of our variants is inside. - // Also, it defines the `fetch` method of `Transport` to take an immutable reference to self, - // so we can't use a struct field naively to communicate back. - // - // So, we use this Cell to safely convey the information outward in our single-threaded usage. 
- pub(crate) repo_not_found: Cell, -} - -impl Transport for RepoTransport { - type Stream = Box; - type Error = error::Error; - - fn fetch(&self, url: Url) -> std::result::Result { - if url.scheme() == "file" { - match FilesystemTransport.fetch(url.clone()) { - Ok(reader) => Ok(Box::new(reader)), - Err(e) => match e.kind() { - std::io::ErrorKind::NotFound => { - self.repo_not_found.set(true); - error::RepoNotFound { url }.fail() - } - _ => error::RepoFetch { - url, - msg: e.to_string(), - } - .fail(), - }, - } - } else { - let transport = HttpTransport::new(); - match transport.fetch(url.clone()) { - Ok(reader) => Ok(Box::new(reader)), - Err(e) => match e { - tough::error::Error::HttpFetch { .. } => { - self.repo_not_found.set(true); - error::RepoNotFound { url }.fail() - } - _ => error::RepoFetch { - url, - msg: e.to_string(), - } - .fail(), - }, - } - } - } -} diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index b2b0d04b..9391eab2 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -1,7 +1,6 @@ //! The validate_repo module owns the 'validate-repo' subcommand and provides methods for validating //! a given TUF repository by attempting to load the repository and download its targets. -use super::RepoTransport; use crate::repo::{error as repo_error, repo_urls}; use crate::Args; use futures::future::join_all; @@ -12,8 +11,7 @@ use std::fs::File; use std::io; use std::path::PathBuf; use structopt::StructOpt; -use tempfile::tempdir; -use tough::{ExpirationEnforcement, Limits, Repository, Settings}; +use tough::{Repository, RepositoryLoader}; use url::Url; /// Validates a set of TUF repositories @@ -40,21 +38,12 @@ pub(crate) struct ValidateRepoArgs { validate_targets: bool, } -/// Retrieves listed targets and attempt to download them for validation purposes -async fn retrieve_targets( - repo: &Repository<'_, T>, -) -> Result<(), Error> -where - T: tough::Transport, - ::Stream: std::marker::Send, -{ +/// Retrieves listed targets and attempts to download them for validation purposes +async fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let targets = &repo.targets().signed.targets; let mut tasks = Vec::new(); - for target in targets - .keys() - .cloned() - { + for target in targets.keys().cloned() { let target = target.to_string(); let mut reader = repo .read_target(&target) @@ -81,27 +70,21 @@ where } async fn validate_repo( - transport: &RepoTransport, root_role_path: &PathBuf, metadata_url: Url, targets_url: &Url, validate_targets: bool, ) -> Result<(), Error> { - // Create a temporary directory where the TUF client can store metadata - let workdir = tempdir().context(repo_error::TempDir)?; - let settings = Settings { - root: File::open(root_role_path).context(repo_error::File { + // Load the repository + let repo = RepositoryLoader::new( + File::open(root_role_path).context(repo_error::File { path: root_role_path, })?, - datastore: workdir.path(), - metadata_base_url: metadata_url.as_str(), - targets_base_url: targets_url.as_str(), - limits: Limits::default(), - expiration_enforcement: ExpirationEnforcement::Safe, - }; - - // Load the repository - let repo = Repository::load(transport, settings).context(repo_error::RepoLoad { + metadata_url.clone(), + targets_url.clone(), + ) + .load() + .context(repo_error::RepoLoad { metadata_base_url: metadata_url.clone(), })?; info!("Loaded TUF repo: {}", metadata_url); @@ -133,7 +116,6 @@ pub(crate) async fn run(args: &Args, 
validate_repo_args: &ValidateRepoArgs) -> R missing: format!("definition for repo {}", &validate_repo_args.repo), })?; - let transport = RepoTransport::default(); let repo_urls = repo_urls( &repo_config, &validate_repo_args.variant, @@ -143,7 +125,6 @@ pub(crate) async fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> R repo: &validate_repo_args.repo, })?; validate_repo( - &transport, &validate_repo_args.root_role_path, repo_urls.0, repo_urls.1, From aa529b0b8999bc42e49ef9eb4a7eb14a7ddfd5c3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 20 Jan 2021 16:52:16 +0000 Subject: [PATCH 0397/1356] build: track and clean output files This adds a layer of indirection between the build artifacts and the final output directory, so we can keep track of what we've previously built for a given package or variant. That allows us to remove the files before the next build, so they do not interact with other builds in unexpected ways. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 154 ++++++++++++++++++++++++++-- tools/buildsys/src/builder/error.rs | 31 ++++++ 2 files changed, 175 insertions(+), 10 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 999a6411..d3387c44 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -11,10 +11,13 @@ use duct::cmd; use nonzero_ext::nonzero; use rand::Rng; use sha2::{Digest, Sha512}; -use snafu::{ensure, ResultExt}; +use snafu::{ensure, OptionExt, ResultExt}; use std::env; +use std::fs::{self, File}; use std::num::NonZeroU16; +use std::path::{Path, PathBuf}; use std::process::Output; +use walkdir::{DirEntry, WalkDir}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -41,7 +44,7 @@ impl PackageBuilder { /// Build RPMs for the specified package. pub(crate) fn build(package: &str) -> Result { let arch = getenv("BUILDSYS_ARCH")?; - let output = getenv("BUILDSYS_PACKAGES_DIR")?; + let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); // We do *not* want to rebuild most packages when the variant changes, becauses most aren't // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" @@ -53,7 +56,6 @@ impl PackageBuilder { let var = "PUBLISH_REPO"; let repo = env::var(var).context(error::Environment { var })?; - let target = "package"; let build_args = format!( "--build-arg PACKAGE={package} \ --build-arg ARCH={arch} \ @@ -70,7 +72,7 @@ impl PackageBuilder { arch = arch, ); - build(&target, &build_args, &tag, &output)?; + build(BuildType::Package, &package, &build_args, &tag, &output_dir)?; Ok(Self) } @@ -88,13 +90,12 @@ impl VariantBuilder { let variant = getenv("BUILDSYS_VARIANT")?; let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; let version_build = getenv("BUILDSYS_VERSION_BUILD")?; - let output = getenv("BUILDSYS_OUTPUT_DIR")?; + let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. getenv("BUILDSYS_TIMESTAMP")?; - let target = "variant"; let build_args = format!( "--build-arg PACKAGES={packages} \ --build-arg ARCH={arch} \ @@ -113,17 +114,28 @@ impl VariantBuilder { arch = arch ); - build(&target, &build_args, &tag, &output)?; + build(BuildType::Variant, &variant, &build_args, &tag, &output_dir)?; Ok(Self) } } +enum BuildType { + Package, + Variant, +} + /// Invoke a series of `docker` commands to drive a package or variant build. 
-fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> { +fn build( + kind: BuildType, + what: &str, + build_args: &str, + tag: &str, + output_dir: &PathBuf, +) -> Result<()> { // Our Dockerfile is in the top-level directory. let root = getenv("BUILDSYS_ROOT_DIR")?; - std::env::set_current_dir(&root).context(error::DirectoryChange { path: &root })?; + env::set_current_dir(&root).context(error::DirectoryChange { path: &root })?; // Compute a per-checkout prefix for the tag to avoid collisions. let mut d = Sha512::new(); @@ -143,6 +155,17 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> // Avoid using a cached layer from a concurrent build in another checkout. let token_args = format!("--build-arg TOKEN={}", token); + // Create a directory for tracking outputs before we move them into position. + let build_dir = create_build_dir(&kind, &what)?; + + // Clean up any previous outputs we have tracked. + clean_build_files(&build_dir, &output_dir)?; + + let target = match kind { + BuildType::Package => "package", + BuildType::Variant => "variant", + }; + let build = args(format!( "build . \ --network none \ @@ -161,7 +184,7 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> )); let create = args(format!("create --name {tag} {tag} true", tag = tag)); - let cp = args(format!("cp {}:/output/. {}", tag, output)); + let cp = args(format!("cp {}:/output/. {}", tag, build_dir.display())); let rm = args(format!("rm --force {}", tag)); let rmi = args(format!("rmi --force {}", tag)); @@ -193,6 +216,9 @@ fn build(target: &str, build_args: &str, tag: &str, output: &str) -> Result<()> // Clean up our image now that we're done. docker(&rmi, Retry::No)?; + // Copy artifacts to the expected directory and write markers to track them. + copy_build_files(&build_dir, &output_dir)?; + Ok(()) } @@ -255,6 +281,114 @@ where .collect() } +/// Create a directory for build artifacts. +fn create_build_dir(kind: &BuildType, name: &str) -> Result { + let prefix = match kind { + BuildType::Package => "packages", + BuildType::Variant => "variants", + }; + + let path = [&getenv("BUILDSYS_STATE_DIR")?, prefix, name] + .iter() + .collect(); + + fs::create_dir_all(&path).context(error::DirectoryCreate { path: &path })?; + + Ok(path) +} + +const MARKER_EXTENSION: &str = ".buildsys_marker"; + +/// Copy build artifacts to the output directory. +/// Currently we expect a "flat" structure where all files are in the same directory. +/// Before we copy each file, we create a corresponding marker file to record its existence. +fn copy_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> +where + P: AsRef, +{ + fn is_artifact(entry: &DirEntry) -> bool { + entry.file_type().is_file() + && entry + .file_name() + .to_str() + .map(|s| !s.ends_with(MARKER_EXTENSION)) + .unwrap_or(false) + } + + for artifact_file in find_files(&build_dir, is_artifact) { + let mut marker_file = artifact_file.clone().into_os_string(); + marker_file.push(MARKER_EXTENSION); + File::create(&marker_file).context(error::FileCreate { path: &marker_file })?; + + let mut output_file: PathBuf = output_dir.as_ref().into(); + output_file.push( + artifact_file + .file_name() + .context(error::BadFilename { path: &output_file })?, + ); + + fs::rename(&artifact_file, &output_file).context(error::FileRename { + old_path: &artifact_file, + new_path: &output_file, + })?; + } + + Ok(()) +} + +/// Remove build artifacts from the output directory. +/// Any marker file we find could have a corresponding file that should be cleaned up. +/// We also clean up the marker files so they do not accumulate across builds. +fn clean_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> +where + P: AsRef, +{ + fn is_marker(entry: &DirEntry) -> bool { + entry + .file_name() + .to_str() + .map(|s| s.ends_with(MARKER_EXTENSION)) + .unwrap_or(false) + } + + for marker_file in find_files(&build_dir, is_marker) { + let mut output_file: PathBuf = output_dir.as_ref().into(); + output_file.push( + marker_file + .file_name() + .context(error::BadFilename { path: &marker_file })?, + ); + + output_file.set_extension(""); + if output_file.exists() { + std::fs::remove_file(&output_file).context(error::FileRemove { path: &output_file })?; + } + + std::fs::remove_file(&marker_file).context(error::FileRemove { path: &marker_file })?; + } + + Ok(()) +} + +/// Create an iterator over files matching the supplied filter. +fn find_files
<P>
( + dir: P, + filter: for<'r> fn(&'r walkdir::DirEntry) -> bool, +) -> impl Iterator +where + P: AsRef, +{ + WalkDir::new(&dir) + .follow_links(false) + .same_file_system(true) + .min_depth(1) + .max_depth(1) + .into_iter() + .filter_entry(move |e| filter(e)) + .flat_map(|e| e.context(error::DirectoryWalk)) + .map(|e| e.into_path()) +} + /// Retrieve a BUILDSYS_* variable that we expect to be set in the environment, /// and ensure that we track it for changes, since it will directly affect the /// output. diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs index 0a20a91e..d57f6354 100644 --- a/tools/buildsys/src/builder/error.rs +++ b/tools/buildsys/src/builder/error.rs @@ -16,6 +16,37 @@ pub(crate) enum Error { source: std::io::Error, }, + #[snafu(display("Failed to get filename for '{}'", path.display()))] + BadFilename { path: PathBuf }, + + #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] + DirectoryCreate { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to walk directory to find marker files: {}", source))] + DirectoryWalk { source: walkdir::Error }, + + #[snafu(display("Failed to create file '{}': {}", path.display(), source))] + FileCreate { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to remove file '{}': {}", path.display(), source))] + FileRemove { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to rename file '{}' to '{}': {}", old_path.display(), new_path.display(), source))] + FileRename { + old_path: PathBuf, + new_path: PathBuf, + source: std::io::Error, + }, + #[snafu(display("Missing environment variable '{}'", var))] Environment { var: String, From e3e09bf5607041764cefa0e3050e6ae4d1c86fb5 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 22 Jan 2021 19:07:03 +0000 Subject: [PATCH 0398/1356] Add the ability to output vmdk via qemu-img This change allows users to edit the metadata in a variant's cargo.toml to output a vmdk through buildsys via qemu-img. --- tools/buildsys/src/builder.rs | 14 ++++++++++--- tools/buildsys/src/main.rs | 3 ++- tools/buildsys/src/manifest.rs | 13 ++++++++++++ tools/rpm2img | 36 +++++++++++++++++++++++++++------- 4 files changed, 55 insertions(+), 11 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index d3387c44..cab18ebe 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -19,6 +19,8 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; +use crate::manifest::ImageFormat; + /* There's a bug in BuildKit that can lead to a build failure during parallel `docker build` executions: @@ -46,7 +48,7 @@ impl PackageBuilder { let arch = getenv("BUILDSYS_ARCH")?; let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); - // We do *not* want to rebuild most packages when the variant changes, becauses most aren't + // We do *not* want to rebuild most packages when the variant changes, because most aren't // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" // themselves in the package's spec file. let var = "BUILDSYS_VARIANT"; @@ -82,7 +84,7 @@ pub(crate) struct VariantBuilder; impl VariantBuilder { /// Build a variant with the specified packages installed. 
- pub(crate) fn build(packages: &[String]) -> Result { + pub(crate) fn build(packages: &[String], image_format: Option<&ImageFormat>) -> Result { // We want PACKAGES to be a value that contains spaces, since that's // easier to work with in the shell than other forms of structured data. let packages = packages.join("|"); @@ -91,6 +93,10 @@ impl VariantBuilder { let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; let version_build = getenv("BUILDSYS_VERSION_BUILD")?; let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); + let image_format = match image_format { + Some(ImageFormat::Raw) | None => String::from("raw"), + Some(ImageFormat::Vmdk) => String::from("vmdk"), + }; // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. @@ -101,12 +107,14 @@ impl VariantBuilder { --build-arg ARCH={arch} \ --build-arg VARIANT={variant} \ --build-arg VERSION_ID={version_image} \ - --build-arg BUILD_ID={version_build}", + --build-arg BUILD_ID={version_build} \ + --build-arg IMAGE_FORMAT={image_format}", packages = packages, arch = arch, variant = variant, version_image = version_image, version_build = version_build, + image_format = image_format, ); let tag = format!( "buildsys-var-{variant}-{arch}", diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index a7794872..ac4f8578 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -166,7 +166,8 @@ fn build_variant() -> Result<()> { ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParse)?; if let Some(packages) = manifest.included_packages() { - VariantBuilder::build(&packages).context(error::BuildAttempt)?; + let image_format = manifest.image_format(); + VariantBuilder::build(&packages, image_format).context(error::BuildAttempt)?; } else { println!("cargo:warning=No included packages in manifest. Skipping variant build."); } diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 2e726696..a606048f 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -110,6 +110,11 @@ impl ManifestInfo { .and_then(|b| b.included_packages.as_ref()) } + /// Convenience method to return the image format override, if any. + pub(crate) fn image_format(&self) -> Option<&ImageFormat> { + self.build_variant().and_then(|b| b.image_format.as_ref()) + } + /// Helper methods to navigate the series of optional struct fields. 
fn build_package(&self) -> Option<&BuildPackage> { self.package @@ -152,6 +157,14 @@ pub(crate) struct BuildPackage { #[serde(rename_all = "kebab-case")] pub(crate) struct BuildVariant { pub(crate) included_packages: Option>, + pub(crate) image_format: Option, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "lowercase")] +pub(crate) enum ImageFormat { + Raw, + Vmdk, } #[derive(Deserialize, Debug)] diff --git a/tools/rpm2img b/tools/rpm2img index ad40a22a..35d146f6 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -3,22 +3,35 @@ set -eu -o pipefail shopt -qs failglob +OUTPUT_FMT="raw" + for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" case "${opt}" in --package-dir=*) PACKAGE_DIR="${optarg}" ;; --output-dir=*) OUTPUT_DIR="${optarg}" ;; + --output-fmt=*) OUTPUT_FMT="${optarg}" ;; esac done +case "${OUTPUT_FMT}" in + raw|vmdk) ;; + *) + echo "unexpected image output format '${OUTPUT_FMT}'" >&2 + exit 1 + ;; +esac + mkdir -p "${OUTPUT_DIR}" FILENAME_PREFIX="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" -DISK_IMAGE_NAME="${FILENAME_PREFIX}.img.lz4" + +DISK_IMAGE_BASENAME="${FILENAME_PREFIX}" +DATA_IMAGE_BASENAME="${FILENAME_PREFIX}-data" + BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" -DATA_IMAGE_NAME="${FILENAME_PREFIX}-data.img.lz4" DISK_IMAGE="$(mktemp)" BOOT_IMAGE="$(mktemp)" @@ -206,14 +219,23 @@ dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek=1 sgdisk -v "${DISK_IMAGE}" sgdisk -v "${DATA_IMAGE}" -lz4 -vc "${DISK_IMAGE}" >"${OUTPUT_DIR}/${DISK_IMAGE_NAME}" -lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_NAME}" +if [[ ${OUTPUT_FMT} == "raw" ]]; then + lz4 -vc "${DISK_IMAGE}" >"${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.img.lz4" + lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" + chown 1000:1000 "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.img.lz4" \ + "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" +elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then + # Stream optimization is required for creating an Open Virtual Appliance (OVA) + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DISK_IMAGE}" "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.vmdk" + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" + chown 1000:1000 "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.vmdk" \ + "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" +fi + lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" -chown 1000:1000 "${OUTPUT_DIR}/${DISK_IMAGE_NAME}" \ - "${OUTPUT_DIR}/${DATA_IMAGE_NAME}" \ - "${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" \ +chown 1000:1000 "${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" \ "${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" \ "${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" From 3ad2ac4ac883d4d9e52b40bb07aedbae43a264bb Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 29 Jan 2021 18:20:41 +0000 Subject: [PATCH 0399/1356] Add double quotes to some unwrapped variables Double quotes were added to several places in rpm2img to further prevent unwanted globbing and word splitting (SC2086). 
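As a quick illustration of the failure mode being guarded against (hypothetical, not taken from rpm2img), an unquoted parameter expansion is word-split and glob-expanded by the shell before the command runs:

```bash
#!/usr/bin/env bash
DISK_IMAGE="/tmp/build dir/disk.img"   # a value containing a space

# Unquoted (SC2086): the shell passes two arguments, "/tmp/build" and
# "dir/disk.img", and any glob characters in the value would also expand.
du -h ${DISK_IMAGE}

# Quoted: the value is passed through as a single literal argument.
du -h "${DISK_IMAGE}"
```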
--- tools/rpm2img | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 35d146f6..a642e18e 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -103,13 +103,13 @@ rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* if [[ "${ARCH}" == "x86_64" ]]; then SYS_ROOT="x86_64-bottlerocket-linux-gnu/sys-root" # MBR and BIOS-BOOT - echo "(hd0) ${DISK_IMAGE}" > ${ROOT_MOUNT}/boot/grub/device.map + echo "(hd0) ${DISK_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" "${ROOT_MOUNT}/sbin/grub-bios-setup" \ --directory="${ROOT_MOUNT}/boot/grub" \ --device-map="${ROOT_MOUNT}/boot/grub/device.map" \ --root="hd0" \ --skip-fs-probe \ - ${DISK_IMAGE} + "${DISK_IMAGE}" rm -vf "${ROOT_MOUNT}"/boot/grub/* "${ROOT_MOUNT}"/sbin/grub* else @@ -117,29 +117,29 @@ else # For aarch64 we need an EFI partition instead, formatted # FAT32 with the .efi binary at the correct path, eg /efi/boot. # grub-mkimage has put bootaa64.efi at /boot/efi/EFI/BOOT - mv ${ROOT_MOUNT}/boot/efi/* "${EFI_MOUNT}" + mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" # The 'recommended' size for the EFI partition is 100MB but our aarch64.efi # only takes up around 700KB, so this will suffice for now. - dd if=/dev/zero of=${EFI_IMAGE} bs=1M count=4 + dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count=4 mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((4*2048)) - mmd -i ${EFI_IMAGE} ::/EFI - mmd -i ${EFI_IMAGE} ::/EFI/BOOT - mcopy -i ${EFI_IMAGE} ${EFI_MOUNT}/EFI/BOOT/bootaa64.efi ::/EFI/BOOT + mmd -i "${EFI_IMAGE}" ::/EFI + mmd -i "${EFI_IMAGE}" ::/EFI/BOOT + mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT/bootaa64.efi" ::/EFI/BOOT dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=1 # Create the grub directory which grub-bios-setup would have otherwise done. 
- mkdir -p ${ROOT_MOUNT}/boot/grub + mkdir -p "${ROOT_MOUNT}/boot/grub" fi # Now that we're done messing with /, move /boot out of it mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" # Set the Bottlerocket variant, version, and build-id -echo "PRETTY_NAME=\"Bottlerocket OS ${VERSION_ID}\"" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release -echo "VARIANT_ID=${VARIANT}" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release -echo "VERSION_ID=${VERSION_ID}" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release -echo "BUILD_ID=${BUILD_ID}" >> ${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release +echo "PRETTY_NAME=\"Bottlerocket OS ${VERSION_ID}\"" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +echo "VARIANT_ID=${VARIANT}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +echo "VERSION_ID=${VERSION_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +echo "BUILD_ID=${BUILD_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" # BOTTLEROCKET-ROOT-A mkdir -p "${ROOT_MOUNT}/lost+found" @@ -172,7 +172,7 @@ veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=965 # write GRUB config -cat < ${BOOT_MOUNT}/grub/grub.cfg +cat < "${BOOT_MOUNT}/grub/grub.cfg" set default="0" set timeout="0" From 69e1c324d4e3b4d415c59160ec205d4f4ee1a8d6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 9 Feb 2021 19:57:35 +0000 Subject: [PATCH 0400/1356] Pass BUILDSYS_NAME to rpm2img so we use requested image name prefix --- tools/buildsys/src/builder.rs | 3 +++ tools/rpm2img | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index cab18ebe..9973e490 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -93,6 +93,7 @@ impl VariantBuilder { let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; let version_build = getenv("BUILDSYS_VERSION_BUILD")?; let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); + let image_name = getenv("BUILDSYS_NAME")?; let image_format = match image_format { Some(ImageFormat::Raw) | None => String::from("raw"), Some(ImageFormat::Vmdk) => String::from("vmdk"), @@ -108,12 +109,14 @@ impl VariantBuilder { --build-arg VARIANT={variant} \ --build-arg VERSION_ID={version_image} \ --build-arg BUILD_ID={version_build} \ + --build-arg IMAGE_NAME={image_name} \ --build-arg IMAGE_FORMAT={image_format}", packages = packages, arch = arch, variant = variant, version_image = version_image, version_build = version_build, + image_name = image_name, image_format = image_format, ); let tag = format!( diff --git a/tools/rpm2img b/tools/rpm2img index a642e18e..ba7286bc 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -24,7 +24,7 @@ esac mkdir -p "${OUTPUT_DIR}" -FILENAME_PREFIX="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" +FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" DISK_IMAGE_BASENAME="${FILENAME_PREFIX}" DATA_IMAGE_BASENAME="${FILENAME_PREFIX}-data" From 89c01b79b5e0e3f34fa3ca07a426b0f7f206e1e4 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 4 Feb 2021 23:39:30 +0000 Subject: [PATCH 0401/1356] Refactor default settings files to allow sharing Previously, storewolf would read two hard-coded paths, a global defaults.toml and a variant-specific override-defaults.toml. TOML doesn't have a way of including other files, so the layout was fairly strict. 
Not all variants want all of the settings in defaults.toml, and there's no way for a variant to remove the global settings defined in defaults.toml, only add or replace, which can be a problem if, for example, it doesn't want the default host containers. This change makes each variant have a `defaults.d` directory in its model where it can include any number of files containing default settings. In this way, common settings can be shared through symlinks, and settings that are not common to all variants can be moved to separate files and not included by a variant. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 16699dd9..0a44086d 100644 --- a/README.md +++ b/README.md @@ -262,7 +262,7 @@ motd = "my own value!" Here we'll describe each setting you can change. -**Note:** You can see the default values (for any settings that are not generated at runtime) by looking at [defaults.toml](sources/models/defaults.toml). +**Note:** You can see the default values (for any settings that are not generated at runtime) by looking in the `defaults.d` directory for a variant, for example [aws-ecs-1](sources/models/src/aws-ecs-1/defaults.d/). When you're sending settings to the API, or receiving settings from the API, they're in a structured JSON format. This allows modification of any number of keys at once. From 9ef7ef0fc1e0aad0230e5da36a9102ad42ec1847 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Mon, 24 Aug 2020 14:34:23 -0700 Subject: [PATCH 0402/1356] metricdog: anonymous bottlerocket metrics Metricdog is a new command-line program that sends anonymous information about a Bottlerocket instance to a metrics endpoint. A report of boot success is sent by the mark-boot-success service and at 2 minutes after boot, and every 6 hours thereafter, a report of service health is sent. --- GLOSSARY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/GLOSSARY.md b/GLOSSARY.md index 2bb46b85..c43c6d94 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -14,6 +14,8 @@ Used for system maintenance and connectivity. * [**host-ctr**](sources/host-ctr): The program started by `host-containers@.service` for each host container. Its job is to start the specified host container on the “host” instance of containerd, which is separate from the “user” instance of containerd used for Kubernetes pods. +* [**logdog**](sources/logdog): A program that one can use to collect logs when things go wrong. +* [**metricdog**](sources/metricdog): A program that sends anonymous health pings. * [**model**](sources/models): The API system has a data model defined for each variant, and this model is used by other programs to serialize and deserialize requests while maintaining safety around data types. * [**netdog**](sources/api/netdog): A program called by wicked to retrieve and write out network configuration from DHCP. * [**pluto**](sources/api/pluto): A setting generator called by sundog to find networking settings required by Kubernetes. From f2be2e35fe0ed052383544e6e62313fb4bb441a5 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 27 Jan 2021 17:46:52 +0000 Subject: [PATCH 0403/1356] Add a vmware-dev variant This change adds the necessary files for a VMWare development variant. The files are currently a copy of the aws-dev variant but will diverge over time. 
--- .github/workflows/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e3657fd4..f4358ed6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,6 +17,9 @@ jobs: - variant: aws-dev arch: x86_64 supported: false + - variant: vmware-dev + arch: x86_64 + supported: false fail-fast: false steps: - uses: actions/checkout@v2 From 43d37f8e6e29f2f030af8e4c79bf528792c13215 Mon Sep 17 00:00:00 2001 From: Yenlin Chen <3822365+hencrice@users.noreply.github.com> Date: Sun, 17 Jan 2021 00:11:55 +0000 Subject: [PATCH 0404/1356] Switched from TermLogger to SimpleLogger Resolves #1157. --- tools/pubsys-setup/src/main.rs | 6 +++--- tools/pubsys/src/main.rs | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index ca00fd88..50f75423 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -9,7 +9,7 @@ existing role. use log::{debug, info, trace, warn}; use pubsys_config::InfraConfig; use sha2::{Digest, Sha512}; -use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; +use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; use snafu::{ensure, OptionExt, ResultExt}; use std::convert::TryFrom; use std::fs; @@ -75,8 +75,8 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); - // TerminalMode::Mixed will send errors to stderr and anything less to stdout. - TermLogger::init(args.log_level, LogConfig::default(), TerminalMode::Mixed) + // SimpleLogger will send errors to stderr and anything less to stdout. + SimpleLogger::init(args.log_level, LogConfig::default()) .context(error::Logger)?; // Make /roles and /keys directories, if they don't exist, so we can write generated files. diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 62252b5e..9784238d 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -27,7 +27,7 @@ mod aws; mod repo; use semver::Version; -use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; +use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; use snafu::ResultExt; use std::path::PathBuf; use std::process; @@ -38,8 +38,8 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); - // TerminalMode::Mixed will send errors to stderr and anything less to stdout. - TermLogger::init(args.log_level, LogConfig::default(), TerminalMode::Mixed) + // SimpleLogger will send errors to stderr and anything less to stdout. + SimpleLogger::init(args.log_level, LogConfig::default()) .context(error::Logger)?; match args.subcommand { @@ -144,7 +144,7 @@ mod error { Ami { source: crate::aws::ami::Error }, #[snafu(display("Logger setup error: {}", source))] - Logger { source: simplelog::TermLogError }, + Logger { source: log::SetLoggerError }, #[snafu(display("Failed to publish AMI: {}", source))] PublishAmi { From 8092a671e52d6a201ddde3845b314e72650af3b3 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 9 Feb 2021 13:20:22 -0800 Subject: [PATCH 0405/1356] settings, static-pods: new settings and helper for k8s static pods Adds a set of new settings for managing k8s static pods. Adds a new binary helper to manage k8s static pod manifests. 
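The setting names are documented in the README change below. As a rough sketch (not taken from this change), user data enabling a single static pod might look like the following, where `my-pod` is a placeholder name and `manifest` holds a base64-encoded Kubernetes pod manifest:

    [settings.kubernetes.static-pods.my-pod]
    enabled = true
    # Truncated for illustration; "YXBpVmVyc2lvbjogdjEK" is base64 for "apiVersion: v1".
    manifest = "YXBpVmVyc2lvbjogdjEK..."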
--- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 0a44086d..bfbd1e52 100644 --- a/README.md +++ b/README.md @@ -304,6 +304,10 @@ The following settings can be optionally set to customize the node labels and ta The following settings are optional and allow you to further configure your cluster. * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. +You can also optionally specify static pods for your node with the following settings. +* `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. +* `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. + The following settings are set for you automatically by [pluto](sources/api/) based on runtime instance information, but you can override them if you know what you're doing! * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) * `settings.kubernetes.cluster-dns-ip`: The CIDR block of the primary network interface. From 6aa5dd1fae62c56adf2338ca2ec83a604dffe3b2 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 12 Feb 2021 23:43:02 +0000 Subject: [PATCH 0406/1356] Allow overriding the "pretty" name of the OS inside the image --- tools/buildsys/src/builder.rs | 5 +++++ tools/rpm2img | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 9973e490..1b37c6ed 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -93,6 +93,9 @@ impl VariantBuilder { let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; let version_build = getenv("BUILDSYS_VERSION_BUILD")?; let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); + // We expect users' PRETTY_NAME values to contain spaces for things like "Bottlerocket OS" + // and so we need to transform them the same way as PACKAGES above. 
+ let pretty_name = getenv("BUILDSYS_PRETTY_NAME")?.replace(' ', "|"); let image_name = getenv("BUILDSYS_NAME")?; let image_format = match image_format { Some(ImageFormat::Raw) | None => String::from("raw"), @@ -109,6 +112,7 @@ impl VariantBuilder { --build-arg VARIANT={variant} \ --build-arg VERSION_ID={version_image} \ --build-arg BUILD_ID={version_build} \ + --build-arg PRETTY_NAME={pretty_name} \ --build-arg IMAGE_NAME={image_name} \ --build-arg IMAGE_FORMAT={image_format}", packages = packages, @@ -116,6 +120,7 @@ impl VariantBuilder { variant = variant, version_image = version_image, version_build = version_build, + pretty_name = pretty_name, image_name = image_name, image_format = image_format, ); diff --git a/tools/rpm2img b/tools/rpm2img index ba7286bc..e63bfe3a 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -136,7 +136,7 @@ fi mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" # Set the Bottlerocket variant, version, and build-id -echo "PRETTY_NAME=\"Bottlerocket OS ${VERSION_ID}\"" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +echo "PRETTY_NAME=\"${PRETTY_NAME} ${VERSION_ID}\"" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" echo "VARIANT_ID=${VARIANT}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" echo "VERSION_ID=${VERSION_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" echo "BUILD_ID=${BUILD_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" @@ -176,7 +176,7 @@ cat < "${BOOT_MOUNT}/grub/grub.cfg" set default="0" set timeout="0" -menuentry "Bottlerocket OS ${VERSION_ID}" { +menuentry "${PRETTY_NAME} ${VERSION_ID}" { linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro \\ console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ From 9cd80822e77822f3e4a9f3d1da6936f5c087a947 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 18 Feb 2021 14:47:04 -0800 Subject: [PATCH 0407/1356] buildsys: use argument vecs instead of string manipulation Some arguments have internal spaces, like the list of packages in --build-arg PACKAGES=a b c. This was handled using a "|" marker that was later replaced by spaces. For other uses of --build-arg, like PRETTY_NAME, we don't want to arbitrarily restrict the characters that can be used, in case someone wants "|" in their OS name. This change stores arguments directly in vectors, rather than strings that we split on space, so that we can pass through arbitrary characters. 
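A small sketch of the property this relies on (separate from the change below): when arguments are kept in a vector, `std::process::Command` hands each element to the child process verbatim, so values may contain spaces, "|", or any other character without placeholder markers:

    use std::process::Command;

    fn main() {
        // Each vector element becomes exactly one argument to `docker`,
        // so "Bottlerocket OS" keeps its internal space intact.
        let args = vec![
            "build".to_string(),
            "--build-arg".to_string(),
            format!("PRETTY_NAME={}", "Bottlerocket OS"),
            ".".to_string(),
        ];
        let status = Command::new("docker")
            .args(&args)
            .status()
            .expect("failed to run docker");
        println!("docker exited with {}", status);
    }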
--- tools/buildsys/src/builder.rs | 166 +++++++++++++++++----------------- 1 file changed, 85 insertions(+), 81 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 1b37c6ed..7040ef59 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -58,23 +58,19 @@ impl PackageBuilder { let var = "PUBLISH_REPO"; let repo = env::var(var).context(error::Environment { var })?; - let build_args = format!( - "--build-arg PACKAGE={package} \ - --build-arg ARCH={arch} \ - --build-arg VARIANT={variant} \ - --build-arg REPO={repo}", - package = package, - arch = arch, - variant = variant, - repo = repo, - ); + let mut args = Vec::new(); + args.build_arg("PACKAGE", package); + args.build_arg("ARCH", &arch); + args.build_arg("VARIANT", variant); + args.build_arg("REPO", repo); + let tag = format!( "buildsys-pkg-{package}-{arch}", package = package, arch = arch, ); - build(BuildType::Package, &package, &build_args, &tag, &output_dir)?; + build(BuildType::Package, &package, args, &tag, &output_dir)?; Ok(Self) } @@ -85,57 +81,45 @@ pub(crate) struct VariantBuilder; impl VariantBuilder { /// Build a variant with the specified packages installed. pub(crate) fn build(packages: &[String], image_format: Option<&ImageFormat>) -> Result { - // We want PACKAGES to be a value that contains spaces, since that's - // easier to work with in the shell than other forms of structured data. - let packages = packages.join("|"); - let arch = getenv("BUILDSYS_ARCH")?; - let variant = getenv("BUILDSYS_VARIANT")?; - let version_image = getenv("BUILDSYS_VERSION_IMAGE")?; - let version_build = getenv("BUILDSYS_VERSION_BUILD")?; let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); - // We expect users' PRETTY_NAME values to contain spaces for things like "Bottlerocket OS" - // and so we need to transform them the same way as PACKAGES above. - let pretty_name = getenv("BUILDSYS_PRETTY_NAME")?.replace(' ', "|"); - let image_name = getenv("BUILDSYS_NAME")?; - let image_format = match image_format { - Some(ImageFormat::Raw) | None => String::from("raw"), - Some(ImageFormat::Vmdk) => String::from("vmdk"), - }; + + let variant = getenv("BUILDSYS_VARIANT")?; + let arch = getenv("BUILDSYS_ARCH")?; + + let mut args = Vec::new(); + args.build_arg("PACKAGES", packages.join(" ")); + args.build_arg("ARCH", &arch); + args.build_arg("VARIANT", &variant); + args.build_arg("VERSION_ID", getenv("BUILDSYS_VERSION_IMAGE")?); + args.build_arg("BUILD_ID", getenv("BUILDSYS_VERSION_BUILD")?); + args.build_arg("PRETTY_NAME", getenv("BUILDSYS_PRETTY_NAME")?); + args.build_arg("IMAGE_NAME", getenv("BUILDSYS_NAME")?); + args.build_arg( + "IMAGE_FORMAT", + match image_format { + Some(ImageFormat::Raw) | None => "raw", + Some(ImageFormat::Vmdk) => "vmdk", + }, + ); // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. 
getenv("BUILDSYS_TIMESTAMP")?; - let build_args = format!( - "--build-arg PACKAGES={packages} \ - --build-arg ARCH={arch} \ - --build-arg VARIANT={variant} \ - --build-arg VERSION_ID={version_image} \ - --build-arg BUILD_ID={version_build} \ - --build-arg PRETTY_NAME={pretty_name} \ - --build-arg IMAGE_NAME={image_name} \ - --build-arg IMAGE_FORMAT={image_format}", - packages = packages, - arch = arch, - variant = variant, - version_image = version_image, - version_build = version_build, - pretty_name = pretty_name, - image_name = image_name, - image_format = image_format, - ); let tag = format!( "buildsys-var-{variant}-{arch}", variant = variant, arch = arch ); - build(BuildType::Variant, &variant, &build_args, &tag, &output_dir)?; + build(BuildType::Variant, &variant, args, &tag, &output_dir)?; Ok(Self) } } +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + enum BuildType { Package, Variant, @@ -145,7 +129,7 @@ enum BuildType { fn build( kind: BuildType, what: &str, - build_args: &str, + build_args: Vec, tag: &str, output_dir: &PathBuf, ) -> Result<()> { @@ -162,14 +146,9 @@ fn build( // Our SDK image is picked by the external `cargo make` invocation. let sdk = getenv("BUILDSYS_SDK_IMAGE")?; - let sdk_args = format!("--build-arg SDK={}", sdk); // Avoid using a cached layer from a previous build. let nocache = rand::thread_rng().gen::(); - let nocache_args = format!("--build-arg NOCACHE={}", nocache); - - // Avoid using a cached layer from a concurrent build in another checkout. - let token_args = format!("--build-arg TOKEN={}", token); // Create a directory for tracking outputs before we move them into position. let build_dir = create_build_dir(&kind, &what)?; @@ -182,27 +161,26 @@ fn build( BuildType::Variant => "variant", }; - let build = args(format!( + let mut build = format!( "build . \ - --network none \ - --target {target} \ - {build_args} \ - {sdk_args} \ - {nocache_args} \ - {token_args} \ - --tag {tag}", + --network none \ + --target {target} \ + --tag {tag}", target = target, - build_args = build_args, - sdk_args = sdk_args, - nocache_args = nocache_args, - token_args = token_args, tag = tag, - )); + ) + .split_string(); + + build.extend(build_args); + build.build_arg("SDK", sdk); + build.build_arg("NOCACHE", nocache.to_string()); + // Avoid using a cached layer from a concurrent build in another checkout. + build.build_arg("TOKEN", token); - let create = args(format!("create --name {tag} {tag} true", tag = tag)); - let cp = args(format!("cp {}:/output/. {}", tag, build_dir.display())); - let rm = args(format!("rm --force {}", tag)); - let rmi = args(format!("rmi --force {}", tag)); + let create = format!("create --name {} {} true", tag, tag).split_string(); + let cp = format!("cp {}:/output/. {}", tag, build_dir.display()).split_string(); + let rm = format!("rm --force {}", tag).split_string(); + let rmi = format!("rmi --force {}", tag).split_string(); // Clean up the stopped container if it exists. let _ = docker(&rm, Retry::No); @@ -283,19 +261,7 @@ enum Retry<'a> { }, } -/// Convert an argument string into a collection of positional arguments. -fn args(input: S) -> Vec -where - S: AsRef, -{ - // Treat "|" as a placeholder that indicates where the argument should - // contain spaces after we split on whitespace. - input - .as_ref() - .split_whitespace() - .map(|s| s.replace("|", " ")) - .collect() -} +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= /// Create a directory for build artifacts. 
fn create_build_dir(kind: &BuildType, name: &str) -> Result { @@ -412,3 +378,41 @@ fn getenv(var: &str) -> Result { println!("cargo:rerun-if-env-changed={}", var); env::var(var).context(error::Environment { var }) } + +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + +/// Helper trait for constructing buildkit --build-arg arguments. +trait BuildArg { + fn build_arg(&mut self, key: S1, value: S2) + where + S1: AsRef, + S2: AsRef; +} + +impl BuildArg for Vec { + fn build_arg(&mut self, key: S1, value: S2) + where + S1: AsRef, + S2: AsRef, + { + self.push("--build-arg".to_string()); + self.push(format!("{}={}", key.as_ref(), value.as_ref())); + } +} + +/// Helper trait for splitting a string on spaces into owned Strings. +/// +/// If you need an element with internal spaces, you should handle that separately, for example +/// with BuildArg. +trait SplitString { + fn split_string(&self) -> Vec; +} + +impl SplitString for S +where + S: AsRef, +{ + fn split_string(&self) -> Vec { + self.as_ref().split(' ').map(String::from).collect() + } +} From 6aaa63c1bbce7dc78eeb381984947c4d3ab604cf Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 8 Feb 2021 18:04:35 +0000 Subject: [PATCH 0408/1356] kubernetes: add support for standalone mode This also makes the API server, cluster name, and cluster certificate settings optional, since the kubelet does not need these values when running in standalone mode. Signed-off-by: Ben Cressey --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index bfbd1e52..04c4920d 100644 --- a/README.md +++ b/README.md @@ -303,8 +303,10 @@ The following settings can be optionally set to customize the node labels and ta The following settings are optional and allow you to further configure your cluster. * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. +* `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. You can also optionally specify static pods for your node with the following settings. +Static pods can be particularly useful when running in standalone mode. * `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. * `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. From ce8a9a313d46e8c7ef41843a2afc074ec0bf5a09 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 Feb 2021 00:24:49 +0000 Subject: [PATCH 0409/1356] kubernetes: add support for TLS auth This enables the kubelet to use TLS for authentication instead of AWS role credentials. If a bootstrap token is provided, the kubelet will use it to perform TLS bootstrapping. Signed-off-by: Ben Cressey --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 04c4920d..8f388c0b 100644 --- a/README.md +++ b/README.md @@ -304,6 +304,8 @@ The following settings can be optionally set to customize the node labels and ta The following settings are optional and allow you to further configure your cluster. * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. 
+* `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. +* `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 9f2dcda17082a692334804c77346bc5b2347f2bb Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Sun, 21 Feb 2021 21:14:52 +0000 Subject: [PATCH 0410/1356] kernel: update to 5.4.91 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index dd2c7a86..e5b7e51b 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/5923078ac40834106f279fb42b9b177fea5c8136725a231e353772dbae9bce93/kernel-5.4.80-40.140.amzn2.src.rpm" -sha512 = "b09194da42aabe041992ed654dcca86bb245093e62b9a2c7a542fb6b6343f397cc96dd7d8734b258a01566a42027dc96ef80ebcf593222e50c722ec3ca74eff0" +url = "https://cdn.amazonlinux.com/blobstore/49c95ec15f0cae8eda22d691177e2cc401c5d1a16ef31680b542de9bcad1490a/kernel-5.4.91-41.139.amzn2.src.rpm" +sha512 = "4500ab769265aa8c3ada672e0e12ad06d63ce5c22079bdc8850294d3bd0e5b673f391c404c133b1df25787c8ac1fc4f3724e0e35a5324ab6e4928da2aabc785a" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index ec69ff59..83b193bb 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.80 +Version: 5.4.91 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/5923078ac40834106f279fb42b9b177fea5c8136725a231e353772dbae9bce93/kernel-5.4.80-40.140.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/49c95ec15f0cae8eda22d691177e2cc401c5d1a16ef31680b542de9bcad1490a/kernel-5.4.91-41.139.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 4b47ce85558c7bf189169ebeac3d04f9b1b045cb Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 24 Feb 2021 18:29:31 +0000 Subject: [PATCH 0411/1356] tools: update cargo dependencies --- tools/Cargo.lock | 437 ++++++++++++++++++++++++----------------------- 1 file changed, 223 insertions(+), 214 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c5abc440..12108b1f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "addr2line" version = "0.14.1" @@ -104,9 +106,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -168,9 +170,9 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" +checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" dependencies = [ "memchr", ] @@ -182,7 +184,7 @@ dependencies = [ "duct", "hex", "nonzero_ext", - "rand 0.8.1", + "rand", "reqwest", "serde", "serde_plain", @@ -195,9 +197,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" [[package]] name = "bytes" @@ -205,6 +207,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cargo-readme" version = "3.2.0" @@ -222,9 +230,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" [[package]] name = "cfg-if" @@ -248,7 +256,7 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -275,7 +283,7 @@ checksum = "4e1073ef4f7d65b3df89ce61595ff7fcad286b1d82ff646c360547ac75df1901" dependencies = [ "argh", "base64 0.13.0", - "bytes", + "bytes 0.5.6", "futures", "indicatif", "rusoto_core", @@ -349,13 +357,14 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "bae8f328835f8f5a6ceb6a7842a7f2d0c03692adb5c889347235d59194731fe3" dependencies = [ "autocfg", "cfg-if 1.0.0", "lazy_static", + "loom", ] [[package]] @@ -427,9 +436,9 @@ checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "duct" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90a9c3a25aafbd538c7d40a53f83c4487ee8216c12d1c8ef2c01eb2f6ea1553" +checksum = "0fc6a0a59ed0888e0041cf708e66357b7ae1a82f1c67247e1f93b5e0818f7d8d" dependencies = [ "libc", "once_cell", @@ -451,9 +460,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if 1.0.0", ] @@ -466,9 +475,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding", @@ -492,9 +501,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" dependencies = [ "futures-channel", "futures-core", @@ -507,9 +516,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" dependencies = [ "futures-core", "futures-sink", @@ -517,15 +526,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" dependencies = [ "futures-core", "futures-task", @@ -534,15 +543,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -552,24 +561,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" -dependencies = [ - "once_cell", -] +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" [[package]] name = "futures-util" -version = "0.3.8" +version = 
"0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" dependencies = [ "futures-channel", "futures-core", @@ -578,13 +584,26 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", "slab", ] +[[package]] +name = "generator" +version = "0.6.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9fed24fd1e18827652b4d55652899a1e9da8e54d91624dc3437a5bc3a9f9a9c" +dependencies = [ + "cc", + "libc", + "log", + "rustversion", + "winapi 0.3.9", +] + [[package]] name = "generic-array" version = "0.14.4" @@ -608,13 +627,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -642,7 +661,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", @@ -673,9 +692,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -698,11 +717,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -713,15 +732,15 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", "http", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -731,11 +750,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", @@ -745,7 +764,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.5", "socket2", "tokio", "tower-service", @@ -759,7 +778,7 @@ version = "0.20.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" dependencies = [ - "bytes", + "bytes 0.5.6", "ct-logs", "futures-util", "hyper", @@ -777,7 +796,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-util", "hyper", "log", @@ -789,9 +808,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -843,9 +862,9 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -868,17 +887,28 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", +] + +[[package]] +name = "loom" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d44c73b4636e497b4917eb21c33539efa3816741a2d3ff26c6316f1b529481a4" +dependencies = [ + "cfg-if 1.0.0", + "generator", + "scoped-tls", ] [[package]] @@ -1043,9 +1073,9 @@ checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "olpc-cjson" @@ -1060,9 +1090,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "4ad167a2f54e832b82dbe003a046280dceffe5227b5f79e08e363a29638cfddd" [[package]] name = "opaque-debug" @@ -1097,9 +1127,9 @@ dependencies = [ [[package]] name = "pem" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ "base64 0.13.0", "once_cell", @@ -1132,11 +1162,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.5", ] [[package]] @@ -1152,9 +1182,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1169,9 +1199,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1217,9 +1247,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1301,46 +1331,23 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.1", - "rand_hc 0.3.0", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] @@ -1350,34 +1357,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.1", + "rand_core", ] [[package]] name = "rand_core" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.1" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" -dependencies = [ - "getrandom 0.2.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom 0.2.2", ] [[package]] @@ -1386,7 +1375,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.1", + "rand_core", ] [[package]] @@ -1395,6 +1384,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1402,15 +1400,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1420,9 +1418,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1440,7 +1438,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", @@ -1455,7 +1453,7 @@ dependencies = [ "mime", "mime_guess", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "rustls 0.18.1", "serde", "serde_urlencoded 0.7.0", @@ -1471,9 +1469,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1492,7 +1490,7 @@ checksum = "e977941ee0658df96fca7291ecc6fc9a754600b21ad84b959eb1dbbc9d5abcc7" dependencies = [ "async-trait", "base64 0.12.3", - "bytes", + "bytes 0.5.6", "crc32fast", "futures", "http", @@ -1539,7 +1537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec4b9c1321c10ac639bb285e46bc558d24937b1744183c0da17f1acc752fe01c" dependencies = [ "async-trait", - "bytes", + "bytes 0.5.6", "futures", "rusoto_core", "serde", @@ -1554,7 +1552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5145366791ba9097d917330944ef460e1ebd67da871a8e04ad9f51cecc64375f" dependencies = [ "async-trait", - "bytes", + "bytes 0.5.6", "futures", "rusoto_core", "serde_urlencoded 0.6.1", @@ -1568,7 +1566,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "111b99b940b1b02f5a98a5fcc96467a24ab899c43c1caff60d4a863342798c6e" dependencies = [ "async-trait", - "bytes", + "bytes 0.5.6", "futures", "rusoto_core", "serde", @@ -1582,7 +1580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97a740a88dde8ded81b6f2cff9cd5e054a5a2e38a38397260f7acdd2c85d17dd" dependencies = [ "base64 0.12.3", - "bytes", + "bytes 0.5.6", "futures", "hex", "hmac", @@ -1596,7 +1594,7 @@ dependencies = [ "rustc_version", "serde", "sha2", - "time 0.2.23", + "time 0.2.25", "tokio", ] @@ -1607,7 +1605,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e4950a5600f4aab2eeb1f525d7843acbfbc7a720275d26c2afcddbb112ffd17" dependencies = [ "async-trait", - "bytes", + "bytes 0.5.6", "futures", "rusoto_core", "serde", @@ -1621,7 +1619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3815b8c0fc1c50caf9e87603f23daadfedb18d854de287b361c69f68dc9d49e0" dependencies = [ "async-trait", - "bytes", + "bytes 0.5.6", "chrono", "futures", "rusoto_core", @@ -1695,6 +1693,12 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustversion" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" + [[package]] name = "ryu" version = "1.0.5" @@ -1720,6 +1724,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + [[package]] name = "sct" version = "0.6.0" @@ -1789,18 +1799,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.118" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -1809,9 +1819,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" dependencies = [ "itoa", "ryu", @@ -1859,9 +1869,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -1970,9 +1980,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" 
[[package]] name = "standback" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2064,9 +2074,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2075,14 +2085,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] @@ -2098,9 +2108,9 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd2d183bd3fac5f5fe38ddbeb4dc9aec4a39a9d7d59e7491d900302da01cbe1" +checksum = "86ca8ced750734db02076f44132d802af0b33b09942331f4459dde8636fd2406" dependencies = [ "libc", "winapi 0.3.9", @@ -2117,29 +2127,28 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2185,9 +2194,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2200,11 +2209,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", 
"iovec", @@ -2263,7 +2272,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", @@ -2339,19 +2348,19 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-core", ] @@ -2366,11 +2375,11 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project 1.0.5", "tracing", ] @@ -2412,9 +2421,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -2461,9 +2470,9 @@ dependencies = [ [[package]] name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", "idna", @@ -2513,15 +2522,15 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ "cfg-if 1.0.0", "serde", @@ -2531,9 +2540,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2546,9 +2555,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2558,9 +2567,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2568,9 +2577,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2581,15 +2590,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", From de0739be20e0faf1101361f70b8da4ae44361f93 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 24 Feb 2021 18:36:57 +0000 Subject: [PATCH 0412/1356] tools: update outdated cargo dependencies --- tools/Cargo.lock | 15 ++------------- tools/pubsys/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 12108b1f..a9ae2a4b 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1285,7 +1285,7 @@ dependencies = [ "semver 0.11.0", "serde", "serde_json", - "simplelog 0.8.0", + "simplelog", "snafu", "structopt", "tempfile", @@ -1321,7 +1321,7 @@ dependencies = [ "reqwest", "sha2", "shell-words", - "simplelog 0.9.0", + "simplelog", "snafu", "structopt", "tempfile", @@ -1911,17 +1911,6 @@ dependencies = [ "libc", ] -[[package]] -name = "simplelog" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2736f58087298a448859961d3f4a0850b832e72619d75adc69da7993c2cd3c" -dependencies = [ - "chrono", - "log", - "termcolor", -] - [[package]] name = "simplelog" version = "0.9.0" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 94aeb5f5..8cf3ad69 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -26,7 +26,7 @@ rusoto_ec2 = { version = "0.45.0", default-features = false, features = ["rustls rusoto_signature = "0.45.0" rusoto_ssm = { version = "0.45.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.45.0", default-features = false, features = ["rustls"] } -simplelog = "0.8" +simplelog = "0.9.0" snafu = "0.6" semver = "0.11.0" serde = { version = "1.0", features = ["derive"] } From e94672cf5f30a55b9da83cb0425f0f5a13d61c4b Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Thu, 18 Feb 2021 03:45:55 +0000 Subject: [PATCH 0413/1356] Add "shibaken" generator to populate user-data with public keys from IMDS This populates the admin container's user-data setting with public keys available from IMDS in the event that user-data has not been populated by the user. --- GLOSSARY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GLOSSARY.md b/GLOSSARY.md index c43c6d94..6476d601 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -14,13 +14,14 @@ Used for system maintenance and connectivity. * [**host-ctr**](sources/host-ctr): The program started by `host-containers@.service` for each host container. Its job is to start the specified host container on the “host” instance of containerd, which is separate from the “user” instance of containerd used for Kubernetes pods. -* [**logdog**](sources/logdog): A program that one can use to collect logs when things go wrong. +* [**logdog**](sources/logdog): A program that one can use to collect logs when things go wrong. * [**metricdog**](sources/metricdog): A program that sends anonymous health pings. * [**model**](sources/models): The API system has a data model defined for each variant, and this model is used by other programs to serialize and deserialize requests while maintaining safety around data types. * [**netdog**](sources/api/netdog): A program called by wicked to retrieve and write out network configuration from DHCP. * [**pluto**](sources/api/pluto): A setting generator called by sundog to find networking settings required by Kubernetes. * [**schnauzer**](sources/api/schnauzer): A setting generator called by sundog to build setting values that contain template variables referencing other settings. * **setting generator**: A binary that generates the default value of a setting. +* [**shibaken**](sources/api/shibaken): A setting generator called by sundog to populate the admin container's user-data with public keys from IMDS, when running in AWS. * [**signpost**](sources/updater/signpost): A program used to manipulate the GPT header of the OS disk; fields in the header are used by GRUB to determine the partition set we should boot from. * [**storewolf**](sources/api/storewolf): A program that sets up the data store for the API upon boot. * [**sundog**](sources/api/sundog): A program run during boot that generates any settings that depend on runtime system information. From 39dea2de3d498217a3a512aa2bd9f27c33f43112 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Thu, 25 Feb 2021 21:00:52 +0000 Subject: [PATCH 0414/1356] README: Add note to user-data section of custom host containers --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8f388c0b..96e4d1cd 100644 --- a/README.md +++ b/README.md @@ -288,7 +288,7 @@ You should [specify them in user data](#using-user-data). * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. -The following settings can be optionally set to customize the node labels and taints. +The following settings can be optionally set to customize the node labels and taints. * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. 
* `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, value and effect entries added when registering the node in the cluster. * Example user data for setting up labels and taints: @@ -341,7 +341,7 @@ These settings can be changed at any time. Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. * `settings.ecs.allow-privileged-containers`: Whether launching privileged containers is allowed on the container instance. If this value is set to false, privileged containers are not permitted. - Bottlerocket sets this value to false by default. + Bottlerocket sets this value to false by default. * `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. @@ -412,6 +412,8 @@ As long as you define the three fields above -- `source` with a URI, and `enable You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data`. (It was inspired by instance user data, but is entirely separate; it can be any data your host container feels like interpreting.) +Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. + Here's an example of adding a custom host container with API calls: ``` apiclient -u /settings -X PATCH -d '{"host-containers": {"custom": {"source": "MY-CONTAINER-URI", "enabled": true, "superpowered": false}}}' From fbd22f75f80986c3f889ccd4cd61268065e756b0 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 25 Feb 2021 15:23:17 -0800 Subject: [PATCH 0415/1356] kernel: update to 5.4.95 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index e5b7e51b..a912b663 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/49c95ec15f0cae8eda22d691177e2cc401c5d1a16ef31680b542de9bcad1490a/kernel-5.4.91-41.139.amzn2.src.rpm" -sha512 = "4500ab769265aa8c3ada672e0e12ad06d63ce5c22079bdc8850294d3bd0e5b673f391c404c133b1df25787c8ac1fc4f3724e0e35a5324ab6e4928da2aabc785a" +url = "https://cdn.amazonlinux.com/blobstore/cf12975d70edce3beb7042007609dc355b47ce27babb08b436829f7500de6b76/kernel-5.4.95-42.163.amzn2.src.rpm" +sha512 = "4dcfb86a2664edd9cf08d1f32b388ec6b9874ae62a21fc655aa80599270af5fdf15ff1f4dc250e36e7559a1c8a08901e428823d7c3e212cf13bada298fdf4dbd" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 83b193bb..23919a31 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.91 +Version: 5.4.95 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/49c95ec15f0cae8eda22d691177e2cc401c5d1a16ef31680b542de9bcad1490a/kernel-5.4.91-41.139.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/cf12975d70edce3beb7042007609dc355b47ce27babb08b436829f7500de6b76/kernel-5.4.95-42.163.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 987b448a87877bf2b2a98996eb04d71527c1aeb2 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 3 Mar 2021 16:49:28 -0800 Subject: [PATCH 0416/1356] early-boot-config: allow gzip compression of user data --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 96e4d1cd..afd0452a 100644 --- a/README.md +++ b/README.md @@ -258,6 +258,9 @@ Here's the user data to change the message of the day setting, as we did in the motd = "my own value!" ``` +If your user data is over the size limit of the platform (e.g. 16KiB for EC2) you can compress the contents with gzip. +(With [aws-cli](https://aws.amazon.com/cli/), you can use `--user-data fileb:///path/to/gz-file` to pass binary data.) + ### Description of settings Here we'll describe each setting you can change. From 3b84c640a72fd29767fd0ae0db6a79f5464339a4 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Mon, 8 Mar 2021 13:05:01 -0800 Subject: [PATCH 0417/1356] tools: update to tokio v1 reqwest v0.11 tough v0.11 --- tools/Cargo.lock | 745 +++++++++++--------------------- tools/buildsys/Cargo.toml | 2 +- tools/deny.toml | 5 +- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 27 +- tools/pubsys/src/aws/ssm/ssm.rs | 12 +- tools/pubsys/src/main.rs | 13 +- 7 files changed, 277 insertions(+), 529 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a9ae2a4b..4e6cfe18 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1,7 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 - [[package]] name = "addr2line" version = "0.14.1" @@ -32,7 +30,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -64,18 +62,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "async-trait" version = "0.1.42" @@ -95,7 +81,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -111,7 +97,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -124,18 +110,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -148,17 +122,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - [[package]] name = "block-buffer" version = "0.9.0" @@ -201,12 +164,6 @@ version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -234,12 +191,6 @@ version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -257,7 +208,7 @@ dependencies = [ "num-traits", "serde", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -277,13 +228,13 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.2.0" 
+version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e1073ef4f7d65b3df89ce61595ff7fcad286b1d82ff646c360547ac75df1901" +checksum = "b6fe5edca2eabd87be4dfee412700406e0865cafbbd36ffe89a1725ec1a0b579" dependencies = [ "argh", - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "futures", "indicatif", "rusoto_core", @@ -309,7 +260,7 @@ dependencies = [ "regex", "terminal_size", "unicode-width", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -318,17 +269,11 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - [[package]] name = "core-foundation" -version = "0.7.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" dependencies = [ "core-foundation-sys", "libc", @@ -336,9 +281,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpuid-bool" @@ -352,26 +297,14 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae8f328835f8f5a6ceb6a7842a7f2d0c03692adb5c889347235d59194731fe3" -dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "lazy_static", - "loom", + "cfg-if", ] [[package]] name = "crypto-mac" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" dependencies = [ "generic-array", "subtle", @@ -379,9 +312,9 @@ dependencies = [ [[package]] name = "ct-logs" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" +checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" dependencies = [ "sct", ] @@ -396,24 +329,24 @@ dependencies = [ ] [[package]] -name = "dirs" -version = "2.0.2" +name = "dirs-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 0.1.10", - "dirs-sys", + "cfg-if", + "dirs-sys-next", ] [[package]] -name = "dirs-sys" -version = "0.3.5" +name = "dirs-sys-next" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" 
dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -464,7 +397,7 @@ version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -483,22 +416,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.13" @@ -584,26 +501,13 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", "slab", ] -[[package]] -name = "generator" -version = "0.6.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fed24fd1e18827652b4d55652899a1e9da8e54d91624dc3437a5bc3a9f9a9c" -dependencies = [ - "cc", - "libc", - "log", - "rustversion", - "winapi 0.3.9", -] - [[package]] name = "generic-array" version = "0.14.4" @@ -614,26 +518,15 @@ dependencies = [ "version_check", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -657,11 +550,11 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.7" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -672,7 +565,6 @@ dependencies = [ "tokio", "tokio-util", "tracing", - "tracing-futures", ] [[package]] @@ -707,9 +599,9 @@ checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" [[package]] name = "hmac" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ "crypto-mac", "digest", @@ -721,18 +613,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = 
"2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -750,11 +642,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.10" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -764,7 +656,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.5", + "pin-project", "socket2", "tokio", "tower-service", @@ -774,35 +666,18 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.20.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ - "bytes 0.5.6", "ct-logs", "futures-util", "hyper", "log", - "rustls 0.17.0", + "rustls", "rustls-native-certs", "tokio", - "tokio-rustls 0.13.1", - "webpki", -] - -[[package]] -name = "hyper-rustls" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" -dependencies = [ - "bytes 0.5.6", - "futures-util", - "hyper", - "log", - "rustls 0.18.1", - "tokio", - "tokio-rustls 0.14.1", + "tokio-rustls", "webpki", ] @@ -840,12 +715,12 @@ dependencies = [ ] [[package]] -name = "iovec" -version = "0.1.4" +name = "instant" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -869,16 +744,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -892,23 +757,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] -name = "log" -version = "0.4.14" +name = "lock_api" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" dependencies = [ - "cfg-if 1.0.0", + "scopeguard", ] [[package]] -name = "loom" -version = "0.4.0" +name = "log" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d44c73b4636e497b4917eb21c33539efa3816741a2d3ff26c6316f1b529481a4" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", - "generator", - "scoped-tls", + "cfg-if", ] [[package]] @@ -935,16 +798,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.4.3" @@ -957,56 +810,15 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.23" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" -dependencies = [ - "log", - "mio", - "miow 0.3.6", - "winapi 0.3.9", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "miow", + "ntapi", + "winapi", ] [[package]] @@ -1016,18 +828,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", - "winapi 0.3.9", -] - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1036,6 +837,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1113,7 +923,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb233f06c2307e1f5ce2ecad9f8121cffbbee2c95428f44ea85222e460d0d213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", ] [[package]] @@ -1131,7 +966,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" 
dependencies = [ - "base64 0.13.0", + "base64", "once_cell", "regex", ] @@ -1151,33 +986,13 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = "pin-project" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" -dependencies = [ - "pin-project-internal 0.4.27", -] - [[package]] name = "pin-project" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.5", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] @@ -1191,12 +1006,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1291,8 +1100,9 @@ dependencies = [ "tempfile", "tinytemplate", "tokio", + "tokio-stream", "toml", - "tough", + "tough 0.11.0", "tough-kms", "tough-ssm", "update_metadata", @@ -1366,7 +1176,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom 0.2.2", + "getrandom", ] [[package]] @@ -1378,12 +1188,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - [[package]] name = "redox_syscall" version = "0.2.5" @@ -1395,13 +1199,12 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", + "getrandom", + "redox_syscall", ] [[package]] @@ -1428,37 +1231,36 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", "http", "http-body", "hyper", - "hyper-rustls 0.21.0", + "hyper-rustls", "ipnet", "js-sys", "lazy_static", "log", "mime", - "mime_guess", "percent-encoding", - "pin-project-lite 0.2.4", - "rustls 0.18.1", + "pin-project-lite", + "rustls", "serde", "serde_urlencoded 0.7.0", "tokio", - "tokio-rustls 0.14.1", + "tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1479,28 +1281,25 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 
0.3.9", + "winapi", ] [[package]] name = "rusoto_core" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e977941ee0658df96fca7291ecc6fc9a754600b21ad84b959eb1dbbc9d5abcc7" +checksum = "02aff20978970d47630f08de5f0d04799497818d16cafee5aec90c4b4d0806cf" dependencies = [ "async-trait", - "base64 0.12.3", - "bytes 0.5.6", + "base64", + "bytes", "crc32fast", "futures", "http", "hyper", - "hyper-rustls 0.20.0", + "hyper-rustls", "lazy_static", "log", - "md5", - "percent-encoding", - "pin-project 0.4.27", "rusoto_credential", "rusoto_signature", "rustc_version", @@ -1512,17 +1311,15 @@ dependencies = [ [[package]] name = "rusoto_credential" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ac05563f83489b19b4d413607a30821ab08bbd9007d14fa05618da3ef09d8b" +checksum = "8e91e4c25ea8bfa6247684ff635299015845113baaa93ba8169b9e565701b58e" dependencies = [ "async-trait", "chrono", - "dirs", + "dirs-next", "futures", "hyper", - "pin-project 0.4.27", - "regex", "serde", "serde_json", "shlex", @@ -1532,12 +1329,12 @@ dependencies = [ [[package]] name = "rusoto_ebs" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec4b9c1321c10ac639bb285e46bc558d24937b1744183c0da17f1acc752fe01c" +checksum = "d42287c611c85d1ad06ac41fb91bad8a87fd8607836c5135ae3254be36c19402" dependencies = [ "async-trait", - "bytes 0.5.6", + "bytes", "futures", "rusoto_core", "serde", @@ -1547,12 +1344,12 @@ dependencies = [ [[package]] name = "rusoto_ec2" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5145366791ba9097d917330944ef460e1ebd67da871a8e04ad9f51cecc64375f" +checksum = "83733a43d1369fb58e5d7198e31d6196de22539f2cd87c4168f5e25a794612d6" dependencies = [ "async-trait", - "bytes 0.5.6", + "bytes", "futures", "rusoto_core", "serde_urlencoded 0.6.1", @@ -1561,12 +1358,12 @@ dependencies = [ [[package]] name = "rusoto_kms" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111b99b940b1b02f5a98a5fcc96467a24ab899c43c1caff60d4a863342798c6e" +checksum = "8e5655f80886a4b0f6f57ca0921e38b4f96e5c70135dd8d6d2a7ee8e70f0e013" dependencies = [ "async-trait", - "bytes 0.5.6", + "bytes", "futures", "rusoto_core", "serde", @@ -1575,12 +1372,12 @@ dependencies = [ [[package]] name = "rusoto_signature" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a740a88dde8ded81b6f2cff9cd5e054a5a2e38a38397260f7acdd2c85d17dd" +checksum = "5486e6b1673ab3e0ba1ded284fb444845fe1b7f41d13989a54dd60f62a7b2baa" dependencies = [ - "base64 0.12.3", - "bytes 0.5.6", + "base64", + "bytes", "futures", "hex", "hmac", @@ -1589,7 +1386,7 @@ dependencies = [ "log", "md5", "percent-encoding", - "pin-project 0.4.27", + "pin-project-lite", "rusoto_credential", "rustc_version", "serde", @@ -1600,12 +1397,12 @@ dependencies = [ [[package]] name = "rusoto_ssm" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e4950a5600f4aab2eeb1f525d7843acbfbc7a720275d26c2afcddbb112ffd17" +checksum = "d08d672711b9e8dad45565c86ecc93a0a1bfbab57a2547a428fda5486473405d" dependencies = [ "async-trait", - "bytes 0.5.6", + "bytes", "futures", "rusoto_core", "serde", @@ -1614,32 +1411,19 @@ dependencies = [ [[package]] name = "rusoto_sts" 
-version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3815b8c0fc1c50caf9e87603f23daadfedb18d854de287b361c69f68dc9d49e0" +checksum = "2f93005e0c3b9e40a424b50ca71886d2445cc19bb6cdac3ac84c2daff482eb59" dependencies = [ "async-trait", - "bytes 0.5.6", + "bytes", "chrono", "futures", "rusoto_core", "serde_urlencoded 0.6.1", - "tempfile", "xml-rs", ] -[[package]] -name = "rust-argon2" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" -dependencies = [ - "base64 0.13.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - [[package]] name = "rustc-demangle" version = "0.1.18" @@ -1657,24 +1441,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.11.0", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" -dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1683,22 +1454,16 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls 0.17.0", + "rustls", "schannel", "security-framework", ] -[[package]] -name = "rustversion" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" - [[package]] name = "ryu" version = "1.0.5" @@ -1721,14 +1486,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] -name = "scoped-tls" -version = "1.0.0" +name = "scopeguard" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" @@ -1742,9 +1507,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.4" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +checksum = "2dfd318104249865096c8da1dfabf09ddbb6d0330ea176812a62ec75e40c4166" dependencies = [ "bitflags", "core-foundation", @@ -1755,9 +1520,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.3" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" dependencies = [ "core-foundation-sys", "libc", @@ -1819,9 +1584,9 @@ dependencies = [ [[package]] name = 
"serde_json" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -1874,7 +1639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" dependencies = [ "block-buffer", - "cfg-if 1.0.0", + "cfg-if", "cpuid-bool", "digest", "opaque-debug", @@ -1887,7 +1652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1928,6 +1693,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +[[package]] +name = "smallvec" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + [[package]] name = "snafu" version = "0.6.10" @@ -1956,9 +1727,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2078,12 +1849,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "rand", - "redox_syscall 0.2.5", + "redox_syscall", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2102,7 +1873,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86ca8ced750734db02076f44132d802af0b33b09942331f4459dde8636fd2406" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2130,7 +1901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2145,7 +1916,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2173,9 +1944,9 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ada8616fad06a2d0c455adc530de4ef57605a8120cc65da9653e0e9623ca74" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", @@ -2198,33 +1969,29 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" +checksum = "8d56477f6ed99e10225f38f9f75f872f29b8b8bd8c0b946f63345bb144e9eeda" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "autocfg", + "bytes", "libc", "memchr", "mio", - "mio-named-pipes", - "mio-uds", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "parking_lot", + "pin-project-lite", "signal-hook-registry", - "slab", "tokio-macros", - 
"winapi 0.3.9", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2233,39 +2000,37 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.13.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15cb62a0d2770787abc96e99c1cd98fcf17f94959f3af63ca85bdfb203f051b4" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", - "rustls 0.17.0", + "rustls", "tokio", "webpki", ] [[package]] -name = "tokio-rustls" -version = "0.14.1" +name = "tokio-stream" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "1981ad97df782ab506a1f43bf82c967326960d278acf3bf8279809648c3ff3ea" dependencies = [ "futures-core", - "rustls 0.18.1", + "pin-project-lite", "tokio", - "webpki", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", + "pin-project-lite", "tokio", ] @@ -2283,6 +2048,30 @@ name = "tough" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dc3534fa46badec98ac633028f47a3cea590e9c9a63d85bd15a0436f8b6eb94" +dependencies = [ + "chrono", + "dyn-clone", + "globset", + "hex", + "log", + "olpc-cjson", + "pem", + "ring", + "serde", + "serde_json", + "serde_plain", + "snafu", + "tempfile", + "untrusted", + "url", + "walkdir", +] + +[[package]] +name = "tough" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "203cc46930159ea049e9308ca0e31815bb57f761e6345fa5d42dbcbd06c1809c" dependencies = [ "chrono", "dyn-clone", @@ -2305,9 +2094,9 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aeb5ad37ac31ba5b10f4f53a7c1073a1a28a88dd4537c9abff8718148b95f37" +checksum = "b99dba0d219a96733df48372dfe315b4d5bbfa9a151649c6bba9d32a0274aed8" dependencies = [ "pem", "ring", @@ -2316,14 +2105,14 @@ dependencies = [ "rusoto_kms", "snafu", "tokio", - "tough", + "tough 0.11.0", ] [[package]] name = "tough-ssm" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cb7ac27150db2f2321adb2ac05a9b2e1f8d9ad739262c2779f298083e96eea" +checksum = "3abe301282442a697b40ae02f845bbf0d340e959a610c02d03d032258d0ce0fe" dependencies = [ "rusoto_core", "rusoto_credential", @@ -2332,7 +2121,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough", + "tough 0.11.0", ] [[package]] @@ -2347,9 +2136,8 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", + "cfg-if", + "pin-project-lite", "tracing-core", ] @@ -2362,16 
+2150,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project 1.0.5", - "tracing", -] - [[package]] name = "try-lock" version = "0.2.3" @@ -2390,15 +2168,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2454,7 +2223,7 @@ dependencies = [ "serde_plain", "snafu", "toml", - "tough", + "tough 0.10.0", ] [[package]] @@ -2489,7 +2258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.9", + "winapi", "winapi-util", ] @@ -2503,12 +2272,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.10.2+wasi-snapshot-preview1" @@ -2521,7 +2284,7 @@ version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2548,7 +2311,7 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2605,19 +2368,13 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ "webpki", ] -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2628,12 +2385,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2646,7 +2397,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2661,17 +2412,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 51be8fd6..b7a33f0b 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -12,7 +12,7 @@ exclude = ["README.md"] duct = "0.13.0" hex = "0.4.0" rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } -reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } +reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" sha2 = "0.9" diff --git a/tools/deny.toml b/tools/deny.toml index e7afc76f..d9ea74c8 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -9,12 +9,13 @@ default = "deny" # We want really high confidence when inferring licenses from text confidence-threshold = 0.93 +# Commented license types are allowed but not currently used allow = [ "Apache-2.0", - "BSD-2-Clause", + # "BSD-2-Clause", "BSD-3-Clause", "BSL-1.0", - "CC0-1.0", + # "CC0-1.0", "ISC", "MIT", "OpenSSL", diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 8da543cf..4e95b6e6 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -10,7 +10,7 @@ publish = false hex = "0.4.0" log = "0.4" pubsys-config = { path = "../pubsys-config/" } -reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } +reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } sha2 = "0.9" shell-words = "1.0" simplelog = "0.9" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 8cf3ad69..b2f7d5a1 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -10,7 +10,7 @@ publish = false async-trait = "0.1.36" chrono = "0.4" clap = "2.33" -coldsnap = { version = "0.2", default-features = false, features = ["rusoto-rustls"]} +coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} pubsys-config = { path = "../pubsys-config/" } futures = "0.3.5" indicatif = "0.15.0" @@ -18,14 +18,14 @@ lazy_static = "1.4" log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } # Need to bring in reqwest with a TLS feature so tough can support TLS repos. 
-reqwest = { version = "0.10.1", default-features = false, features = ["rustls-tls", "blocking"] } -rusoto_core = { version = "0.45.0", default-features = false, features = ["rustls"] } -rusoto_credential = "0.45.0" -rusoto_ebs = { version = "0.45.0", default-features = false, features = ["rustls"] } -rusoto_ec2 = { version = "0.45.0", default-features = false, features = ["rustls"] } -rusoto_signature = "0.45.0" -rusoto_ssm = { version = "0.45.0", default-features = false, features = ["rustls"] } -rusoto_sts = { version = "0.45.0", default-features = false, features = ["rustls"] } +reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } +rusoto_core = { version = "0.46.0", default-features = false, features = ["rustls"] } +rusoto_credential = "0.46.0" +rusoto_ebs = { version = "0.46.0", default-features = false, features = ["rustls"] } +rusoto_ec2 = { version = "0.46.0", default-features = false, features = ["rustls"] } +rusoto_signature = "0.46.0" +rusoto_ssm = { version = "0.46.0", default-features = false, features = ["rustls"] } +rusoto_sts = { version = "0.46.0", default-features = false, features = ["rustls"] } simplelog = "0.9.0" snafu = "0.6" semver = "0.11.0" @@ -33,11 +33,12 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } tinytemplate = "1.1" -tokio = { version = "0.2.21", features = ["time"] } +tokio = { version = "1", features = ["full"] } +tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" -tough = { version = "0.10", features = ["http"] } -tough-kms = "0.2" -tough-ssm = "0.5" +tough = { version = "0.11", features = ["http"] } +tough-kms = "0.3" +tough-ssm = "0.6" update_metadata = { path = "../../sources/updater/update_metadata/" } url = { version = "2.1.0", features = ["serde"] } tempfile = "3.1" diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 0d408793..3f2dc472 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -12,7 +12,6 @@ use rusoto_ssm::{ use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::time::Duration; -use tokio::time::throttle; /// Fetches the values of the given SSM keys using the given clients // TODO: We can batch GET requests so throttling is less likely here, but if we need to handle @@ -220,7 +219,10 @@ pub(crate) async fn set_parameters( // need the region again here.) let mut throttled_streams = Vec::new(); for (_region, request_list) in regional_requests { - throttled_streams.push(throttle(request_interval, stream::iter(request_list))); + throttled_streams.push(Box::pin(tokio_stream::StreamExt::throttle( + stream::iter(request_list), + request_interval, + ))); } // Run all regions in parallel and wait for responses. @@ -355,7 +357,11 @@ mod error { missing: String, }, - #[snafu(display("Failed to set {} of {} parameters; see above", failure_count, total_count))] + #[snafu(display( + "Failed to set {} of {} parameters; see above", + failure_count, + total_count + ))] SetParameters { failure_count: usize, total_count: usize, diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 9784238d..903eec30 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -39,13 +39,12 @@ fn run() -> Result<()> { let args = Args::from_args(); // SimpleLogger will send errors to stderr and anything less to stdout. 
- SimpleLogger::init(args.log_level, LogConfig::default()) - .context(error::Logger)?; + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), SubCommand::ValidateRepo(ref validate_repo_args) => { - let mut rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { repo::validate_repo::run(&args, &validate_repo_args) .await @@ -60,11 +59,11 @@ fn run() -> Result<()> { repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepo) } SubCommand::Ami(ref ami_args) => { - let mut rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) } SubCommand::PublishAmi(ref publish_args) => { - let mut rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::publish_ami::run(&args, &publish_args) .await @@ -72,11 +71,11 @@ fn run() -> Result<()> { }) } SubCommand::Ssm(ref ssm_args) => { - let mut rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::ssm::run(&args, &ssm_args).await.context(error::Ssm) }) } SubCommand::PromoteSsm(ref promote_args) => { - let mut rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::Runtime)?; rt.block_on(async { aws::promote_ssm::run(&args, &promote_args) .await From 32b2c7442e0500216d68e8a1252e53d6651a7cb4 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Fri, 12 Mar 2021 12:14:07 -0800 Subject: [PATCH 0418/1356] update_metadata: remove tough dependency Removes the update_metadata dependency on tough. This removes a lot of heavyweight dependencies from update_metadata, including tokio, reqwest, hyper, and tough. update_metadata is used cross-workspace by pubsys, so removing tough from update_metadata keeps the cross-workspace dependency duplication to a minimum. 
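One way to see the cross-workspace duplication this change is trying to avoid is to ask Cargo
for crates that appear more than once in the dependency graph; a minimal sketch, assuming it is
run from the `tools/` directory of a checkout:

```
# Lists crates present in multiple versions, e.g. the two copies of tough
# (0.10 and 0.11) that existed before this change.
cargo tree --duplicates
```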
--- tools/Cargo.lock | 31 +++---------------------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 4e6cfe18..e0d089e5 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1102,7 +1102,7 @@ dependencies = [ "tokio", "tokio-stream", "toml", - "tough 0.11.0", + "tough", "tough-kms", "tough-ssm", "update_metadata", @@ -2043,30 +2043,6 @@ dependencies = [ "serde", ] -[[package]] -name = "tough" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dc3534fa46badec98ac633028f47a3cea590e9c9a63d85bd15a0436f8b6eb94" -dependencies = [ - "chrono", - "dyn-clone", - "globset", - "hex", - "log", - "olpc-cjson", - "pem", - "ring", - "serde", - "serde_json", - "serde_plain", - "snafu", - "tempfile", - "untrusted", - "url", - "walkdir", -] - [[package]] name = "tough" version = "0.11.0" @@ -2105,7 +2081,7 @@ dependencies = [ "rusoto_kms", "snafu", "tokio", - "tough 0.11.0", + "tough", ] [[package]] @@ -2121,7 +2097,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough 0.11.0", + "tough", ] [[package]] @@ -2223,7 +2199,6 @@ dependencies = [ "serde_plain", "snafu", "toml", - "tough 0.10.0", ] [[package]] From 5f2a0a51d7864494b0554dedbf4dad38cae0328c Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Mon, 15 Mar 2021 13:09:09 -0700 Subject: [PATCH 0419/1356] update default variant to aws-k8s-1.19 Make the build system's default variant aws-k8s-1.19, and update documentation that refers to the previous default variant. --- BUILDING.md | 2 +- README.md | 15 ++++++++++++--- tools/pubsys/policies/ssm/README.md | 4 ++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index c9dee13b..e8b06945 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -72,7 +72,7 @@ To build an image, run: cargo make ``` -This will build an image for the default variant, `aws-k8s-1.17`. +This will build an image for the default variant, `aws-k8s-1.19`. All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. diff --git a/README.md b/README.md index afd0452a..8e7cb258 100644 --- a/README.md +++ b/README.md @@ -46,10 +46,19 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s-1.17` variant will produce an image named `bottlerocket-aws-k8s-1.17-x86_64--.img`. +For example, an `x86_64` build of the `aws-k8s-1.19` variant will produce an image named `bottlerocket-aws-k8s-1.19-x86_64--.img`. -Our first supported variants, `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17`, support EKS as described above. -We also have a new `aws-ecs-1` variant designed to work with ECS. 
+The following variants support EKS, as described above: + +- `aws-k8s-1.15` +- `aws-k8s-1.16` +- `aws-k8s-1.17` +- `aws-k8s-1.18` +- `aws-k8s-1.19` + +We also have a variant designed to work with ECS, currently in preview: + +- `aws-ecs-1` ## Architectures diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md index 28560c38..d4751b03 100644 --- a/tools/pubsys/policies/ssm/README.md +++ b/tools/pubsys/policies/ssm/README.md @@ -14,11 +14,11 @@ value = "{image_id}" The `name` and `value` can contain template variables that will be replaced with information from the current build and from the AMI registered from that build. The available variables include: -* `variant`, for example "aws-k8s-1.17" +* `variant`, for example "aws-ecs-1" * `arch`, for example "x86_64" or "arm64". * Note: "amd64" and "aarch64" are mapped to "x86_64" and "arm64", respectively, to match the names used by EC2. * `image_id`, for example "ami-0123456789abcdef0" -* `image_name`, for example "bottlerocket-aws-k8s-1.17-x86_64-v0.5.0-e0ddf1b" +* `image_name`, for example "bottlerocket-aws-ecs-1-x86_64-v0.5.0-e0ddf1b" * `image_version`, for example "0.5.0-e0ddf1b" * `region`, for example "us-west-2" From 6477d20f5b01c5554700490f60c7a2ac00636460 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 25 Mar 2021 13:41:02 -0700 Subject: [PATCH 0420/1356] README: use `apiclient set` in introduction, explain raw mode separately --- README.md | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 8e7cb258..c7663f47 100644 --- a/README.md +++ b/README.md @@ -145,8 +145,7 @@ enable-admin-container If you're using a custom control container, or want to make the API calls directly, you can enable the admin container like this instead: ``` -apiclient -u /settings -m PATCH -d '{"host-containers": {"admin": {"enabled": true}}}' -apiclient -u /tx/commit_and_apply -m POST +apiclient set host-containers.admin.enabled=true ``` Once you're in the admin container, you can run `sheltie` to get a full root shell in the Bottlerocket host. @@ -227,33 +226,13 @@ For example, here's an abbreviated response: {"motd":"...", {"kubernetes": ...}} ``` -You can change settings by sending back the same type of JSON data in a PATCH request. -This can include any number of settings changes. +You can change settings like this: ``` -apiclient -m PATCH -u /settings -d '{"motd": "my own value!"}' +apiclient set motd="hi there" kubernetes.node-labels.environment=test ``` -This will *stage* the setting in a "pending" area - a transaction. -You can see all your pending settings like this: -``` -apiclient -u /tx -``` - -To *commit* the settings, and let the system apply them to any relevant configuration files or services, do this: -``` -apiclient -m POST -u /tx/commit_and_apply -``` - -Behind the scenes, these commands are working with the "default" transaction. -This keeps the interface simple. -System services use their own transactions, so you don't have to worry about conflicts. -For example, there's a "bottlerocket-launch" transaction used to coordinate changes at startup. - -If you want to group sets of changes yourself, pick a transaction name and append a `tx` parameter to the URLs above. -For example, if you want the name "FOO", you can `PATCH` to `/settings?tx=FOO` and `POST` to `/tx/commit_and_apply?tx=FOO`. -(Transactions are created automatically when used, and are cleaned up on reboot.) 
- -For more details on using the client, see the [apiclient documentation](sources/api/apiclient/). +You can also use a JSON input mode to help change many related settings at once, and a "raw" mode if you want more control over how the settings are committed and applied to the system. +See the [apiclient README](sources/api/apiclient/) for details. #### Using user data @@ -428,8 +407,10 @@ Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies Here's an example of adding a custom host container with API calls: ``` -apiclient -u /settings -X PATCH -d '{"host-containers": {"custom": {"source": "MY-CONTAINER-URI", "enabled": true, "superpowered": false}}}' -apiclient -u /tx/commit_and_apply -X POST +apiclient set \ + host-containers.custom.source=MY-CONTAINER-URI \ + host-containers.custom.enabled=true \ + host-containers.custom.superpowered=false ``` Here's the same example, but with the settings you'd add to user data: From de289a2c819d1ad829e60dfffbd3a1032c505698 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 25 Mar 2021 14:31:13 -0700 Subject: [PATCH 0421/1356] README: document `apiclient update` as primary CLI update method --- README.md | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c7663f47..e266388e 100644 --- a/README.md +++ b/README.md @@ -170,26 +170,35 @@ For the ECS preview variant of Bottlerocket, we recommend updating hosts using o #### Update API -The [Bottlerocket API](#api) includes methods for checking and starting system updates. You can read more about the update APIs in our [update system documentation](sources/updater/README.md#update-api). +The [Bottlerocket API](#api) includes methods for checking and starting system updates. +You can read more about the update APIs in our [update system documentation](sources/updater/README.md#update-api). -#### Updog +apiclient knows how to handle those update APIs for you, and you can run it from the [control](#control-container) or [admin](#admin-container) containers. -You can update Bottlerocket using a CLI tool, `updog`, if you [connect through the admin container](#admin-container). - -Here's how you can see whether there's an update: +To see what updates are available: +``` +apiclient update check +``` +If an update is available, it will show up in the `chosen_update` field. +The `available_updates` field will show the full list of available versions, including older versions, because Bottlerocket supports safely rolling back. +To apply the latest update: ``` -updog check-update +apiclient update apply ``` -Here's how you initiate an update: +The next time you reboot, you'll start up in the new version, and system configuration will be automatically [migrated](sources/api/migration/). +To reboot right away: +``` +apiclient reboot +``` +If you're confident about updating, the `apiclient update apply` command has `--check` and `--reboot` flags to combine the above actions, so you can accomplish all of the above steps like this: ``` -updog update -reboot +apiclient update apply --check --reboot ``` -(If you know what you're doing and want to update *now*, you can run `updog update --reboot --now`) +See the [apiclient documentation](sources/api/apiclient/) for more details. #### Bottlerocket Update Operator @@ -205,6 +214,8 @@ signpost rollback-to-inactive reboot ``` +This doesn't require any external communication, so it's quicker than `apiclient`, and it's made to be as reliable as possible. 
+ ## Settings Here we'll describe the settings you can configure on your Bottlerocket instance, and how to do it. From 7c686a8ac29d5f071965e5e8114c35ac1e0c698c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 28 Mar 2021 04:12:28 +0000 Subject: [PATCH 0422/1356] build: ignore more artifacts during builds We use the build directory for output other than package builds, but only the RPM packages need to be included in the build context that Docker creates. Switch to an allowlist approach so that we exclude new extensions by default. Signed-off-by: Ben Cressey --- .dockerignore | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.dockerignore b/.dockerignore index 7c5790e1..e19dcedb 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,7 @@ /.git /.gomodcache -/build/**/*.img -/build/**/*.lz4 -/build/**/*.xz -/build/**/*.tar -/build/**/*-debuginfo-*.rpm -/build/**/*-debugsource-*.rpm +/build/* +!/build/rpms/*.rpm +/build/rpms/*-debuginfo-*.rpm +/build/rpms/*-debugsource-*.rpm **/target/* From 6097457ddacce60f1d148777bdf1db35be8f9ed2 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 26 Mar 2021 22:50:30 +0000 Subject: [PATCH 0423/1356] host-ctr: Add "current" generic persistent storage location This adds an additional mount of the /local/host-containers/NAME directory as /.bottlerocket/host-containers/current to provide a generic persistent storage location that isn't tied to the name of the container. This makes it easier for scripts to utilize the storage. --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e266388e..36429767 100644 --- a/README.md +++ b/README.md @@ -411,7 +411,7 @@ If you're running a Kubernetes variant, the no-proxy list will automatically inc Beyond just changing the settings above to affect the `admin` and `control` containers, you can add and remove host containers entirely. As long as you define the three fields above -- `source` with a URI, and `enabled` and `superpowered` with true/false -- you can add host containers with an API call or user data. -You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data`. +You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current/user-data`. (It was inspired by instance user data, but is entirely separate; it can be any data your host container feels like interpreting.) Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. @@ -436,7 +436,8 @@ If the `enabled` flag is `true`, it will be started automatically. All host containers will have the `apiclient` binary available at `/usr/local/bin/apiclient` so they're able to [interact with the API](#using-the-api-client). -In addition, all host containers come with persistent storage at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` that is persisted across reboots and container start/stop cycles. 
+In addition, all host containers come with persistent storage that survives reboots and container start/stop cycles. +It's available at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current`. The default `admin` host-container, for example, stores its SSH host keys under `/.bottlerocket/host-containers/admin/etc/ssh/`. There are a few important caveats to understand about host containers: From 33450a1645b1a11e2b1b75334cfb7ca229876e9c Mon Sep 17 00:00:00 2001 From: Michael Still Date: Mon, 29 Mar 2021 17:26:21 +1100 Subject: [PATCH 0424/1356] Add support for qcow2 as an image format. qcow2 is pretty common in terms of image formats. Its what OpenStack, KVM, and so on use as their VM image format. This patch adds support for creating images in that format. I have verified the output images by booting them on a KVM system. --- tools/buildsys/src/builder.rs | 1 + tools/buildsys/src/manifest.rs | 1 + tools/rpm2img | 14 ++++++-------- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 7040ef59..018ae32a 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -98,6 +98,7 @@ impl VariantBuilder { "IMAGE_FORMAT", match image_format { Some(ImageFormat::Raw) | None => "raw", + Some(ImageFormat::Qcow2) => "qcow2", Some(ImageFormat::Vmdk) => "vmdk", }, ); diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index a606048f..a886d5b4 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -163,6 +163,7 @@ pub(crate) struct BuildVariant { #[derive(Deserialize, Debug)] #[serde(rename_all = "lowercase")] pub(crate) enum ImageFormat { + Qcow2, Raw, Vmdk, } diff --git a/tools/rpm2img b/tools/rpm2img index e63bfe3a..17d17c85 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -15,7 +15,7 @@ for opt in "$@"; do done case "${OUTPUT_FMT}" in - raw|vmdk) ;; + raw|qcow2|vmdk) ;; *) echo "unexpected image output format '${OUTPUT_FMT}'" >&2 exit 1 @@ -222,22 +222,20 @@ sgdisk -v "${DATA_IMAGE}" if [[ ${OUTPUT_FMT} == "raw" ]]; then lz4 -vc "${DISK_IMAGE}" >"${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.img.lz4" lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" - chown 1000:1000 "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.img.lz4" \ - "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" +elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then + qemu-img convert -f raw -O qcow2 "${DISK_IMAGE}" "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.qcow2" + qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.qcow2" elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then # Stream optimization is required for creating an Open Virtual Appliance (OVA) qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DISK_IMAGE}" "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.vmdk" qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" - chown 1000:1000 "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.vmdk" \ - "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" fi lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" -chown 1000:1000 "${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" \ - "${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" \ - "${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" + +find "${OUTPUT_DIR}" -type f -print -exec chown 1000:1000 {} \; # Clean up temporary files to 
reduce size of layer.
 rm -f "${PACKAGE_DIR}"/*.rpm

From ee390d6f2467246bb6ceb9f263b895b7e0b54921 Mon Sep 17 00:00:00 2001
From: Zac Mrowicki
Date: Thu, 18 Mar 2021 20:41:27 +0000
Subject: [PATCH 0425/1356] buildsys: Add ability for variants to specify
 supported architectures

This change adds an additional key that can be specified in a variant's
`Cargo.toml`, `supported-arches`. It is a list and the supported enum
values are `x86_64` and `aarch64`. If `supported-arches` is specified,
the current `BUILDSYS_ARCH` is checked against the list. If
`supported-arches` is not specified, the build continues as before.
---
 tools/buildsys/src/main.rs     | 52 ++++++++++++++++++++++++++++++++--
 tools/buildsys/src/manifest.rs | 25 ++++++++++++++++
 2 files changed, 74 insertions(+), 3 deletions(-)

diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs
index ac4f8578..e6b3a18a 100644
--- a/tools/buildsys/src/main.rs
+++ b/tools/buildsys/src/main.rs
@@ -16,10 +16,10 @@ mod spec;
 
 use builder::{PackageBuilder, VariantBuilder};
 use cache::LookasideCache;
-use manifest::ManifestInfo;
+use manifest::{ManifestInfo, SupportedArch};
 use project::ProjectInfo;
 use serde::Deserialize;
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
 use spec::SpecInfo;
 use std::env;
 use std::path::PathBuf;
@@ -56,6 +56,22 @@
             var: String,
             source: std::env::VarError,
         },
+
+        #[snafu(display("Unknown architecture: '{}'", arch))]
+        UnknownArch {
+            arch: String,
+            source: serde_plain::Error,
+        },
+
+        #[snafu(display(
+            "Unsupported architecture {}, this variant supports {}",
+            arch,
+            supported_arches.join(", ")
+        ))]
+        UnsupportedArch {
+            arch: String,
+            supported_arches: Vec<String>,
+        },
     }
 }
@@ -102,10 +118,17 @@ fn run() -> Result<()> {
 }
 
 fn build_package() -> Result<()> {
-    let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into();
     let manifest_file = "Cargo.toml";
     println!("cargo:rerun-if-changed={}", manifest_file);
 
+    let root_dir: PathBuf = getenv("BUILDSYS_ROOT_DIR")?.into();
+    let variant = getenv("BUILDSYS_VARIANT")?;
+    let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file);
+    let variant_manifest =
+        ManifestInfo::new(variant_manifest_path).context(error::ManifestParse)?;
+    supported_arch(&variant_manifest)?;
+
+    let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into();
     let manifest =
         ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParse)?;
@@ -165,6 +188,8 @@ fn build_variant() -> Result<()> {
     let manifest =
         ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParse)?;
 
+    supported_arch(&manifest)?;
+
     if let Some(packages) = manifest.included_packages() {
         let image_format = manifest.image_format();
         VariantBuilder::build(&packages, image_format).context(error::BuildAttempt)?;
@@ -175,6 +200,27 @@
     Ok(())
 }
 
+/// Ensure that the current arch is supported by the current variant
+fn supported_arch(manifest: &ManifestInfo) -> Result<()> {
+    if let Some(supported_arches) = manifest.supported_arches() {
+        let arch = getenv("BUILDSYS_ARCH")?;
+        let current_arch: SupportedArch =
+            serde_plain::from_str(&arch).context(error::UnknownArch { arch: &arch })?;
+
+        ensure!(
+            supported_arches.contains(&current_arch),
+            error::UnsupportedArch {
+                arch: &arch,
+                supported_arches: supported_arches
+                    .into_iter()
+                    .map(|a| a.to_string())
+                    .collect::<Vec<String>>()
+            }
+        )
+    }
+    Ok(())
+}
+
 /// Retrieve a variable that we expect to be set in the environment.
 fn getenv(var: &str) -> Result<String> {
     env::var(var).context(error::Environment { var })
diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs
index a886d5b4..c0563a4f 100644
--- a/tools/buildsys/src/manifest.rs
+++ b/tools/buildsys/src/manifest.rs
@@ -65,6 +65,8 @@ use error::Result;
 use serde::Deserialize;
 use snafu::ResultExt;
+use std::collections::HashSet;
+use std::fmt;
 use std::fs;
 use std::path::{Path, PathBuf};
@@ -115,6 +117,12 @@ impl ManifestInfo {
         self.build_variant().and_then(|b| b.image_format.as_ref())
     }
 
+    /// Convenience method to return the supported architectures for this variant.
+    pub(crate) fn supported_arches(&self) -> Option<&HashSet<SupportedArch>> {
+        self.build_variant()
+            .and_then(|b| b.supported_arches.as_ref())
+    }
+
     /// Helper methods to navigate the series of optional struct fields.
     fn build_package(&self) -> Option<&BuildPackage> {
         self.package
@@ -158,6 +166,7 @@ pub(crate) struct BuildPackage {
 pub(crate) struct BuildVariant {
     pub(crate) included_packages: Option<Vec<String>>,
     pub(crate) image_format: Option<ImageFormat>,
+    pub(crate) supported_arches: Option<HashSet<SupportedArch>>,
 }
 
 #[derive(Deserialize, Debug)]
@@ -168,6 +177,13 @@ pub(crate) enum ImageFormat {
     Vmdk,
 }
 
+#[derive(Deserialize, Debug, PartialEq, Eq, Hash)]
+#[serde(rename_all = "lowercase")]
+pub(crate) enum SupportedArch {
+    X86_64,
+    Aarch64,
+}
+
 #[derive(Deserialize, Debug)]
 #[serde(rename_all = "kebab-case")]
 pub(crate) struct ExternalFile {
@@ -175,3 +191,12 @@ pub(crate) struct ExternalFile {
     pub(crate) sha512: String,
     pub(crate) url: String,
 }
+
+impl fmt::Display for SupportedArch {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            SupportedArch::X86_64 => write!(f, "x86_64"),
+            SupportedArch::Aarch64 => write!(f, "aarch64"),
+        }
+    }
+}

From a774e003f296863102fdc79b476b1c5d84999182 Mon Sep 17 00:00:00 2001
From: gthao313
Date: Wed, 24 Feb 2021 20:18:37 +0000
Subject: [PATCH 0426/1356] Add new setting: Eviction Hard

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 36429767..af1759e6 100644
--- a/README.md
+++ b/README.md
@@ -308,6 +308,13 @@ The following settings are optional and allow you to further configure your clus
 * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`.
 * `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants.
 * `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored.
+* `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction.
+  Remember to quote signals (since they all contain ".") and to quote all values.
+  * Example user data for setting up eviction hard:
+    ```
+    [settings.kubernetes.eviction-hard]
+    "memory.available" = "15%"
+    ```
 
 You can also optionally specify static pods for your node with the following settings.
 Static pods can be particularly useful when running in standalone mode.
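The `supported-arches` key introduced by the buildsys change above (PATCH 0425) is read from the variant's `Cargo.toml` metadata.
A minimal sketch of what a variant manifest might declare, assuming the `[package.metadata.build-variant]` table that buildsys already parses; the path and package list are illustrative:

```
# Hypothetical variants/<name>/Cargo.toml excerpt.
[package.metadata.build-variant]
included-packages = ["release"]
# New: restrict this variant to a single architecture, checked against BUILDSYS_ARCH.
supported-arches = ["x86_64"]
```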
From 272ed7027239ee779918ff190b03b6bd877aaddc Mon Sep 17 00:00:00 2001 From: gthao313 Date: Mon, 29 Mar 2021 22:22:05 +0000 Subject: [PATCH 0427/1356] Add new setting: Kube Reserved --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index af1759e6..2f884f05 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,11 @@ The following settings are set for you automatically by [pluto](sources/api/) ba * `settings.kubernetes.cluster-dns-ip`: The CIDR block of the primary network interface. * `settings.kubernetes.node-ip`: The IPv4 address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. +* `settings.kubernetes.kube-reserved`: Resources reserved for node components. + * Bottlerocket provides default values for the resources by [schnauzer](sources/api/): + * `cpu`: in millicores from the total number of vCPUs available on the instance. + * `memory`: in mebibytes from the max num of pods on the instance. `memory_to_reserve = max_num_pods * 11 + 255`. + * `ephemeral-storage`: defaults to `1Gi`. #### Amazon ECS settings From 9b2ec6e7da5753f58b50d0ac87ca226f451d33d8 Mon Sep 17 00:00:00 2001 From: gthao313 Date: Mon, 1 Mar 2021 20:18:29 +0000 Subject: [PATCH 0428/1356] Add new setting: Allowed Unsafe Sysctls --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 2f884f05..824ab620 100644 --- a/README.md +++ b/README.md @@ -315,6 +315,11 @@ The following settings are optional and allow you to further configure your clus [settings.kubernetes.eviction-hard] "memory.available" = "15%" ``` +* `settings.kubernetes.allowed-unsafe-sysctls`: Enables specified list of unsafe sysctls. + * Example user data for setting up allowed unsafe sysctls: + ``` + allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] + ``` You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 9bc08fa9718b4ca328a8b92af823b3b372943d6f Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Mon, 5 Apr 2021 12:29:46 -0700 Subject: [PATCH 0429/1356] readme: document metrics settings --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 824ab620..1fcea898 100644 --- a/README.md +++ b/README.md @@ -394,6 +394,16 @@ The no-proxy list will automatically include entries for localhost. If you're running a Kubernetes variant, the no-proxy list will automatically include the Kubernetes API server endpoint and other commonly used Kubernetes DNS suffixes to facilitate intra-cluster networking. +#### Metrics settings + +By default, Bottlerocket sends anonymous metrics when it boots, and once every six hours. +This can be disabled by setting `send-metrics` to false. +Here are the metrics settings: + +* `settings.metrics.metrics-url`: The endpoint to which metrics will be sent. The default is `https://metrics.bottlerocket.aws/v1/metrics`. +* `settings.metrics.send-metrics`: Whether Bottlerocket will send anonymous metrics. +* `settings.metrics.service-checks`: A list of systemd services that will be checked to determine whether a host is healthy. + #### Time settings * `settings.ntp.time-servers`: A list of NTP servers used to set and verify the system time. 
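As a worked example of the `kube-reserved` memory formula above: on an instance type whose maximum pod count is 29, the reservation comes out to 29 * 11 + 255 = 574Mi.
If you want to override these defaults yourself, a user data sketch might look like the following; the values are illustrative rather than Bottlerocket defaults:

```
[settings.kubernetes.kube-reserved]
cpu = "90m"
memory = "574Mi"
ephemeral-storage = "1Gi"
```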
From 6df2ac4c81dfcce43e65d0f0bde8520cc39413cf Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 7 Apr 2021 10:31:35 -0700 Subject: [PATCH 0430/1356] cargo update the tools/ workspace --- tools/Cargo.lock | 207 ++++++++++++++++++++++------------------------- 1 file changed, 98 insertions(+), 109 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index e0d089e5..cb04a3ea 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -11,9 +11,9 @@ dependencies = [ [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" @@ -64,9 +64,9 @@ checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" [[package]] name = "async-trait" -version = "0.1.42" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" dependencies = [ "proc-macro2", "quote", @@ -250,9 +250,9 @@ dependencies = [ [[package]] name = "console" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc80946b3480f421c2f17ed1cb841753a371c7c5104f51d507e13f532c856aa" +checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" dependencies = [ "encode_unicode", "lazy_static", @@ -265,9 +265,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" +checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" [[package]] name = "core-foundation" @@ -363,9 +363,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "dtoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "duct" @@ -550,9 +550,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" dependencies = [ "bytes", "fnv", @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "hex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" @@ -620,12 +620,13 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] @@ -642,9 +643,9 @@ checksum = 
"494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" dependencies = [ "bytes", "futures-channel", @@ -694,9 +695,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", "hashbrown", @@ -737,9 +738,9 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.47" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -752,15 +753,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.86" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" dependencies = [ "scopeguard", ] @@ -800,9 +801,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "miniz_oxide" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", @@ -810,9 +811,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" +checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" dependencies = [ "libc", "log", @@ -823,11 +824,10 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi", ] @@ -900,9 +900,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad167a2f54e832b82dbe003a046280dceffe5227b5f79e08e363a29638cfddd" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "opaque-debug" @@ -988,18 +988,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.5" +version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" dependencies = [ "proc-macro2", "quote", @@ -1008,9 +1008,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" [[package]] name = "pin-utils" @@ -1062,9 +1062,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -1209,21 +1209,20 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.3" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] name = "regex-syntax" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" +checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" [[package]] name = "remove_dir_all" @@ -1391,7 +1390,7 @@ dependencies = [ "rustc_version", "serde", "sha2", - "time 0.2.25", + "time 0.2.26", "tokio", ] @@ -1507,9 +1506,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfd318104249865096c8da1dfabf09ddbb6d0330ea176812a62ec75e40c4166" +checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" dependencies = [ "bitflags", "core-foundation", @@ -1520,9 +1519,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" +checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" dependencies = [ "core-foundation-sys", "libc", @@ -1564,18 +1563,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.123" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.123" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" +checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" dependencies = [ "proc-macro2", "quote", @@ -1647,9 +1646,9 @@ dependencies = [ [[package]] name = "shared_child" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cebcf3a403e4deafaf34dc882c4a1b6a648b43e5670aa2e4bb985914eaeb2d2" +checksum = "6be9f7d5565b1483af3e72975e2dee33879b3b86bd48c0929fccf6585d79e65a" dependencies = [ "libc", "winapi", @@ -1723,11 +1722,10 @@ dependencies = [ [[package]] name = "socket2" -version = "0.3.19" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" dependencies = [ - "cfg-if", "libc", "winapi", ] @@ -1740,9 +1738,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ "version_check", ] @@ -1834,9 +1832,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.60" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" dependencies = [ "proc-macro2", "quote", @@ -1885,15 +1883,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "thread_local" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" -dependencies = [ - "once_cell", -] - [[package]] name = "time" version = "0.1.43" @@ -1906,9 +1895,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" +checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" dependencies = [ "const_fn", "libc", @@ -1954,9 +1943,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" dependencies = [ "tinyvec_macros", ] @@ -1969,9 +1958,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d56477f6ed99e10225f38f9f75f872f29b8b8bd8c0b946f63345bb144e9eeda" +checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" dependencies = [ "autocfg", "bytes", @@ -2011,9 +2000,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1981ad97df782ab506a1f43bf82c967326960d278acf3bf8279809648c3ff3ea" +checksum = "e177a5d8c3bf36de9ebe6d58537d8879e964332f93fb3339e43f618c81361af0" dependencies = [ "futures-core", "pin-project-lite", @@ -2022,9 +2011,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" +checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" dependencies = [ "bytes", "futures-core", @@ -2134,9 +2123,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" [[package]] name = "ucd-trie" @@ -2146,9 +2135,9 @@ checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" dependencies = [ "matches", ] @@ -2222,15 +2211,15 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "walkdir" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", "winapi", @@ -2255,9 +2244,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.70" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if", "serde", @@ -2267,9 +2256,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.70" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -2282,9 +2271,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.20" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" dependencies = [ "cfg-if", "js-sys", @@ -2294,9 +2283,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.70" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2304,9 +2293,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.70" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -2317,15 +2306,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.70" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "web-sys" -version = "0.3.47" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" dependencies = [ "js-sys", "wasm-bindgen", @@ -2343,9 +2332,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ "webpki", ] From 1b89b5b16a9b623386860337dd4cdd23a376a43a Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 7 Apr 2021 10:38:15 -0700 Subject: [PATCH 0431/1356] tools: update simplelog --- tools/Cargo.lock | 4 ++-- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index cb04a3ea..a1800a0f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1677,9 +1677,9 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc0ffd69814a9b251d43afcabf96dad1b29f5028378056257be9e3fecc9f720" +checksum = "59d0fe306a0ced1c88a58042dc22fc2ddd000982c26d75f6aa09a394547c41e0" dependencies = [ "chrono", "log", diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 4e95b6e6..093f9207 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -13,7 +13,7 @@ pubsys-config = { path = "../pubsys-config/" } reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } sha2 = "0.9" shell-words = "1.0" -simplelog = "0.9" +simplelog = "0.10" snafu = "0.6" structopt = { version = "0.3", default-features = false } tempfile = "3.1" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index b2f7d5a1..19482f42 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -26,7 +26,7 @@ rusoto_ec2 = { version = "0.46.0", default-features = false, features = ["rustls rusoto_signature = "0.46.0" rusoto_ssm = { version = "0.46.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.46.0", default-features = false, features = ["rustls"] } -simplelog = "0.9.0" +simplelog = "0.10.0" snafu = "0.6" semver = "0.11.0" serde = { version = "1.0", features = ["derive"] } From 
8b9f4562765c28f7853c3082102633f03300179c Mon Sep 17 00:00:00 2001
From: Arnaldo Garcia Rincon
Date: Fri, 12 Mar 2021 02:44:33 +0000
Subject: [PATCH 0432/1356] Add support for bootstrap-containers via settings

This commit adds support for creating bootstrap containers through the API.
Bootstrap containers are host containers that can be used to set up the host
during the execution of the `configured` target. These containers are created
with the prefix `boot` to prevent collisions with normal host containers.
They can be set up to fail the boot process if the underlying container task
exits with a non-zero status code.
---
 README.md | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/README.md b/README.md
index 1fcea898..65d558b4 100644
--- a/README.md
+++ b/README.md
@@ -478,6 +478,46 @@ We use it for the control container because it needs to be available early to gi
 
 Be careful, and make sure you have a similar low-level use case before reaching for host containers.
 
+#### Bootstrap containers settings
+* `settings.bootstrap-containers.<name>.source`: the image for the container
+* `settings.bootstrap-containers.<name>.mode`: the mode of the container; it can be one of `off`, `once`, or `always`. See below for a description of modes.
+* `settings.bootstrap-containers.<name>.essential`: whether or not the container should fail the boot process; defaults to `false`
+* `settings.bootstrap-containers.<name>.user-data`: field with arbitrary base64-encoded data
+
+Bootstrap containers are host containers that can be used to "bootstrap" the host before services like ECS Agent, Kubernetes, and Docker start.
+
+Bootstrap containers are very similar to normal host containers; they come with persistent storage and with optional user data.
+Unlike normal host containers, bootstrap containers can't be treated as `superpowered` containers.
+However, these containers have access to the underlying root filesystem on `/.bottlerocket/rootfs`.
+Bootstrap containers are set up to run after the systemd `configured.target` unit is active.
+The containers' systemd unit depends on this target (and not on any of the bootstrap containers' peers), which means that bootstrap containers will not execute in a deterministic order.
+The boot process will "wait" for as long as the bootstrap containers run.
+Bootstrap containers configured with `essential=true` will stop the boot process if their exit code is a non-zero value.
+
+Bootstrap containers have three different modes:
+
+* `always`: with this setting, the container is executed on every boot.
+* `off`: the container won't run.
+* `once`: with this setting, the container only runs on the first boot where the container is defined. Upon completion, the mode is changed to `off`.
+
+Here's an example of adding a bootstrap container with API calls:
+
+```
+apiclient set \
+  bootstrap-containers.bootstrap.source=MY-CONTAINER-URI \
+  bootstrap-containers.bootstrap.mode=once \
+  bootstrap-containers.bootstrap.essential=true
+```
+
+Here's the same example, but with the settings you'd add to user data:
+
+```
+[settings.bootstrap-containers.bootstrap]
+source = "MY-CONTAINER-URI"
+mode = "once"
+essential = true
+```
+
 #### Platform-specific settings
 
 Platform-specific settings are automatically set at boot time by [early-boot-config](sources/api/early-boot-config) based on metadata available on the running platform.
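Since bootstrap container `user-data` is base64-encoded, it is usually easiest to encode it inline when setting it.
A small sketch using the same illustrative container name as the example above; the payload is arbitrary and only meaningful to your container:

```
apiclient set \
  bootstrap-containers.bootstrap.user-data="$(echo 'hello from bootstrap' | base64)"
```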
From 1557429a1ce8ded5edc3c6986ef14bb8339c9184 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 6 Apr 2021 20:23:09 -0700 Subject: [PATCH 0433/1356] Update kernel to 5.4.105 --- packages/kernel/Cargo.toml | 4 ++-- packages/kernel/kernel.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index a912b663..eb45aabe 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -10,5 +10,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/cf12975d70edce3beb7042007609dc355b47ce27babb08b436829f7500de6b76/kernel-5.4.95-42.163.amzn2.src.rpm" -sha512 = "4dcfb86a2664edd9cf08d1f32b388ec6b9874ae62a21fc655aa80599270af5fdf15ff1f4dc250e36e7559a1c8a08901e428823d7c3e212cf13bada298fdf4dbd" +url = "https://cdn.amazonlinux.com/blobstore/c9c16a56ef978680bd95df30d81add144807ffe0c43def257038586bb6b52388/kernel-5.4.105-48.177.amzn2.src.rpm" +sha512 = "ef506706434bc94df6e845e5262c8d022ebb91ff6bc6a71ac656851c0de66d81392acedb9be39b2be4724f106df21d3b58de71387410e103e4b05a48fa955059" diff --git a/packages/kernel/kernel.spec b/packages/kernel/kernel.spec index 23919a31..617932ba 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel/kernel.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel -Version: 5.4.95 +Version: 5.4.105 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/cf12975d70edce3beb7042007609dc355b47ce27babb08b436829f7500de6b76/kernel-5.4.95-42.163.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c9c16a56ef978680bd95df30d81add144807ffe0c43def257038586bb6b52388/kernel-5.4.105-48.177.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 923c4beddd62932b99671da5745f23cdbf6aae45 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 8 Apr 2021 17:05:03 -0700 Subject: [PATCH 0434/1356] kernel: disable CMDLINE_EXTEND This can be used to extend the kernel command line with default options. Bottlerocket has fairly specific needs for the kernel command line, so we want to use what we set in grub (via rpm2img) and nothing else. --- packages/kernel/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel/config-bottlerocket b/packages/kernel/config-bottlerocket index caa6f106..26cce3d6 100644 --- a/packages/kernel/config-bottlerocket +++ b/packages/kernel/config-bottlerocket @@ -57,3 +57,7 @@ CONFIG_IKHEADERS=y # BTF debug info at /sys/kernel/btf/vmlinux CONFIG_DEBUG_INFO_BTF=y + +# We don't want to extend the kernel command line with any upstream defaults; +# Bottlerocket uses a fairly custom setup that needs tight control over it. 
+CONFIG_CMDLINE_EXTEND=n From 6ee87b24735186dc28813dc15ff54515f6ea3cfc Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 9 Apr 2021 11:12:01 -0700 Subject: [PATCH 0435/1356] Document the deprecation of the aws-k8s-1.15 variant --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 65d558b4..93120e3c 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,6 @@ For example, an `x86_64` build of the `aws-k8s-1.19` variant will produce an ima The following variants support EKS, as described above: -- `aws-k8s-1.15` - `aws-k8s-1.16` - `aws-k8s-1.17` - `aws-k8s-1.18` @@ -60,6 +59,9 @@ We also have a variant designed to work with ECS, currently in preview: - `aws-ecs-1` +The `aws-k8s-1.15` variant is deprecated and will no longer be supported in Bottlerocket releases. +We recommend users replace `aws-k8s-1.15` nodes with the [latest variant compatible with their cluster](variants/). + ## Architectures Our supported architectures include `x86_64` and `aarch64` (written as `arm64` in some contexts). From a80a237bcfbd6fcc2ffc037018f64cce029de39e Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 14 Apr 2021 14:09:58 -0700 Subject: [PATCH 0436/1356] variants, models: remove aws-k8s-1.15 files and references This deletes aws-k8s-1.15 models and defaults and the variant directory. The aws-k8s-1.15 can no longer be built. --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f4358ed6..8a9c4cf7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.15, aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-ecs-1] + variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: From a861861142d46a9c7862a4e70fd1fe1c5a8c3d7a Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 13 Apr 2021 13:40:59 -0700 Subject: [PATCH 0437/1356] k8s: add setting for configuring serverTLSBootstrap Adds a new setting `kubernetes.server-tls-bootstrap` for configuring whether to enable server certificate bootstrap for the kubelet. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 93120e3c..ae698b8a 100644 --- a/README.md +++ b/README.md @@ -309,6 +309,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. * `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. +* `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. 
* `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. * `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. Remember to quote signals (since they all contain ".") and to quote all values. From d525ebe1e6a1664ca20747fea9df955c9a8d1053 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 15 Apr 2021 13:51:59 -0700 Subject: [PATCH 0438/1356] packages, logdog: remove k8s-1.15 related files --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ae698b8a..4025c5fc 100644 --- a/README.md +++ b/README.md @@ -583,7 +583,7 @@ We currently package the following major third-party components: * systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) * wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) -* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.15/)) +* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.19/)) * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) * Amazon ECS agent ([background](https://github.com/aws/amazon-ecs-agent), [packaging](packages/ecs-agent/)) From 78a2e05201d1d4582e0b8cde6eb53c4f356a3b7b Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 14 Apr 2021 16:23:36 -0700 Subject: [PATCH 0439/1356] Allow variants to specify extra kernel parameters For example, this is useful when variants designed to run on different platforms need different serial console parameters. --- tools/buildsys/src/builder.rs | 12 +++++++++++- tools/buildsys/src/main.rs | 4 +++- tools/buildsys/src/manifest.rs | 7 +++++++ tools/rpm2img | 4 +++- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 018ae32a..101eb8ca 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -80,7 +80,11 @@ pub(crate) struct VariantBuilder; impl VariantBuilder { /// Build a variant with the specified packages installed. - pub(crate) fn build(packages: &[String], image_format: Option<&ImageFormat>) -> Result { + pub(crate) fn build( + packages: &[String], + image_format: Option<&ImageFormat>, + kernel_parameters: Option<&Vec>, + ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); let variant = getenv("BUILDSYS_VARIANT")?; @@ -102,6 +106,12 @@ impl VariantBuilder { Some(ImageFormat::Vmdk) => "vmdk", }, ); + args.build_arg( + "KERNEL_PARAMETERS", + kernel_parameters + .map(|v| v.join(" ")) + .unwrap_or_else(|| "".to_string()), + ); // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. 
diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index e6b3a18a..69037840 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -192,7 +192,9 @@ fn build_variant() -> Result<()> { if let Some(packages) = manifest.included_packages() { let image_format = manifest.image_format(); - VariantBuilder::build(&packages, image_format).context(error::BuildAttempt)?; + let kernel_parameters = manifest.kernel_parameters(); + VariantBuilder::build(&packages, image_format, kernel_parameters) + .context(error::BuildAttempt)?; } else { println!("cargo:warning=No included packages in manifest. Skipping variant build."); } diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index c0563a4f..b1985198 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -123,6 +123,12 @@ impl ManifestInfo { .and_then(|b| b.supported_arches.as_ref()) } + /// Convenience method to return the kernel parameters for this variant. + pub(crate) fn kernel_parameters(&self) -> Option<&Vec> { + self.build_variant() + .and_then(|b| b.kernel_parameters.as_ref()) + } + /// Helper methods to navigate the series of optional struct fields. fn build_package(&self) -> Option<&BuildPackage> { self.package @@ -167,6 +173,7 @@ pub(crate) struct BuildVariant { pub(crate) included_packages: Option>, pub(crate) image_format: Option, pub(crate) supported_arches: Option>, + pub(crate) kernel_parameters: Option>, } #[derive(Deserialize, Debug)] diff --git a/tools/rpm2img b/tools/rpm2img index 17d17c85..a0cabb31 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -177,7 +177,9 @@ set default="0" set timeout="0" menuentry "${PRETTY_NAME} ${VERSION_ID}" { - linux (\$root)/vmlinuz root=/dev/dm-0 rootwait ro \\ + linux (\$root)/vmlinuz \\ + ${KERNEL_PARAMETERS} \\ + root=/dev/dm-0 rootwait ro \\ console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ From 5f3b6d87bc223f160257fb10aaf9065606dba440 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 14 Apr 2021 16:35:18 -0700 Subject: [PATCH 0440/1356] buildsys: document new variant metadata keys --- tools/buildsys/src/manifest.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index b1985198..b8f82c35 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -58,6 +58,30 @@ variant-sensitive = true [package.metadata.build-variant] included-packages = ["release"] ``` + +`image-format` is the desired format of the built image. +This can be `raw` (the default), `vmdk`, or `qcow2`. +``` +[package.metadata.build-variant] +image-format = "vmdk" +``` + +`supported-arches` is the list of architectures the variant is able to run on. +The values can be `x86_64` and `aarch64`. +If not specified, the variant can run on any of those architectures. +``` +[package.metadata.build-variant] +supported-arches = ["x86_64"] +``` + +`kernel-parameters` is a list of extra parameters to be added to the kernel command line. +The given parameters are inserted at the start of the command line. +``` +[package.metadata.build-variant] +kernel-parameters = [ + "console=ttyS42", +] +``` */ pub(crate) mod error; From cf2cf26fe13da4fefc4cabb661a0a8f35a65aa1c Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Mon, 19 Apr 2021 18:36:01 +0000 Subject: [PATCH 0441/1356] k8s: add setting for configuring cloudProvider Adds a new setting `kubernetes.cloud-provider` for configuring whether the cloud provider is `aws` or `external`. Prior to this, the argument was hard-coded to `aws`. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 4025c5fc..2cee842a 100644 --- a/README.md +++ b/README.md @@ -308,6 +308,7 @@ The following settings can be optionally set to customize the node labels and ta The following settings are optional and allow you to further configure your cluster. * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. +* `settings.kubernetes.cloud-provider`: The cloud provider for this cluster. Defaults to `aws` for AWS variants, and `external` for other variants. * `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. * `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. * `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. From d066271f9dd90e2b4fe96621dc35823a7b8c7a19 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Tue, 23 Mar 2021 16:24:21 -0700 Subject: [PATCH 0442/1356] packages: express installation dependencies in cargo Previously we expressed RPM BuildRequires dependencies in Cargo's dependency graph to ensure the necessary RPM's exist before we build a package. If we add to this RPM Requires dependencies (dependencies that are needed when software packages are installed), then we can use Cargo to select only the packages that are needed for a set of desired install packages. --- packages/kernel/Cargo.toml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/kernel/Cargo.toml b/packages/kernel/Cargo.toml index eb45aabe..749ebe8a 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel/Cargo.toml @@ -12,3 +12,11 @@ path = "pkg.rs" # Use latest-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/c9c16a56ef978680bd95df30d81add144807ffe0c43def257038586bb6b52388/kernel-5.4.105-48.177.amzn2.src.rpm" sha512 = "ef506706434bc94df6e845e5262c8d022ebb91ff6bc6a71ac656851c0de66d81392acedb9be39b2be4724f106df21d3b58de71387410e103e4b05a48fa955059" + +# RPM BuildRequires +[build-dependencies] +# Provided by Bottlerocket SDK + +# RPM Requires +[dependencies] +filesystem = { path = "../filesystem" } From 62eddf080496f905199134a81dc884f98c0a9db3 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Tue, 23 Mar 2021 20:47:12 -0700 Subject: [PATCH 0443/1356] build: only build packages for current variant Adds the packages that are needed to build a variant to the variant's Cargo dependencies. 
Previously the build-variant makefile target assumed that all packages were pre-built. Instead we now tell Cargo to build the packages we need for the variant. Creates a workspace in the variants directory and removes the workspace in the packages directory. Updates the makefile accordingly. Now only the packages that are part of the dependency tree starting with the variant will be built. The build-kmod-kit target previously depended on build-packages, which is no longer available. So a new target, build-package, and a specialization of it, build-kernel, have been added. These use the same workspace context that is used when building variants to ensure that the same rpm is used by build-kmod-kit. --- BUILDING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/BUILDING.md b/BUILDING.md index e8b06945..3c4ad104 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -40,6 +40,7 @@ yum install make automake gcc openssl openssl-devel pkg-config lz4 perl-FindBin The build system is based on the Rust language. We recommend you install the latest stable Rust using [rustup](https://rustup.rs/), either from the official site or your development host's package manager. +Rust 1.51.0 or higher is required. To organize build tasks, we use [cargo-make](https://sagiegurari.github.io/cargo-make/). We also use [cargo-deny](https://github.com/EmbarkStudios/cargo-deny) during the build process. From b8363cbc24bff32806f00f5e56bbaca002aefc41 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Thu, 22 Apr 2021 21:23:09 +0000 Subject: [PATCH 0444/1356] Move kernel console settings to variant tomls This moves console settings like `console=tty0` to variant tomls to allow for different variants to have different console configurations. --- tools/rpm2img | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index a0cabb31..1fe27f72 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -177,10 +177,10 @@ set default="0" set timeout="0" menuentry "${PRETTY_NAME} ${VERSION_ID}" { - linux (\$root)/vmlinuz \\ + linux (\$root)/vmlinuz root=/dev/dm-0 \\ ${KERNEL_PARAMETERS} \\ - root=/dev/dm-0 rootwait ro \\ - console=tty0 console=ttyS0 random.trust_cpu=on selinux=1 enforcing=1 \\ + rootwait ro \\ + random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ From 8700214fc6a73b8157a428e5e78ad32a103458db Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 31 Mar 2021 17:37:47 -0700 Subject: [PATCH 0445/1356] Add aws-k8s-1.20 variant with Kubernetes 1.20 --- .github/workflows/build.yml | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8a9c4cf7..c550c771 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-ecs-1] + variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: diff --git a/README.md b/README.md index 2cee842a..cc53e052 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ The following variants support EKS, as described above: - `aws-k8s-1.17` - 
`aws-k8s-1.18` - `aws-k8s-1.19` +- `aws-k8s-1.20` We also have a variant designed to work with ECS, currently in preview: From fe2b951f457fa48e22e8aaa4f824a7c06a0011d9 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Fri, 23 Apr 2021 15:54:10 -0700 Subject: [PATCH 0446/1356] pubsys: validate-repo without tokio runtime Re-implement the parallelism of pubsys validate-repo so that it does not need a tokio runtime. The runtime was interfering with the runtime created by reqwest::blocking. --- tools/pubsys/src/main.rs | 7 +---- tools/pubsys/src/repo/validate_repo/mod.rs | 34 +++++++++++++++------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 903eec30..b4b85b29 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -44,12 +44,7 @@ fn run() -> Result<()> { match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), SubCommand::ValidateRepo(ref validate_repo_args) => { - let rt = Runtime::new().context(error::Runtime)?; - rt.block_on(async { - repo::validate_repo::run(&args, &validate_repo_args) - .await - .context(error::ValidateRepo) - }) + repo::validate_repo::run(&args, &validate_repo_args).context(error::ValidateRepo) } SubCommand::CheckRepoExpirations(ref check_expirations_args) => { repo::check_expirations::run(&args, &check_expirations_args) diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 9391eab2..26ea168d 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -3,13 +3,13 @@ use crate::repo::{error as repo_error, repo_urls}; use crate::Args; -use futures::future::join_all; use log::{info, trace}; use pubsys_config::InfraConfig; use snafu::{OptionExt, ResultExt}; use std::fs::File; use std::io; use std::path::PathBuf; +use std::thread::spawn; use structopt::StructOpt; use tough::{Repository, RepositoryLoader}; use url::Url; @@ -39,7 +39,7 @@ pub(crate) struct ValidateRepoArgs { } /// Retrieves listed targets and attempts to download them for validation purposes -async fn retrieve_targets(repo: &Repository) -> Result<(), Error> { +fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let targets = &repo.targets().signed.targets; let mut tasks = Vec::new(); @@ -54,22 +54,35 @@ async fn retrieve_targets(repo: &Repository) -> Result<(), Error> { target: target.to_string(), })?; info!("Downloading target: {}", target); - tasks.push(tokio::spawn(async move { + // TODO - limit threads https://github.com/bottlerocket-os/bottlerocket/issues/1522 + tasks.push(spawn(move || { // tough's `Read` implementation validates the target as it's being downloaded io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { target: target.to_string(), }) })); } - let results = join_all(tasks).await; + + // ensure that we join all threads before checking the results + let mut results = Vec::new(); + for task in tasks { + let result = task.join().map_err(|e| error::Error::Join { + // the join function is returning an error type that does not implement error or display + inner: format!("{:?}", e), + })?; + results.push(result); + } + + // check all results and return the first error we see for result in results { - result.context(error::Join)??; + result?; } + // no errors were found, the targets are validated Ok(()) } -async fn validate_repo( +fn validate_repo( root_role_path: &PathBuf, metadata_url: Url, targets_url: &Url, @@ -90,14 +103,14 @@ 
async fn validate_repo( info!("Loaded TUF repo: {}", metadata_url); if validate_targets { // Try retrieving listed targets - retrieve_targets(&repo).await?; + retrieve_targets(&repo)?; } Ok(()) } /// Common entrypoint from main() -pub(crate) async fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { +pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { info!( "Using infra config from path: {}", args.infra_config_path.display() @@ -130,7 +143,6 @@ pub(crate) async fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> R repo_urls.1, validate_repo_args.validate_targets, ) - .await } mod error { @@ -152,8 +164,8 @@ mod error { #[snafu(display("Missing target: {}", target))] TargetMissing { target: String }, - #[snafu(display("Failed to spawn task for fetching target: {}", source))] - Join { source: tokio::task::JoinError }, + #[snafu(display("Failed to join thread: {}", inner))] + Join { inner: String }, } } pub(crate) use error::Error; From 7eae9ab7f5f31f974ad145ae9633083015d70d9e Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 14 Apr 2021 19:39:18 +0000 Subject: [PATCH 0447/1356] grub: add quotes to conditional expressions Support for bare words in expressions was removed in rpm 4.16. Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 94fbd523..bd2ade58 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -119,14 +119,14 @@ grub2-mkimage \ -O "%{_cross_grub_tuple}" \ -o "%{buildroot}%{_cross_grubdir}/%{_cross_grub_image}" \ -p "%{_cross_grub_prefix}" \ -%if %{_cross_arch} == x86_64 +%if "%{_cross_arch}" == "x86_64" biosdisk \ %else efi_gop \ %endif configfile echo ext2 gptprio linux normal part_gpt reboot sleep -%if %{_cross_arch} == x86_64 +%if "%{_cross_arch}" == "x86_64" install -m 0644 ./grub-core/boot.img \ %{buildroot}%{_cross_grubdir}/boot.img %endif @@ -135,7 +135,7 @@ install -m 0644 ./grub-core/boot.img \ %license COPYING COPYING.unicode %{_cross_attribution_file} %dir %{_cross_grubdir} -%if %{_cross_arch} == x86_64 +%if "%{_cross_arch}" == "x86_64" %{_cross_grubdir}/boot.img %endif %{_cross_grubdir}/%{_cross_grub_image} From 6110fac94071da19815e34b28b6beb98344e87f4 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 15 Apr 2021 16:13:28 +0000 Subject: [PATCH 0448/1356] grub: do not strip files This build produces a mix of host tools and target modules, and new versions of RPM build scripts from Fedora will attempt to strip both. When cross-compiling, there's no single version of `strip` that can do this successfully, and this now fails when building on an x86_64 host for the aarch64 target. We only use the tools and modules to generate the bootloader images, and they are not shipped anywhere. Hence we do not really care about extracting debuginfo and can just leave the files unstripped. 
Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index bd2ade58..dd9e830d 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -1,5 +1,5 @@ %global debug_package %{nil} -%global __strip %{_bindir}/strip +%global __strip %{_bindir}/true Name: %{_cross_os}grub Version: 2.04 From eff2b8d43cc3f8081c1f2cc2a7d7543207f4b8aa Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 15 Apr 2021 13:49:16 +0000 Subject: [PATCH 0449/1356] build: update SDK to 0.20.0 The SDK and toolchain are now vended as containers, so we pull them from ECR rather than from S3. The SDK includes `cargo deny`, so we use its copy and no longer need it to be installed on the build host. Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 1 - BUILDING.md | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c550c771..e177f846 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,6 +24,5 @@ jobs: steps: - uses: actions/checkout@v2 - run: cargo install --version 0.30.0 cargo-make - - run: cargo install --version 0.6.6 cargo-deny --no-default-features - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 diff --git a/BUILDING.md b/BUILDING.md index 3c4ad104..ccc596ee 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -43,12 +43,10 @@ We recommend you install the latest stable Rust using [rustup](https://rustup.rs Rust 1.51.0 or higher is required. To organize build tasks, we use [cargo-make](https://sagiegurari.github.io/cargo-make/). -We also use [cargo-deny](https://github.com/EmbarkStudios/cargo-deny) during the build process. -To get these, run: +To get it, run: ``` cargo install cargo-make -cargo install cargo-deny --version 0.6.2 ``` #### Docker From c69b7f2bdbaf0c48be76bd989fc591f3d8c2d7c6 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 19 Apr 2021 17:45:51 +0000 Subject: [PATCH 0450/1356] rename kernel package to kernel-5.4 This allows us to add other kernels to the packages tree. 
Signed-off-by: Ben Cressey --- README.md | 2 +- packages/{kernel => kernel-5.4}/.gitignore | 0 .../0001-lustrefsx-Disable-Werror-stringop-overflow.patch | 0 ...1-Makefile-add-prepare-target-for-external-modules.patch | 0 packages/{kernel => kernel-5.4}/Cargo.toml | 5 ++++- packages/{kernel => kernel-5.4}/build.rs | 0 packages/{kernel => kernel-5.4}/config-bottlerocket | 0 packages/{kernel/kernel.spec => kernel-5.4/kernel-5.4.spec} | 6 +++++- packages/kernel-5.4/latest-srpm-url.sh | 2 ++ packages/{kernel => kernel-5.4}/pkg.rs | 0 packages/kernel/latest-srpm-url.sh | 2 -- 11 files changed, 12 insertions(+), 5 deletions(-) rename packages/{kernel => kernel-5.4}/.gitignore (100%) rename packages/{kernel => kernel-5.4}/0001-lustrefsx-Disable-Werror-stringop-overflow.patch (100%) rename packages/{kernel => kernel-5.4}/1001-Makefile-add-prepare-target-for-external-modules.patch (100%) rename packages/{kernel => kernel-5.4}/Cargo.toml (88%) rename packages/{kernel => kernel-5.4}/build.rs (100%) rename packages/{kernel => kernel-5.4}/config-bottlerocket (100%) rename packages/{kernel/kernel.spec => kernel-5.4/kernel-5.4.spec} (97%) create mode 100755 packages/kernel-5.4/latest-srpm-url.sh rename packages/{kernel => kernel-5.4}/pkg.rs (100%) delete mode 100755 packages/kernel/latest-srpm-url.sh diff --git a/README.md b/README.md index cc53e052..96399dbc 100644 --- a/README.md +++ b/README.md @@ -578,7 +578,7 @@ We use RPM package definitions to build and install individual packages into an RPM itself is not in the image - it's just a common and convenient package definition format. We currently package the following major third-party components: -* Linux kernel ([background](https://en.wikipedia.org/wiki/Linux), [packaging](packages/kernel/)) +* Linux kernel ([background](https://en.wikipedia.org/wiki/Linux), [packaging](packages/kernel-5.4/)) * glibc ([background](https://www.gnu.org/software/libc/), [packaging](packages/glibc/)) * Buildroot as build toolchain ([background](https://buildroot.org/), via the [SDK](https://github.com/bottlerocket-os/bottlerocket-sdk)) * GRUB, with patches for partition flip updates ([background](https://www.gnu.org/software/grub/), [packaging](packages/grub/)) diff --git a/packages/kernel/.gitignore b/packages/kernel-5.4/.gitignore similarity index 100% rename from packages/kernel/.gitignore rename to packages/kernel-5.4/.gitignore diff --git a/packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch b/packages/kernel-5.4/0001-lustrefsx-Disable-Werror-stringop-overflow.patch similarity index 100% rename from packages/kernel/0001-lustrefsx-Disable-Werror-stringop-overflow.patch rename to packages/kernel-5.4/0001-lustrefsx-Disable-Werror-stringop-overflow.patch diff --git a/packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch similarity index 100% rename from packages/kernel/1001-Makefile-add-prepare-target-for-external-modules.patch rename to packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch diff --git a/packages/kernel/Cargo.toml b/packages/kernel-5.4/Cargo.toml similarity index 88% rename from packages/kernel/Cargo.toml rename to packages/kernel-5.4/Cargo.toml index 749ebe8a..9f56c579 100644 --- a/packages/kernel/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -1,10 +1,13 @@ [package] -name = "kernel" +name = "kernel-5_4" version = "0.1.0" edition = "2018" publish = false build = "build.rs" 
+[package.metadata.build-package] +package-name = "kernel-5.4" + [lib] path = "pkg.rs" diff --git a/packages/kernel/build.rs b/packages/kernel-5.4/build.rs similarity index 100% rename from packages/kernel/build.rs rename to packages/kernel-5.4/build.rs diff --git a/packages/kernel/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket similarity index 100% rename from packages/kernel/config-bottlerocket rename to packages/kernel-5.4/config-bottlerocket diff --git a/packages/kernel/kernel.spec b/packages/kernel-5.4/kernel-5.4.spec similarity index 97% rename from packages/kernel/kernel.spec rename to packages/kernel-5.4/kernel-5.4.spec index 617932ba..96c9c559 100644 --- a/packages/kernel/kernel.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,6 +1,6 @@ %global debug_package %{nil} -Name: %{_cross_os}kernel +Name: %{_cross_os}kernel-5.4 Version: 5.4.105 Release: 1%{?dist} Summary: The Linux kernel @@ -22,6 +22,10 @@ BuildRequires: hostname BuildRequires: kmod BuildRequires: openssl-devel +# Pull in expected modules and development files. +Requires: %{name}-modules = %{version}-%{release} +Requires: %{name}-devel = %{version}-%{release} + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} diff --git a/packages/kernel-5.4/latest-srpm-url.sh b/packages/kernel-5.4/latest-srpm-url.sh new file mode 100755 index 00000000..5e9e4591 --- /dev/null +++ b/packages/kernel-5.4/latest-srpm-url.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-5.4 >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' diff --git a/packages/kernel/pkg.rs b/packages/kernel-5.4/pkg.rs similarity index 100% rename from packages/kernel/pkg.rs rename to packages/kernel-5.4/pkg.rs diff --git a/packages/kernel/latest-srpm-url.sh b/packages/kernel/latest-srpm-url.sh deleted file mode 100755 index 3913cef9..00000000 --- a/packages/kernel/latest-srpm-url.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-ng >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' From 2a1f912df86e1aa7a4e9f2ef0bda6c41a1522b6d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 19 Apr 2021 19:06:30 +0000 Subject: [PATCH 0451/1356] add kernel 5.10 package Signed-off-by: Ben Cressey --- packages/kernel-5.10/.gitignore | 1 + ...-prepare-target-for-external-modules.patch | 50 ++++ packages/kernel-5.10/Cargo.toml | 17 ++ packages/kernel-5.10/build.rs | 9 + packages/kernel-5.10/config-bottlerocket | 63 +++++ packages/kernel-5.10/kernel-5.10.spec | 239 ++++++++++++++++++ packages/kernel-5.10/latest-srpm-url.sh | 2 + packages/kernel-5.10/pkg.rs | 1 + 8 files changed, 382 insertions(+) create mode 100644 packages/kernel-5.10/.gitignore create mode 100644 packages/kernel-5.10/1001-Makefile-add-prepare-target-for-external-modules.patch create mode 100644 packages/kernel-5.10/Cargo.toml create mode 100644 packages/kernel-5.10/build.rs create mode 100644 packages/kernel-5.10/config-bottlerocket create mode 100644 packages/kernel-5.10/kernel-5.10.spec create mode 100755 packages/kernel-5.10/latest-srpm-url.sh create mode 100644 packages/kernel-5.10/pkg.rs diff --git a/packages/kernel-5.10/.gitignore b/packages/kernel-5.10/.gitignore new file mode 100644 index 00000000..f0af3ba1 --- /dev/null +++ b/packages/kernel-5.10/.gitignore @@ -0,0 +1 @@ +kernel-*.src.rpm diff --git 
a/packages/kernel-5.10/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-5.10/1001-Makefile-add-prepare-target-for-external-modules.patch new file mode 100644 index 00000000..13da689c --- /dev/null +++ b/packages/kernel-5.10/1001-Makefile-add-prepare-target-for-external-modules.patch @@ -0,0 +1,50 @@ +From b6d859b7089dd68d3186f2a088823c322ad4852e Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Mon, 19 Apr 2021 18:46:04 +0000 +Subject: [PATCH] Makefile: add prepare target for external modules + +We need to ensure that native versions of programs like `objtool` are +built before trying to build out-of-tree modules, or else the build +will fail. + +Unlike other distributions, we cannot include these programs in our +kernel-devel archive, because we rely on cross-compilation: these are +"host" programs and may not match the architecture of the target. + +Ideally, out-of-tree builds would run `make prepare` first, so that +these programs could be compiled in the normal fashion. We ship all +the files needed for this to work. However, this requirement is +specific to our use case, and DKMS does not support it. + +Adding a minimal prepare target to the dependency graph causes the +programs to be built automatically and improves compatibility with +existing solutions. + +Signed-off-by: Ben Cressey +--- + Makefile | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/Makefile b/Makefile +index 1d4a50ebe3b7..b9347d1e69e2 100644 +--- a/Makefile ++++ b/Makefile +@@ -1719,6 +1719,15 @@ else # KBUILD_EXTMOD + KBUILD_BUILTIN := + KBUILD_MODULES := 1 + ++PHONY += modules_prepare ++modules_prepare: $(objtool_target) ++ $(Q)$(MAKE) $(build)=scripts/basic ++ $(Q)$(MAKE) $(build)=scripts/dtc ++ $(Q)$(MAKE) $(build)=scripts/mod ++ $(Q)$(MAKE) $(build)=scripts ++ ++prepare: modules_prepare ++ + build-dirs := $(KBUILD_EXTMOD) + PHONY += modules + modules: $(MODORDER) +-- +2.21.3 + diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml new file mode 100644 index 00000000..bd608ef0 --- /dev/null +++ b/packages/kernel-5.10/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "kernel-5_10" +version = "0.1.0" +edition = "2018" +publish = false +build = "build.rs" + +[package.metadata.build-package] +package-name = "kernel-5.10" + +[lib] +path = "pkg.rs" + +[[package.metadata.build-package.external-files]] +# Use latest-srpm-url.sh to get this. +url = "https://cdn.amazonlinux.com/blobstore/fa04b98fc067a4943beac60d0c2971e2fbef1a29faed4bac1c4096abe4ad4c12/kernel-5.10.29-27.126.amzn2.src.rpm" +sha512 = "47341f4a1c13ba7e5ea72bad13fe689eefd22cc7547aea08a08fe47238b4a3fe1659786a406b84a1d1508143be20d9be2fae6fe3e7a6924bc85043bf61d4bfce" diff --git a/packages/kernel-5.10/build.rs b/packages/kernel-5.10/build.rs new file mode 100644 index 00000000..cad8999a --- /dev/null +++ b/packages/kernel-5.10/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-package").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket new file mode 100644 index 00000000..26cce3d6 --- /dev/null +++ b/packages/kernel-5.10/config-bottlerocket @@ -0,0 +1,63 @@ +# Because Bottlerocket does not have an initramfs, modules required to mount +# the root filesystem must be set to y. 
+ +# The root filesystem is ext4 +CONFIG_EXT4_FS=y + +# NVMe for EC2 Nitro platforms (C5, M5, and later) +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_CORE=y + +# Xen blkfront for Xen-based EC2 platforms +CONFIG_XEN_BLKDEV_FRONTEND=y + +# virtio for local testing with QEMU +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_PCI=y + +# dm-verity and enabling it on the kernel command line +CONFIG_BLK_DEV_DM=y +CONFIG_DAX=y +CONFIG_DM_INIT=y +CONFIG_DM_VERITY=y + +# yama LSM for ptrace restrictions +CONFIG_SECURITY_YAMA=y + +# Do not allow SELinux to be disabled at boot. +CONFIG_SECURITY_SELINUX_BOOTPARAM=n + +# Do not allow SELinux to be disabled at runtime. +CONFIG_SECURITY_SELINUX_DISABLE=n + +# Do not allow SELinux to use `enforcing=0` behavior. +CONFIG_SECURITY_SELINUX_DEVELOP=n + +# Check the protection applied by the kernel for mmap and mprotect, +# rather than the protection requested by userspace. +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 + +# Enable support for the kernel lockdown security module. +CONFIG_SECURITY_LOCKDOWN_LSM=y + +# Enable lockdown early so that if the option is present on the +# kernel command line, it can be enforced. +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y + +# Enable zstd compression for squashfs. +CONFIG_SQUASHFS_ZSTD=y + +# enable /proc/config.gz +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y + +# kernel headers at /sys/kernel/kheaders.tar.xz +CONFIG_IKHEADERS=y + +# BTF debug info at /sys/kernel/btf/vmlinux +CONFIG_DEBUG_INFO_BTF=y + +# We don't want to extend the kernel command line with any upstream defaults; +# Bottlerocket uses a fairly custom setup that needs tight control over it. +CONFIG_CMDLINE_EXTEND=n diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec new file mode 100644 index 00000000..5da57565 --- /dev/null +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -0,0 +1,239 @@ +%global debug_package %{nil} + +Name: %{_cross_os}kernel-5.10 +Version: 5.10.29 +Release: 1%{?dist} +Summary: The Linux kernel +License: GPL-2.0 WITH Linux-syscall-note +URL: https://www.kernel.org/ +# Use latest-srpm-url.sh to get this. +Source0: https://cdn.amazonlinux.com/blobstore/fa04b98fc067a4943beac60d0c2971e2fbef1a29faed4bac1c4096abe4ad4c12/kernel-5.10.29-27.126.amzn2.src.rpm +Source100: config-bottlerocket + +# Help out-of-tree module builds run `make prepare` automatically. +Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch + +BuildRequires: bc +BuildRequires: elfutils-devel +BuildRequires: hostname +BuildRequires: kmod +BuildRequires: openssl-devel + +# Pull in expected modules and development files. +Requires: %{name}-modules = %{version}-%{release} +Requires: %{name}-devel = %{version}-%{release} + +%global kernel_sourcedir %{_cross_usrsrc}/kernels +%global kernel_libdir %{_cross_libdir}/modules/%{version} + +%description +%{summary}. + +%package devel +Summary: Configured Linux kernel source for module building +Requires: %{_cross_os}filesystem + +%description devel +%{summary}. + +%package archive +Summary: Archived Linux kernel source for module building + +%description archive +%{summary}. + +%package modules +Summary: Modules for the Linux kernel + +%description modules +%{summary}. + +%package headers +Summary: Header files for the Linux kernel for use by glibc + +%description headers +%{summary}. 
+ +%prep +rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" +tar -xof linux-%{version}.tar; rm linux-%{version}.tar +%setup -TDn linux-%{version} +# Patches from the Source0 SRPM +for patch in ../*.patch; do + patch -p1 <"$patch" +done +# Patches listed in this spec (Patch0001...) +%autopatch -p1 +KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ + ARCH="%{_cross_karch}" \ + scripts/kconfig/merge_config.sh ../config-%{_cross_arch} %{SOURCE100} +rm -f ../config-%{_cross_arch} ../*.patch + +%global kmake \ +make -s\\\ + ARCH="%{_cross_karch}"\\\ + CROSS_COMPILE="%{_cross_target}-"\\\ + INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_STRIP=1\\\ +%{nil} + +%build +%kmake mrproper +%kmake %{_cross_vendor}_defconfig +%kmake %{?_smp_mflags} %{_cross_kimage} +%kmake %{?_smp_mflags} modules + +%install +%kmake headers_install +%kmake modules_install + +install -d %{buildroot}/boot +install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz +install -m 0644 .config %{buildroot}/boot/config +install -m 0644 System.map %{buildroot}/boot/System.map + +find %{buildroot}%{_cross_prefix} \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +# For out-of-tree kmod builds, we need to support the following targets: +# make scripts -> make prepare -> make modules +# +# This requires enough of the kernel tree to build host programs under the +# "scripts" and "tools" directories. + +# Any existing ELF objects will not work properly if we're cross-compiling for +# a different architecture, so get rid of them to avoid confusing errors. +find arch scripts tools -type f -executable \ + -exec sh -c "head -c4 {} | grep -q ELF && rm {}" \; + +# We don't need to include these files. +find -type f \( -name \*.cmd -o -name \*.gitignore \) -delete + +# Avoid an OpenSSL dependency by stubbing out options for module signing and +# trusted keyrings, so `sign-file` and `extract-cert` won't be built. External +# kernel modules do not have access to the keys they would need to make use of +# these tools. +sed -i \ + -e 's,$(CONFIG_MODULE_SIG_FORMAT),n,g' \ + -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ + scripts/Makefile + +( + find * \ + -type f \ + \( -name Build\* -o -name Kbuild\* -o -name Kconfig\* -o -name Makefile\* \) \ + -print + + find arch/%{_cross_karch}/ \ + -type f \ + \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ + -print + + find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print + echo arch/%{_cross_karch}/kernel/asm-offsets.s + echo lib/vdso/gettimeofday.c + + for d in \ + arch/%{_cross_karch}/tools \ + arch/%{_cross_karch}/kernel/vdso ; do + [ -d "${d}" ] && find "${d}/" -type f -print + done + + find include -type f -print + find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print + + find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! 
-name \*.o -print + echo tools/build/fixdep.c + find tools/lib/subcmd -type f -print + find tools/lib/{ctype,rbtree,string,str_error_r}.c + + echo kernel/bounds.c + echo kernel/time/timeconst.bc + echo security/selinux/include/classmap.h + echo security/selinux/include/initial_sid_to_string.h + echo security/selinux/include/policycap.h + echo security/selinux/include/policycap_names.h + + echo .config + echo Module.symvers + echo System.map +) | sort -u > kernel_devel_files + +# Create squashfs of kernel-devel files (ie. /usr/src/kernels/). +# +# -no-exports: +# The filesystem does not need to be exported via NFS. +# +# -all-root: +# Make all files owned by root rather than the build user. +# +# -comp zstd: +# zstd offers compression ratios like xz and decompression speeds like lz4. +SQUASHFS_OPTS="-no-exports -all-root -comp zstd" +mkdir -p src_squashfs/%{version} +tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} +mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} + +# Create a tarball of the same files, for use outside the running system. +# In theory we could extract these files with `unsquashfs`, but we do not want +# to require it to be installed on the build host, and it errors out when run +# inside Docker unless the limit for open files is lowered. +tar cf kernel-devel.tar src_squashfs/%{version} --transform='s|src_squashfs/%{version}|kernel-devel|' +xz -T0 kernel-devel.tar + +install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs +install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz +install -d %{buildroot}%{kernel_sourcedir} + +# Replace the incorrect links from modules_install. These will be bound +# into a host container (and unused in the host) so they must not point +# to %{_cross_usrsrc} (eg. /x86_64-bottlerocket-linux-gnu/sys-root/...) 
+rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source + +%files +%license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note +%{_cross_attribution_file} +/boot/vmlinuz +/boot/config +/boot/System.map + +%files modules +%dir %{_cross_libdir}/modules +%{_cross_libdir}/modules/* + +%files headers +%dir %{_cross_includedir}/asm +%dir %{_cross_includedir}/asm-generic +%dir %{_cross_includedir}/drm +%dir %{_cross_includedir}/linux +%dir %{_cross_includedir}/misc +%dir %{_cross_includedir}/mtd +%dir %{_cross_includedir}/rdma +%dir %{_cross_includedir}/scsi +%dir %{_cross_includedir}/sound +%dir %{_cross_includedir}/video +%dir %{_cross_includedir}/xen +%{_cross_includedir}/asm/* +%{_cross_includedir}/asm-generic/* +%{_cross_includedir}/drm/* +%{_cross_includedir}/linux/* +%{_cross_includedir}/misc/* +%{_cross_includedir}/mtd/* +%{_cross_includedir}/rdma/* +%{_cross_includedir}/scsi/* +%{_cross_includedir}/sound/* +%{_cross_includedir}/video/* +%{_cross_includedir}/xen/* + +%files devel +%dir %{kernel_sourcedir} +%{_cross_datadir}/bottlerocket/kernel-devel.squashfs + +%files archive +%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%changelog diff --git a/packages/kernel-5.10/latest-srpm-url.sh b/packages/kernel-5.10/latest-srpm-url.sh new file mode 100755 index 00000000..46001c33 --- /dev/null +++ b/packages/kernel-5.10/latest-srpm-url.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-5.10 >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' diff --git a/packages/kernel-5.10/pkg.rs b/packages/kernel-5.10/pkg.rs new file mode 100644 index 00000000..d799fb2d --- /dev/null +++ b/packages/kernel-5.10/pkg.rs @@ -0,0 +1 @@ +// not used From adb52caf31328de4352071df21ab6fa3e21e1da8 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 28 Apr 2021 14:27:04 +0000 Subject: [PATCH 0452/1356] kernel: omit filesystem package dependency The "filesystem" package is an implicit dependency for all packages, and it's pulled in by release. Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 1 - packages/kernel-5.4/Cargo.toml | 4 ---- packages/kernel-5.4/kernel-5.4.spec | 1 - 3 files changed, 6 deletions(-) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 5da57565..6908fb32 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -31,7 +31,6 @@ Requires: %{name}-devel = %{version}-%{release} %package devel Summary: Configured Linux kernel source for module building -Requires: %{_cross_os}filesystem %description devel %{summary}. 
diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 9f56c579..aaf09531 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -19,7 +19,3 @@ sha512 = "ef506706434bc94df6e845e5262c8d022ebb91ff6bc6a71ac656851c0de66d81392ace # RPM BuildRequires [build-dependencies] # Provided by Bottlerocket SDK - -# RPM Requires -[dependencies] -filesystem = { path = "../filesystem" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 96c9c559..3bf79bef 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -34,7 +34,6 @@ Requires: %{name}-devel = %{version}-%{release} %package devel Summary: Configured Linux kernel source for module building -Requires: %{_cross_os}filesystem %description devel %{summary}. From 878a5f1b4a9e7844822255fdb5b093a167e2eebc Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 21 Apr 2021 23:38:25 +0000 Subject: [PATCH 0453/1356] refactor kmod kit creation Now that the installed kernel is a property of the variant, we need to ensure that the kmod kit includes the development files that match the chosen kernel. By creating it as another stage in the variant build, it's easier to find the right files, and to guarantee that the kmod kit is recreated whenever the image changes. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 16 ++++++++-- tools/buildsys/src/builder/error.rs | 6 ++++ tools/buildsys/src/manifest.rs | 10 +++++++ tools/rpm2kmodkit | 46 +++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 3 deletions(-) create mode 100755 tools/rpm2kmodkit diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 101eb8ca..f1dd8ed7 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -19,7 +19,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use crate::manifest::ImageFormat; +use crate::manifest::{ImageFormat, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -45,8 +45,11 @@ pub(crate) struct PackageBuilder; impl PackageBuilder { /// Build RPMs for the specified package. pub(crate) fn build(package: &str) -> Result { - let arch = getenv("BUILDSYS_ARCH")?; let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); + let arch = getenv("BUILDSYS_ARCH")?; + let goarch = serde_plain::from_str::(&arch) + .context(error::UnsupportedArch { arch: &arch })? + .goarch(); // We do *not* want to rebuild most packages when the variant changes, because most aren't // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" @@ -61,6 +64,7 @@ impl PackageBuilder { let mut args = Vec::new(); args.build_arg("PACKAGE", package); args.build_arg("ARCH", &arch); + args.build_arg("GOARCH", &goarch); args.build_arg("VARIANT", variant); args.build_arg("REPO", repo); @@ -89,10 +93,14 @@ impl VariantBuilder { let variant = getenv("BUILDSYS_VARIANT")?; let arch = getenv("BUILDSYS_ARCH")?; + let goarch = serde_plain::from_str::(&arch) + .context(error::UnsupportedArch { arch: &arch })? 
+ .goarch(); let mut args = Vec::new(); args.build_arg("PACKAGES", packages.join(" ")); args.build_arg("ARCH", &arch); + args.build_arg("GOARCH", &goarch); args.build_arg("VARIANT", &variant); args.build_arg("VERSION_ID", getenv("BUILDSYS_VERSION_IMAGE")?); args.build_arg("BUILD_ID", getenv("BUILDSYS_VERSION_BUILD")?); @@ -155,8 +163,9 @@ fn build( let token = &digest[..12]; let tag = format!("{}-{}", tag, token); - // Our SDK image is picked by the external `cargo make` invocation. + // Our SDK and toolchain are picked by the external `cargo make` invocation. let sdk = getenv("BUILDSYS_SDK_IMAGE")?; + let toolchain = getenv("BUILDSYS_TOOLCHAIN")?; // Avoid using a cached layer from a previous build. let nocache = rand::thread_rng().gen::(); @@ -184,6 +193,7 @@ fn build( build.extend(build_args); build.build_arg("SDK", sdk); + build.build_arg("TOOLCHAIN", toolchain); build.build_arg("NOCACHE", nocache.to_string()); // Avoid using a cached layer from a concurrent build in another checkout. build.build_arg("TOKEN", token); diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs index d57f6354..a6476e93 100644 --- a/tools/buildsys/src/builder/error.rs +++ b/tools/buildsys/src/builder/error.rs @@ -52,6 +52,12 @@ pub(crate) enum Error { var: String, source: std::env::VarError, }, + + #[snafu(display("Unsupported architecture '{}'", arch))] + UnsupportedArch { + arch: String, + source: serde_plain::Error, + }, } pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index b8f82c35..49d4c341 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -215,6 +215,16 @@ pub(crate) enum SupportedArch { Aarch64, } +/// Map a Linux architecture into the corresponding Docker architecture. +impl SupportedArch { + pub(crate) fn goarch(&self) -> &'static str { + match self { + SupportedArch::X86_64 => "amd64", + SupportedArch::Aarch64 => "arm64", + } + } +} + #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] pub(crate) struct ExternalFile { diff --git a/tools/rpm2kmodkit b/tools/rpm2kmodkit new file mode 100755 index 00000000..1c0c8871 --- /dev/null +++ b/tools/rpm2kmodkit @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# +# Create an archive of kernel development sources and toolchain. +set -eu -o pipefail + +for opt in "$@"; do + optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" + case "${opt}" in + --archive-dir=*) ARCHIVE_DIR="${optarg}" ;; + --toolchain-dir=*) TOOLCHAIN_DIR="${optarg}" ;; + --output-dir=*) OUTPUT_DIR="${optarg}" ;; + esac +done + +# Use a friendly name for the top-level directory inside the archive. +KMOD_KIT="${VARIANT}-${ARCH}-kmod-kit-v${VERSION_ID}" + +# Use the build ID within the filename, to align with our build's expectations. +KMOD_KIT_FULL="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-kmod-kit" + +EXTRACT_DIR="$(mktemp -d)" +KIT_DIR="$(mktemp -d)" +mkdir -p "${OUTPUT_DIR}" "${KIT_DIR}/${KMOD_KIT}" + +# Extract any RPMs and find the kernel development archive. +pushd "${EXTRACT_DIR}" >/dev/null +find "${ARCHIVE_DIR}" -type f -name '*.rpm' \ + -exec rpm2cpio {} \; | cpio -idm --quiet +find -name 'kernel-devel.tar.xz' \ + -exec mv {} "${KIT_DIR}/${KMOD_KIT}" \; +popd >/dev/null + +# Extract it and copy in the toolchain. +pushd "${KIT_DIR}/${KMOD_KIT}" >/dev/null +tar xf kernel-devel.tar.xz +rm kernel-devel.tar.xz +cp -a "${TOOLCHAIN_DIR}" toolchain +popd >/dev/null + +# Merge them together into a unified archive. 
+pushd "${KIT_DIR}" >/dev/null +tar cf "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" "${KMOD_KIT}" +xz -T0 "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" +popd >/dev/null + +rm -rf "${EXTRACT_DIR}" "${KIT_DIR}" From 0938d788ee8feab168f76bf8435448e91c0a858c Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Fri, 23 Apr 2021 17:08:55 +0000 Subject: [PATCH 0454/1356] kubelet: add setting for configuring registryPullQPS pass registry-qps argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 96399dbc..d3965f3a 100644 --- a/README.md +++ b/README.md @@ -325,6 +325,7 @@ The following settings are optional and allow you to further configure your clus ``` allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] ``` +* `settings.kubernetes.registry-qps`: The registry pull QPS. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 522cd18f6e4e34208a651563ac46a424a6e6d5c6 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Sun, 25 Apr 2021 20:26:21 +0000 Subject: [PATCH 0455/1356] kubelet: add setting for configuring registryBurst pass registry-burst argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d3965f3a..2c9939c0 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,7 @@ The following settings are optional and allow you to further configure your clus allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] ``` * `settings.kubernetes.registry-qps`: The registry pull QPS. +* `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From cfb4f7bb489d0c6be1cc2713e74a2624d7e2428f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 29 Apr 2021 16:01:56 -0700 Subject: [PATCH 0456/1356] Update kernel-5.4 to 5.4.110 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index aaf09531..582f6402 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/c9c16a56ef978680bd95df30d81add144807ffe0c43def257038586bb6b52388/kernel-5.4.105-48.177.amzn2.src.rpm" -sha512 = "ef506706434bc94df6e845e5262c8d022ebb91ff6bc6a71ac656851c0de66d81392acedb9be39b2be4724f106df21d3b58de71387410e103e4b05a48fa955059" +url = "https://cdn.amazonlinux.com/blobstore/b5b3738a3efe0842f6b4db451c2bc1bbeafb1857a10ec508081e75b52681f13e/kernel-5.4.110-54.182.amzn2.src.rpm" +sha512 = "09739ceb8c5923845f76c5c2322243ffce53433fd24fccc0239fa23ee4951a4288752de87db07dbc0a0c5e81b3a6f9537feff0a2149332956216cf2e03527ecd" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 3bf79bef..8870c8d0 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.105 +Version: 5.4.110 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/c9c16a56ef978680bd95df30d81add144807ffe0c43def257038586bb6b52388/kernel-5.4.105-48.177.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/b5b3738a3efe0842f6b4db451c2bc1bbeafb1857a10ec508081e75b52681f13e/kernel-5.4.110-54.182.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 211904c2f1fbb2daae63b3ed479e2e36cfa865ab Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 30 Apr 2021 11:31:47 -0700 Subject: [PATCH 0457/1356] cargo update the tools/ workspace --- tools/Cargo.lock | 129 ++++++++++++++++++++++++----------------------- 1 file changed, 65 insertions(+), 64 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a1800a0f..7edc7392 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -64,9 +64,9 @@ checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" [[package]] name = "async-trait" -version = "0.1.48" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" +checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" dependencies = [ "proc-macro2", "quote", @@ -92,11 +92,12 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "88fb5a785d6b44fd9d6700935608639af1b8356de1e55d5f7c2740f4faa15d82" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", "miniz_oxide", @@ -265,9 +266,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" +checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" [[package]] name = "core-foundation" @@ -418,9 +419,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" dependencies = [ "futures-channel", "futures-core", @@ -433,9 
+434,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" dependencies = [ "futures-core", "futures-sink", @@ -443,15 +444,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" [[package]] name = "futures-executor" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" dependencies = [ "futures-core", "futures-task", @@ -460,15 +461,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" [[package]] name = "futures-macro" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -478,21 +479,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" +checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" dependencies = [ "futures-channel", "futures-core", @@ -550,9 +551,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ "bytes", "fnv", @@ -609,9 +610,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ "bytes", "fnv", @@ -631,21 +632,21 @@ dependencies = [ [[package]] name = "httparse" -version = "1.3.5" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" +checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" [[package]] name = "httpdate" -version = "0.3.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" [[package]] name = "hyper" -version = "0.14.5" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" +checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" dependencies = [ "bytes", "futures-channel", @@ -684,9 +685,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -753,9 +754,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" +checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "lock_api" @@ -988,18 +989,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" +checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" +checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" dependencies = [ "proc-macro2", "quote", @@ -1190,9 +1191,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +checksum = "85dd92e586f7355c633911e11f77f3d12f04b1b1bd76a198bd34ae3af8341ef2" dependencies = [ "bitflags", ] @@ -1209,9 +1210,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.5" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" +checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" dependencies = [ "aho-corasick", "memchr", @@ -1235,9 +1236,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" dependencies = [ "base64", "bytes", @@ -1440,9 +1441,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.0" +version = "0.19.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64", "log", @@ -1496,9 +1497,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ "ring", "untrusted", @@ -1688,9 +1689,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" [[package]] name = "smallvec" @@ -1832,9 +1833,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" +checksum = "ad184cc9470f9117b2ac6817bfe297307418819ba40552f9b3846f05c33d5373" dependencies = [ "proc-macro2", "quote", @@ -1958,9 +1959,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" dependencies = [ "autocfg", "bytes", @@ -2011,9 +2012,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" dependencies = [ "bytes", "futures-core", @@ -2165,9 +2166,9 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -2387,6 +2388,6 @@ checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" [[package]] name = "zeroize" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" From e1962352fb18015f3c1b177bfa84ad2f5b0d4e5b Mon Sep 17 00:00:00 2001 From: Ellis Tarn Date: Fri, 30 Apr 2021 17:01:49 -0700 Subject: [PATCH 0458/1356] Fixed a misleading typo for setting Labels/Taints (#1550) Document the need to quote most Kubernetes labels/taints --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 2c9939c0..e90904b5 100644 --- a/README.md +++ b/README.md @@ -293,17 +293,17 @@ You 
should [specify them in user data](#using-user-data). * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. -The following settings can be optionally set to customize the node labels and taints. +The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. * `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, value and effect entries added when registering the node in the cluster. * Example user data for setting up labels and taints: ``` [settings.kubernetes.node-labels] - label1 = "foo" - label2 = "bar" + "label1" = "foo" + "label2" = "bar" [settings.kubernetes.node-taints] - dedicated = "experimental:PreferNoSchedule" - special = "true:NoSchedule" + "dedicated" = "experimental:PreferNoSchedule" + "special" = "true:NoSchedule" ``` The following settings are optional and allow you to further configure your cluster. From b4ad7f4d488e9da012e99c13fbac4a38cc0ac07d Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 19 Apr 2021 18:42:15 +0000 Subject: [PATCH 0459/1356] Add a vmware-k8s-1.20 variant This change adds the necessary files for a VMware Kubernetes 1.20 variant. --- .github/workflows/build.yml | 3 +++ README.md | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e177f846..6cc38417 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,6 +20,9 @@ jobs: - variant: vmware-dev arch: x86_64 supported: false + - variant: vmware-k8s-1.20 + arch: x86_64 + supported: true fail-fast: false steps: - uses: actions/checkout@v2 diff --git a/README.md b/README.md index e90904b5..ca6929f0 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,10 @@ We also have a variant designed to work with ECS, currently in preview: - `aws-ecs-1` +Another variant we have in preview is designed to be a Kubernetes worker node in VMware: + +- `vmware-k8s-1.20` + The `aws-k8s-1.15` variant is deprecated and will no longer be supported in Bottlerocket releases. We recommend users replace `aws-k8s-1.15` nodes with the [latest variant compatible with their cluster](variants/). From dc2bfe05c865340598862ff5fe9fe938edcbba8a Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Wed, 28 Apr 2021 00:12:25 +0000 Subject: [PATCH 0460/1356] kubelet: add setting for configuring eventBurst pass event-burst argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ca6929f0..68583c88 100644 --- a/README.md +++ b/README.md @@ -331,6 +331,7 @@ The following settings are optional and allow you to further configure your clus ``` * `settings.kubernetes.registry-qps`: The registry pull QPS. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. +* `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. 
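A note on the quoting guidance in the labels/taints patch above: in TOML, an unquoted key that contains dots is parsed as a chain of nested tables rather than as one literal key, which is why Kubernetes label and taint keys must be quoted in user data. The sketch below is illustrative only and is not part of these patches; it assumes the `toml` crate is available and uses a made-up taint key.

```rust
// Illustrative only: why the README patch above tells users to quote
// Kubernetes label/taint keys. Assumes the `toml` crate; the key is an example.
fn main() {
    // Unquoted: "dedicated.example.com" parses as nested tables
    // dedicated -> example -> com, not as a single taint key.
    let unquoted: toml::Value = r#"
[settings.kubernetes.node-taints]
dedicated.example.com = "experimental:PreferNoSchedule"
"#
    .parse()
    .expect("valid TOML, but not the structure kubelet expects");

    // Quoted: the full key survives as one literal string.
    let quoted: toml::Value = r#"
[settings.kubernetes.node-taints]
"dedicated.example.com" = "experimental:PreferNoSchedule"
"#
    .parse()
    .expect("valid TOML");

    println!("unquoted parses as:\n{:#?}\n", unquoted);
    println!("quoted parses as:\n{:#?}", quoted);
}
```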
From 3d1c3cd6aebcc81b410591d7ab55af7c92120189 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Wed, 28 Apr 2021 00:16:16 +0000 Subject: [PATCH 0461/1356] kubelet: add setting for configuring eventRecordQPS pass event-qps argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 68583c88..a42b573d 100644 --- a/README.md +++ b/README.md @@ -331,6 +331,7 @@ The following settings are optional and allow you to further configure your clus ``` * `settings.kubernetes.registry-qps`: The registry pull QPS. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. +* `settings.kubernetes.event-qps`: The maximum event creations per second. * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. You can also optionally specify static pods for your node with the following settings. From 567dba49ba78340449c57b8dc6c7a351a4cfb927 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Thu, 29 Apr 2021 18:57:20 +0000 Subject: [PATCH 0462/1356] kubelet: add setting for configuring kubeAPIQPS pass kube-api-qps argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a42b573d..a4291098 100644 --- a/README.md +++ b/README.md @@ -333,6 +333,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. * `settings.kubernetes.event-qps`: The maximum event creations per second. * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. +* `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 278c1106f4b1592b37a12b135c9e77796b130f65 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Thu, 29 Apr 2021 19:00:34 +0000 Subject: [PATCH 0463/1356] kubelet: add setting for configuring kubeAPIBurst pass kube-api-burst argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a4291098..1ff93752 100644 --- a/README.md +++ b/README.md @@ -334,6 +334,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.event-qps`: The maximum event creations per second. * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. * `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. +* `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 93c12415079e113dc0fb7b7f98af9022aa3c327c Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 3 May 2021 17:40:54 -0700 Subject: [PATCH 0464/1356] Update kernel-5.4 to 5.4.110-54.189 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 582f6402..012fc5ae 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/b5b3738a3efe0842f6b4db451c2bc1bbeafb1857a10ec508081e75b52681f13e/kernel-5.4.110-54.182.amzn2.src.rpm" -sha512 = "09739ceb8c5923845f76c5c2322243ffce53433fd24fccc0239fa23ee4951a4288752de87db07dbc0a0c5e81b3a6f9537feff0a2149332956216cf2e03527ecd" +url = "https://cdn.amazonlinux.com/blobstore/30c599278ce31259b6ad8fcfb05d25c9bdbbdce8398f0ca686e70c36e7b4986b/kernel-5.4.110-54.189.amzn2.src.rpm" +sha512 = "ad38a02ec569dcd088e4013f2c9aa50ddf50775b4ded9da5ca367ae19cd141a7d7cd539c986cdcd70656a17e3e9fe874332942bdb027462ef0e029ac1c5fc38b" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 8870c8d0..a2e0f9bb 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -2,12 +2,12 @@ Name: %{_cross_os}kernel-5.4 Version: 5.4.110 -Release: 1%{?dist} +Release: 2%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/b5b3738a3efe0842f6b4db451c2bc1bbeafb1857a10ec508081e75b52681f13e/kernel-5.4.110-54.182.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/30c599278ce31259b6ad8fcfb05d25c9bdbbdce8398f0ca686e70c36e7b4986b/kernel-5.4.110-54.189.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 41af7d375e285746c62001eeca3e4f48d7e1e1cd Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 3 May 2021 17:41:50 -0700 Subject: [PATCH 0465/1356] Update kernel-5.10 to 5.10.29-27.128 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index bd608ef0..68021b52 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,5 +13,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/fa04b98fc067a4943beac60d0c2971e2fbef1a29faed4bac1c4096abe4ad4c12/kernel-5.10.29-27.126.amzn2.src.rpm" -sha512 = "47341f4a1c13ba7e5ea72bad13fe689eefd22cc7547aea08a08fe47238b4a3fe1659786a406b84a1d1508143be20d9be2fae6fe3e7a6924bc85043bf61d4bfce" +url = "https://cdn.amazonlinux.com/blobstore/9d3856424e8b2b45e2871c0fd558641435e81650c01a70c2c27c0115c86f04c5/kernel-5.10.29-27.128.amzn2.src.rpm" +sha512 = "372b4fa3f69cea03469b4305adfea13b4f67eece27a5a1847fd12913fd1f42a2c7dccc4569c5781573db6a7044b5e073f32ad57e9954bc0290c5ee1d90fe5640" diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 6908fb32..1fe53eb8 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -2,12 +2,12 @@ Name: %{_cross_os}kernel-5.10 Version: 5.10.29 -Release: 1%{?dist} +Release: 2%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/fa04b98fc067a4943beac60d0c2971e2fbef1a29faed4bac1c4096abe4ad4c12/kernel-5.10.29-27.126.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9d3856424e8b2b45e2871c0fd558641435e81650c01a70c2c27c0115c86f04c5/kernel-5.10.29-27.128.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. 
From 3c010de624fa2f8ff03420636d270d3523483972 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 4 May 2021 11:35:52 -0700 Subject: [PATCH 0466/1356] buildsys: retry builds on "unexpected EOF" error This is a rare failure that we see in CI. Retry it so the entire run is not marked as failing. Co-authored-by: Ben Cressey Co-authored-by: Tom Kirchner --- tools/Cargo.lock | 2 ++ tools/buildsys/Cargo.toml | 2 ++ tools/buildsys/src/builder.rs | 34 ++++++++++++++++++++++++---------- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 7edc7392..1e540afc 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -147,8 +147,10 @@ version = "0.1.0" dependencies = [ "duct", "hex", + "lazy_static", "nonzero_ext", "rand", + "regex", "reqwest", "serde", "serde_plain", diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index b7a33f0b..5c23cb25 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -11,7 +11,9 @@ exclude = ["README.md"] [dependencies] duct = "0.13.0" hex = "0.4.0" +lazy_static = "1.4" rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } +regex = "1" reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "0.3.0" diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index f1dd8ed7..d6703cc3 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -8,8 +8,10 @@ pub(crate) mod error; use error::Result; use duct::cmd; +use lazy_static::lazy_static; use nonzero_ext::nonzero; use rand::Rng; +use regex::Regex; use sha2::{Digest, Sha512}; use snafu::{ensure, OptionExt, ResultExt}; use std::env; @@ -32,11 +34,23 @@ the bug is fixed there will be many older versions of Docker in the wild. The failure has an exit code of 1, which is too generic to be helpful. All we can do is check the output for the error's signature, and retry if we find it. */ -static DOCKER_BUILD_FRONTEND_ERROR: &str = concat!( - r#"failed to solve with frontend dockerfile.v0: "#, - r#"failed to solve with frontend gateway.v0: "#, - r#"frontend grpc server closed unexpectedly"# -); +lazy_static! { + static ref DOCKER_BUILD_FRONTEND_ERROR: Regex = Regex::new(concat!( + r#"failed to solve with frontend dockerfile.v0: "#, + r#"failed to solve with frontend gateway.v0: "#, + r#"frontend grpc server closed unexpectedly"# + )) + .unwrap(); +} + +/* +We also see sporadic CI failures with only this error message. +We use (?m) for multi-line mode so we can match the message on a line of its own without splitting +the output ourselves; we match the regexes against the whole of stdout. +*/ +lazy_static! { + static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)^unexpected EOF$").unwrap(); +} static DOCKER_BUILD_MAX_ATTEMPTS: NonZeroU16 = nonzero!(10u16); @@ -210,12 +224,12 @@ fn build( let _ = docker(&rmi, Retry::No); // Build the image, which builds the artifacts we want. - // Work around a transient, known failure case with Docker. + // Work around transient, known failure cases with Docker. docker( &build, Retry::Yes { attempts: DOCKER_BUILD_MAX_ATTEMPTS, - messages: &[DOCKER_BUILD_FRONTEND_ERROR], + messages: &[&*DOCKER_BUILD_FRONTEND_ERROR, &*UNEXPECTED_EOF_ERROR], }, )?; @@ -240,7 +254,7 @@ fn build( /// Run `docker` with the specified arguments. 
fn docker(args: &[String], retry: Retry) -> Result { let mut max_attempts: u16 = 1; - let mut retry_messages: &[&str] = &[]; + let mut retry_messages: &[&Regex] = &[]; if let Retry::Yes { attempts, messages } = retry { max_attempts = attempts.into(); retry_messages = messages; @@ -262,7 +276,7 @@ fn docker(args: &[String], retry: Retry) -> Result { } ensure!( - retry_messages.iter().any(|&m| stdout.contains(m)) && attempt < max_attempts, + retry_messages.iter().any(|m| m.is_match(&stdout)) && attempt < max_attempts, error::DockerExecution { args: &args.join(" ") } @@ -278,7 +292,7 @@ enum Retry<'a> { No, Yes { attempts: NonZeroU16, - messages: &'a [&'a str], + messages: &'a [&'static Regex], }, } From 606cc1aea3ea2a5391697e519faa745eba3c917b Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 5 May 2021 09:52:14 -0700 Subject: [PATCH 0467/1356] buildsys: retry package build after known BuildKit internal error --- tools/buildsys/src/builder.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index d6703cc3..05e92aac 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -43,6 +43,21 @@ lazy_static! { .unwrap(); } +/* +There's a similar bug that's fixed in new releases of BuildKit but still in the wild in popular +versions of Docker/BuildKit: + https://github.com/moby/buildkit/issues/1468 +*/ +lazy_static! { + static ref DOCKER_BUILD_DEAD_RECORD_ERROR: Regex = Regex::new(concat!( + r#"failed to solve with frontend dockerfile.v0: "#, + r#"failed to solve with frontend gateway.v0: "#, + r#"rpc error: code = Unknown desc = failed to build LLB: "#, + r#"failed to get dead record"#, + )) + .unwrap(); +} + /* We also see sporadic CI failures with only this error message. We use (?m) for multi-line mode so we can match the message on a line of its own without splitting @@ -229,7 +244,11 @@ fn build( &build, Retry::Yes { attempts: DOCKER_BUILD_MAX_ATTEMPTS, - messages: &[&*DOCKER_BUILD_FRONTEND_ERROR, &*UNEXPECTED_EOF_ERROR], + messages: &[ + &*DOCKER_BUILD_FRONTEND_ERROR, + &*DOCKER_BUILD_DEAD_RECORD_ERROR, + &*UNEXPECTED_EOF_ERROR, + ], }, )?; From 6977c84bd6618494ea661d07bfefe8e994551df9 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 29 Apr 2021 21:52:26 +0000 Subject: [PATCH 0468/1356] README.md: Update for VMware This updates the top level README, adding additional details about VMware variants as well as linking to the VMware QUICKSTART document. --- README.md | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 1ff93752..a4ce4c0e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. -If you’re ready to jump right in, read our [QUICKSTART for Kubernetes](QUICKSTART-EKS.md) to try Bottlerocket in an Amazon EKS cluster or our [QUICKSTART for Amazon ECS](QUICKSTART-ECS.md) to try Bottlerocket in an Amazon ECS cluster. +If you’re ready to jump right in, read one of our setup guides for running Bottlerocket in [Amazon EKS](QUICKSTART-EKS.md), [Amazon ECS](QUICKSTART-ECS.md), or [VMware](QUICKSTART-VMWARE.md). Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. This is a reflection of what we've learned building operating systems and services at Amazon. 
@@ -76,11 +76,12 @@ Our supported architectures include `x86_64` and `aarch64` (written as `arm64` i :walking: :running: Bottlerocket is best used with a container orchestrator. -To get started with Kubernetes, please see [QUICKSTART-EKS](QUICKSTART-EKS.md). +To get started with Kubernetes in Amazon EKS, please see [QUICKSTART-EKS](QUICKSTART-EKS.md). +To get started with Kubernetes in VMware, please see [QUICKSTART-VMWARE](QUICKSTART-VMWARE.md). To get started with Amazon ECS, please see [QUICKSTART-ECS](QUICKSTART-ECS.md). These guides describe: * how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers -* how to launch a Bottlerocket instance in EC2 +* how to launch a Bottlerocket instance in EC2 or VMware To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). It describes: @@ -112,7 +113,9 @@ Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bott This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. (You can easily replace this control container with your own just by changing the URI; see [Settings](#settings).) -You need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART-EKS.md#enabling-ssm). +In AWS, you need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART-EKS.md#enabling-ssm). +Outside of AWS, you can use [AWS Systems Manager for hybrid environments](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). +There's more detail about hybrid environments in the [control container documentation](https://github.com/bottlerocket-os/bottlerocket-control-container/#connecting-to-aws-systems-manager-ssm). Once the instance is started, you can start a session: @@ -134,6 +137,7 @@ To do even more, read the next section about the [admin container](#admin-contai Bottlerocket has an [administrative container](https://github.com/bottlerocket-os/bottlerocket-admin-container), disabled by default, that runs outside of the orchestrator in a separate instance of containerd. This container has an SSH server that lets you log in as `ec2-user` using your EC2-registered SSH key. +Outside of AWS, you can [pass in your own SSH keys](https://github.com/bottlerocket-os/bottlerocket-admin-container#authenticating-with-the-admin-container). (You can easily replace this admin container with your own just by changing the URI; see [Settings](#settings). To enable the container, you can change the setting in user data when starting Bottlerocket, for example EC2 instance user data: @@ -289,14 +293,21 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in #### Kubernetes settings -See the [setup guide](QUICKSTART-EKS.md) for much more detail on setting up Bottlerocket and Kubernetes. +See the [EKS setup guide](QUICKSTART-EKS.md) for much more detail on setting up Bottlerocket and Kubernetes in AWS EKS. +For more details about running Bottlerocket as a Kubernetes worker node in VMware, see the [VMware setup guide](QUICKSTART-VMWARE.md). The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). -* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". 
* `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. +For Kubernetes variants in AWS, you must also specify: +* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". + +For Kubernetes variants in VMware, you must specify: +* `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. +* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-refe rence/kubelet-tls-bootstrapping/). + The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. * `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, value and effect entries added when registering the node in the cluster. @@ -341,9 +352,9 @@ Static pods can be particularly useful when running in standalone mode. * `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. * `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. -The following settings are set for you automatically by [pluto](sources/api/) based on runtime instance information, but you can override them if you know what you're doing! -* `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) -* `settings.kubernetes.cluster-dns-ip`: The CIDR block of the primary network interface. +For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! +In AWS, [pluto](sources/api/) sets these based on runtime instance information. +In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.20/defaults.d/). * `settings.kubernetes.node-ip`: The IPv4 address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. @@ -352,6 +363,10 @@ The following settings are set for you automatically by [pluto](sources/api/) ba * `memory`: in mebibytes from the max num of pods on the instance. `memory_to_reserve = max_num_pods * 11 + 255`. * `ephemeral-storage`: defaults to `1Gi`. +For Kubernetes variants in AWS, the following settings are set for you automatically by [pluto](sources/api/). +* `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) +* `settings.kubernetes.cluster-dns-ip`: Derived from the EKS IPV4 Service CIDR or the CIDR block of the primary network interface. + #### Amazon ECS settings See the [setup guide](QUICKSTART-ECS.md) for much more detail on setting up Bottlerocket and ECS. 
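Looking back at the two buildsys patches above (retrying on the BuildKit frontend error, the dead-record error, and the bare "unexpected EOF" line): the retry-on-known-output pattern is easy to sketch in isolation. The example below is illustrative only and is not buildsys's code; it assumes the `regex` crate, and the command and error patterns are placeholders rather than an exhaustive list.

```rust
// Illustrative sketch of the retry pattern from the buildsys patches above:
// rerun a command only when it fails with output matching a known transient error.
use regex::Regex;
use std::process::Command;

fn run_with_retries(program: &str, args: &[&str], retryable: &[Regex], max_attempts: u32) -> bool {
    for attempt in 1..=max_attempts {
        let output = Command::new(program)
            .args(args)
            .output()
            .expect("failed to start process");
        let stdout = String::from_utf8_lossy(&output.stdout);

        if output.status.success() {
            return true;
        }
        // Retry only for failures that look like one of the known transient errors.
        if attempt < max_attempts && retryable.iter().any(|re| re.is_match(&stdout)) {
            eprintln!("attempt {} hit a known transient error; retrying", attempt);
            continue;
        }
        return false;
    }
    false
}

fn main() {
    // (?m) makes ^...$ match one line of output, as in the "unexpected EOF" case.
    let retryable = vec![Regex::new("(?m)^unexpected EOF$").unwrap()];
    let ok = run_with_retries("docker", &["build", "."], &retryable, 10);
    std::process::exit(if ok { 0 } else { 1 });
}
```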
From a5223e94b3035819c0928d7ba05c08786677d5b4 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 4 May 2021 23:02:56 +0000 Subject: [PATCH 0469/1356] README: Fix stylized left/right quotes to normal quotes --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a4ce4c0e..07570118 100644 --- a/README.md +++ b/README.md @@ -120,8 +120,8 @@ There's more detail about hybrid environments in the [control container document Once the instance is started, you can start a session: * Go to AWS SSM's [Session Manager](https://console.aws.amazon.com/systems-manager/session-manager/sessions) -* Select “Start session” and choose your Bottlerocket instance -* Select “Start session” again to get a shell +* Select "Start session" and choose your Bottlerocket instance +* Select "Start session" again to get a shell If you prefer a command-line tool, you can start a session with a recent [AWS CLI](https://aws.amazon.com/cli/) and the [session-manager-plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). Then you'd be able to start a session using only your instance ID, like this: From 61d7d6a83a5c9af0861da55594c4927069267b6e Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Thu, 6 May 2021 12:59:21 -0700 Subject: [PATCH 0470/1356] pubsys: limit threads during validate-repo In a previous quick fix we spawned a thread for every target during pubsys validate-repo. Now we limit the number of threads with a rayon thread pool. --- tools/Cargo.lock | 87 ++++++++++++++++++++++ tools/pubsys/Cargo.toml | 2 + tools/pubsys/src/repo/validate_repo/mod.rs | 53 +++++++------ 3 files changed, 121 insertions(+), 21 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 1e540afc..30ad4f7f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -303,6 +303,51 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" +dependencies = [ + "autocfg", + "cfg-if", + "lazy_static", +] + [[package]] name = "crypto-mac" version = "0.10.0" @@ -388,6 +433,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + [[package]] name = "encode_unicode" version = "0.3.6" @@ -796,6 +847,15 @@ version = 
"2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +[[package]] +name = "memoffset" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -1084,8 +1144,10 @@ dependencies = [ "indicatif", "lazy_static", "log", + "num_cpus", "parse-datetime", "pubsys-config", + "rayon", "reqwest", "rusoto_core", "rusoto_credential", @@ -1191,6 +1253,31 @@ dependencies = [ "rand_core", ] +[[package]] +name = "rayon" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "lazy_static", + "num_cpus", +] + [[package]] name = "redox_syscall" version = "0.2.7" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 19482f42..e435b2cc 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -16,7 +16,9 @@ futures = "0.3.5" indicatif = "0.15.0" lazy_static = "1.4" log = "0.4" +num_cpus = "1" parse-datetime = { path = "../../sources/parse-datetime" } +rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } rusoto_core = { version = "0.46.0", default-features = false, features = ["rustls"] } diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 26ea168d..46aff5a7 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -6,10 +6,11 @@ use crate::Args; use log::{info, trace}; use pubsys_config::InfraConfig; use snafu::{OptionExt, ResultExt}; +use std::cmp::min; use std::fs::File; use std::io; use std::path::PathBuf; -use std::thread::spawn; +use std::sync::mpsc; use structopt::StructOpt; use tough::{Repository, RepositoryLoader}; use url::Url; @@ -38,13 +39,25 @@ pub(crate) struct ValidateRepoArgs { validate_targets: bool, } -/// Retrieves listed targets and attempts to download them for validation purposes +/// If we are on a machine with a large number of cores, then we limit the number of simultaneous +/// downloads to this arbitrarily chosen maximum. +const MAX_DOWNLOAD_THREADS: usize = 16; + +/// Retrieves listed targets and attempts to download them for validation purposes. We use a Rayon +/// thread pool instead of tokio for async execution because `reqwest::blocking` creates a tokio +/// runtime (and multiple tokio runtimes are not supported). 
fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let targets = &repo.targets().signed.targets; + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(min(num_cpus::get(), MAX_DOWNLOAD_THREADS)) + .build() + .context(error::ThreadPool)?; + + // create the channels through which our download results will be passed + let (tx, rx) = mpsc::channel(); - let mut tasks = Vec::new(); for target in targets.keys().cloned() { - let target = target.to_string(); + let tx = tx.clone(); let mut reader = repo .read_target(&target) .with_context(|| repo_error::ReadTarget { @@ -54,24 +67,22 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { target: target.to_string(), })?; info!("Downloading target: {}", target); - // TODO - limit threads https://github.com/bottlerocket-os/bottlerocket/issues/1522 - tasks.push(spawn(move || { - // tough's `Read` implementation validates the target as it's being downloaded - io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { - target: target.to_string(), + thread_pool.spawn(move || { + tx.send({ + // tough's `Read` implementation validates the target as it's being downloaded + io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { + target: target.to_string(), + }) }) - })); + // inability to send on this channel is unrecoverable + .unwrap(); + }); } + // close all senders + drop(tx); - // ensure that we join all threads before checking the results - let mut results = Vec::new(); - for task in tasks { - let result = task.join().map_err(|e| error::Error::Join { - // the join function is returning an error type that does not implement error or display - inner: format!("{:?}", e), - })?; - results.push(result); - } + // block and await all downloads + let results: Vec> = rx.into_iter().collect(); // check all results and return the first error we see for result in results { @@ -164,8 +175,8 @@ mod error { #[snafu(display("Missing target: {}", target))] TargetMissing { target: String }, - #[snafu(display("Failed to join thread: {}", inner))] - Join { inner: String }, + #[snafu(display("Unable to create thread pool: {}", source))] + ThreadPool { source: rayon::ThreadPoolBuildError }, } } pub(crate) use error::Error; From d3261636fb39bdbf7937995731aa048c030a70cb Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 14 May 2021 10:16:05 -0700 Subject: [PATCH 0471/1356] pubsys: allow refresh-repo to use default key path --- tools/pubsys/src/repo/refresh_repo/mod.rs | 32 ++++++++++++++++------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index a198ae93..26683869 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -15,7 +15,7 @@ use std::fs::File; use std::path::{Path, PathBuf}; use structopt::StructOpt; use tough::editor::RepositoryEditor; -use tough::key_source::KeySource; +use tough::key_source::{KeySource, LocalKeySource}; use tough::{ExpirationEnforcement, RepositoryLoader}; use url::Url; @@ -42,6 +42,10 @@ pub(crate) struct RefreshRepoArgs { /// Path to root.json for this repo root_role_path: PathBuf, + #[structopt(long, parse(from_os_str))] + /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined + default_key_path: PathBuf, + #[structopt(long, parse(from_os_str))] /// Path to file that defines when repo non-root metadata should expire repo_expiration_policy_path: PathBuf, @@ -143,15 +147,23 @@ pub(crate) fn 
run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() missing: format!("definition for repo {}", &refresh_repo_args.repo), })?; - // Get signing key config from repository configuration - let signing_key_config = - repo_config - .signing_keys - .as_ref() - .context(repo_error::MissingConfig { - missing: "signing_keys", - })?; - let key_source = get_signing_key_source(signing_key_config); + // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the + // generated local key. + let signing_key_config = repo_config.signing_keys.as_ref(); + + let key_source = if let Some(signing_key_config) = signing_key_config { + get_signing_key_source(signing_key_config) + } else { + ensure!( + refresh_repo_args.default_key_path.exists(), + repo_error::MissingConfig { + missing: "signing_keys in repo config, and we found no local key", + } + ); + Box::new(LocalKeySource { + path: refresh_repo_args.default_key_path.clone(), + }) + }; // Get the expiration policy info!( From 498108ab8e15902571ac8df0c97030e843fc4c42 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 13 May 2021 17:18:54 -0700 Subject: [PATCH 0472/1356] tools: cargo update indicatif has to be updated in pubsys because it passes a progress bar to coldsnap, so the versions have to match. --- tools/Cargo.lock | 181 +++++++++++++++++++++------------------- tools/pubsys/Cargo.toml | 2 +- 2 files changed, 94 insertions(+), 89 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 30ad4f7f..68616741 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.1" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a" dependencies = [ "gimli", ] @@ -17,9 +17,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] @@ -92,9 +92,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88fb5a785d6b44fd9d6700935608639af1b8356de1e55d5f7c2740f4faa15d82" +checksum = "4717cfcbfaa661a0fd48f8453951837ae7e8f81e481fbb136e3202d72805a744" dependencies = [ "addr2line", "cc", @@ -134,9 +134,9 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" dependencies = [ "memchr", ] @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fe5edca2eabd87be4dfee412700406e0865cafbbd36ffe89a1725ec1a0b579" +checksum = "8d79a8cb3e52be4d3c7651152de3a712441c519c0b1757d1e3be77aae63c90d4" dependencies = [ "argh", "base64", @@ -289,10 +289,13 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] -name = "cpuid-bool" -version = "0.1.2" +name = "cpufeatures" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +checksum = "281f563b2c3a0e535ab12d81d3c5859045795256ad269afa7c19542585b68f93" +dependencies = [ + "libc", +] [[package]] name = "crc32fast" @@ -472,9 +475,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" +checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" dependencies = [ "futures-channel", "futures-core", @@ -487,9 +490,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" +checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" dependencies = [ "futures-core", "futures-sink", @@ -497,15 +500,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" +checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" [[package]] name = "futures-executor" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" +checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" dependencies = [ "futures-core", "futures-task", @@ -514,16 +517,17 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" +checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" [[package]] name = "futures-macro" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" +checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -532,22 +536,23 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" +checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" [[package]] name = "futures-task" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" +checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" [[package]] name = "futures-util" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" +checksum = 
"feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ + "autocfg", "futures-channel", "futures-core", "futures-io", @@ -585,9 +590,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" [[package]] name = "globset" @@ -674,9 +679,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ "bytes", "http", @@ -685,9 +690,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" +checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" @@ -759,9 +764,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" +checksum = "507cf157a0dab3c837bef6e2086466255d9de4a6b1af69e62b62c54cd52f6062" dependencies = [ "console", "lazy_static", @@ -792,9 +797,9 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.50" +version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" dependencies = [ "wasm-bindgen", ] @@ -813,9 +818,9 @@ checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "lock_api" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" dependencies = [ "scopeguard", ] @@ -843,9 +848,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memoffset" @@ -940,15 +945,15 @@ dependencies = [ [[package]] name = "number_prefix" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" [[package]] name = "olpc-cjson" @@ -975,9 +980,9 @@ checksum = 
"624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "os_pipe" @@ -1280,9 +1285,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85dd92e586f7355c633911e11f77f3d12f04b1b1bd76a198bd34ae3af8341ef2" +checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" dependencies = [ "bitflags", ] @@ -1299,9 +1304,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.6" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", @@ -1310,9 +1315,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.23" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" @@ -1515,9 +1520,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" +checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce" [[package]] name = "rustc_version" @@ -1653,18 +1658,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.125" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" +checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.125" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" +checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" dependencies = [ "proc-macro2", "quote", @@ -1723,13 +1728,13 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" +checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" dependencies = [ "block-buffer", "cfg-if", - "cpuid-bool", + "cpufeatures", "digest", "opaque-debug", ] @@ -1922,9 +1927,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad184cc9470f9117b2ac6817bfe297307418819ba40552f9b3846f05c33d5373" +checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" dependencies = [ "proc-macro2", "quote", @@ -2187,9 +2192,9 @@ checksum = 
"360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ "cfg-if", "pin-project-lite", @@ -2198,9 +2203,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" dependencies = [ "lazy_static", ] @@ -2282,9 +2287,9 @@ dependencies = [ [[package]] name = "url" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ "form_urlencoded", "idna", @@ -2334,9 +2339,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" dependencies = [ "cfg-if", "serde", @@ -2346,9 +2351,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" dependencies = [ "bumpalo", "lazy_static", @@ -2361,9 +2366,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" +checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" dependencies = [ "cfg-if", "js-sys", @@ -2373,9 +2378,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2383,9 +2388,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" dependencies = [ "proc-macro2", "quote", @@ -2396,15 +2401,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.73" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" +checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" [[package]] name = "web-sys" -version = "0.3.50" +version = "0.3.51" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" +checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index e435b2cc..ee2585d2 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -13,7 +13,7 @@ clap = "2.33" coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} pubsys-config = { path = "../pubsys-config/" } futures = "0.3.5" -indicatif = "0.15.0" +indicatif = "0.16.0" lazy_static = "1.4" log = "0.4" num_cpus = "1" From d008f03cd85c20e3406b8ecf8b07e76a2af3a2a3 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 19 May 2021 16:56:11 -0700 Subject: [PATCH 0473/1356] README: add details about the two default bottlerocket volumes This documents the uses of the two default Bottlerocket volumes. --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 07570118..1c861cd8 100644 --- a/README.md +++ b/README.md @@ -652,3 +652,10 @@ See [Settings](#settings) above for examples and to understand what you can conf The server and client are the user-facing components of the API system, but there are a number of other components that work together to make sure your settings are applied, and that they survive upgrades of Bottlerocket. For more details, see the [API system documentation](sources/api/). + +### Default Volumes + +Bottlerocket operates with two default storage volumes. +* The root device, `/dev/xvda`, holds the active and passive [partition sets](#updates-1). + It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. +* The data device, `/dex/xvdb`, is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). From 3caa6c90235db063adecd7c24a70d7109d0ac66e Mon Sep 17 00:00:00 2001 From: Sungwon Cho Date: Thu, 20 May 2021 15:12:07 +0900 Subject: [PATCH 0474/1356] kubelet: add setting for configuring containerLogMaxFiles and MaxSize --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 1c861cd8..f89220a1 100644 --- a/README.md +++ b/README.md @@ -346,6 +346,8 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. * `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. * `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. +* `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. +* `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. 
From b433d9baa60079e953238ec3d1de4bbfed7b6404 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 2 Jun 2021 10:06:06 -0700 Subject: [PATCH 0475/1356] Document the deprecation of the aws-k8s-1.16 variant --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f89220a1..7fd3c609 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,6 @@ For example, an `x86_64` build of the `aws-k8s-1.19` variant will produce an ima The following variants support EKS, as described above: -- `aws-k8s-1.16` - `aws-k8s-1.17` - `aws-k8s-1.18` - `aws-k8s-1.19` @@ -64,8 +63,9 @@ Another variant we have in preview is designed to be a Kubernetes worker node in - `vmware-k8s-1.20` -The `aws-k8s-1.15` variant is deprecated and will no longer be supported in Bottlerocket releases. -We recommend users replace `aws-k8s-1.15` nodes with the [latest variant compatible with their cluster](variants/). +The `aws-k8s-1.16` variant is deprecated and will no longer be supported in Bottlerocket releases after June, 2021. +The `aws-k8s-1.15` variant is no longer supported. +We recommend users replace `aws-k8s-1.15` and `aws-k8s-1.16` nodes with the [latest variant compatible with their cluster](variants/). ## Architectures From 0486b14cefe501ba965defa2de3ead6fc21e68b5 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Mon, 7 Jun 2021 22:03:38 +0000 Subject: [PATCH 0476/1356] kubelet: add setting for configuring systemReserved pass system-reserved argument to kubelet --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 7fd3c609..a6aaa3ac 100644 --- a/README.md +++ b/README.md @@ -340,6 +340,14 @@ The following settings are optional and allow you to further configure your clus ``` allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] ``` +* `settings.kubernetes.system-reserved`: Resources reserved for system components. + * Example user data for setting up system reserved: + ``` + [settings.kubernetes.system-reserved] + cpu = "10m" + memory = "100Mi" + ephemeral-storage= "1Gi" + ``` * `settings.kubernetes.registry-qps`: The registry pull QPS. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. * `settings.kubernetes.event-qps`: The maximum event creations per second. 
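The QPS and burst settings listed in this part of the README carry no example of their own; a combined user data sketch could look like this. Only the setting names come from the README above — the numbers are placeholders showing the shape of the configuration, not recommended values:

```
[settings.kubernetes]
registry-qps = 5
registry-burst = 10
event-qps = 50
event-burst = 100
kube-api-qps = 10
kube-api-burst = 20
```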
From 075d912738fffcb5749fda5c08ec551c91eb09fa Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 3 Jun 2021 23:32:50 +0000 Subject: [PATCH 0477/1356] kernel-5.4: add patch required for kdump support --- ...86-purgatory-Add-fno-stack-protector.patch | 45 ++++ ...64-kexec_file-add-crash-dump-support.patch | 221 ++++++++++++++++++ .../0004-libfdt-include-fdt_addresses.c.patch | 44 ++++ packages/kernel-5.4/kernel-5.4.spec | 4 + 4 files changed, 314 insertions(+) create mode 100644 packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch create mode 100644 packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch create mode 100644 packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch diff --git a/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch b/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch new file mode 100644 index 00000000..a6f19353 --- /dev/null +++ b/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch @@ -0,0 +1,45 @@ +From ff58155ca4fa7e931f34d948fa09fe14c6a66116 Mon Sep 17 00:00:00 2001 +From: Arvind Sankar +Date: Tue, 16 Jun 2020 18:25:47 -0400 +Subject: [PATCH] x86/purgatory: Add -fno-stack-protector + +The purgatory Makefile removes -fstack-protector options if they were +configured in, but does not currently add -fno-stack-protector. + +If gcc was configured with the --enable-default-ssp configure option, +this results in the stack protector still being enabled for the +purgatory (absent distro-specific specs files that might disable it +again for freestanding compilations), if the main kernel is being +compiled with stack protection enabled (if it's disabled for the main +kernel, the top-level Makefile will add -fno-stack-protector). + +This will break the build since commit + e4160b2e4b02 ("x86/purgatory: Fail the build if purgatory.ro has missing symbols") +and prior to that would have caused runtime failure when trying to use +kexec. + +Explicitly add -fno-stack-protector to avoid this, as done in other +Makefiles that need to disable the stack protector. + +Reported-by: Gabriel C +Signed-off-by: Arvind Sankar +Signed-off-by: Linus Torvalds +--- + arch/x86/purgatory/Makefile | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile +index b04e6e72a592..088bd764e0b7 100644 +--- a/arch/x86/purgatory/Makefile ++++ b/arch/x86/purgatory/Makefile +@@ -34,6 +34,7 @@ KCOV_INSTRUMENT := n + PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel + PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss + PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING ++PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector) + + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. 
That + # in turn leaves some undefined symbols like __fentry__ in purgatory and not +-- +2.30.2 + diff --git a/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch b/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch new file mode 100644 index 00000000..e7e40e41 --- /dev/null +++ b/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch @@ -0,0 +1,221 @@ +From 3751e728cef2908c15974a5ae44627fd41ef3362 Mon Sep 17 00:00:00 2001 +From: AKASHI Takahiro +Date: Mon, 16 Dec 2019 11:12:47 +0900 +Subject: [PATCH] arm64: kexec_file: add crash dump support + +Enabling crash dump (kdump) includes +* prepare contents of ELF header of a core dump file, /proc/vmcore, + using crash_prepare_elf64_headers(), and +* add two device tree properties, "linux,usable-memory-range" and + "linux,elfcorehdr", which represent respectively a memory range + to be used by crash dump kernel and the header's location + +Signed-off-by: AKASHI Takahiro +Cc: Catalin Marinas +Cc: Will Deacon +Reviewed-by: James Morse +Tested-and-reviewed-by: Bhupesh Sharma +Signed-off-by: Will Deacon +--- + arch/arm64/include/asm/kexec.h | 4 + + arch/arm64/kernel/kexec_image.c | 4 - + arch/arm64/kernel/machine_kexec_file.c | 106 ++++++++++++++++++++++++- + 3 files changed, 106 insertions(+), 8 deletions(-) + +diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h +index 12a561a54128..d24b527e8c00 100644 +--- a/arch/arm64/include/asm/kexec.h ++++ b/arch/arm64/include/asm/kexec.h +@@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {} + struct kimage_arch { + void *dtb; + unsigned long dtb_mem; ++ /* Core ELF header buffer */ ++ void *elf_headers; ++ unsigned long elf_headers_mem; ++ unsigned long elf_headers_sz; + }; + + extern const struct kexec_file_ops kexec_image_ops; +diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c +index 29a9428486a5..af9987c154ca 100644 +--- a/arch/arm64/kernel/kexec_image.c ++++ b/arch/arm64/kernel/kexec_image.c +@@ -47,10 +47,6 @@ static void *image_load(struct kimage *image, + struct kexec_segment *kernel_segment; + int ret; + +- /* We don't support crash kernels yet. */ +- if (image->type == KEXEC_TYPE_CRASH) +- return ERR_PTR(-EOPNOTSUPP); +- + /* + * We require a kernel with an unambiguous Image header. 
Per + * Documentation/arm64/booting.rst, this is the case when image_size +diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c +index 7b08bf9499b6..dd3ae8081b38 100644 +--- a/arch/arm64/kernel/machine_kexec_file.c ++++ b/arch/arm64/kernel/machine_kexec_file.c +@@ -17,12 +17,15 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + + /* relevant device tree properties */ ++#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr" ++#define FDT_PROP_MEM_RANGE "linux,usable-memory-range" + #define FDT_PROP_INITRD_START "linux,initrd-start" + #define FDT_PROP_INITRD_END "linux,initrd-end" + #define FDT_PROP_BOOTARGS "bootargs" +@@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) + vfree(image->arch.dtb); + image->arch.dtb = NULL; + ++ vfree(image->arch.elf_headers); ++ image->arch.elf_headers = NULL; ++ image->arch.elf_headers_sz = 0; ++ + return kexec_image_post_load_cleanup_default(image); + } + +@@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image, + + off = ret; + ++ ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR); ++ if (ret && ret != -FDT_ERR_NOTFOUND) ++ goto out; ++ ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE); ++ if (ret && ret != -FDT_ERR_NOTFOUND) ++ goto out; ++ ++ if (image->type == KEXEC_TYPE_CRASH) { ++ /* add linux,elfcorehdr */ ++ ret = fdt_appendprop_addrrange(dtb, 0, off, ++ FDT_PROP_KEXEC_ELFHDR, ++ image->arch.elf_headers_mem, ++ image->arch.elf_headers_sz); ++ if (ret) ++ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); ++ ++ /* add linux,usable-memory-range */ ++ ret = fdt_appendprop_addrrange(dtb, 0, off, ++ FDT_PROP_MEM_RANGE, ++ crashk_res.start, ++ crashk_res.end - crashk_res.start + 1); ++ if (ret) ++ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); ++ } ++ + /* add bootargs */ + if (cmdline) { + ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline); +@@ -125,8 +157,8 @@ static int setup_dtb(struct kimage *image, + } + + /* +- * More space needed so that we can add initrd, bootargs, kaslr-seed, and +- * rng-seed. ++ * More space needed so that we can add initrd, bootargs, kaslr-seed, ++ * rng-seed, userable-memory-range and elfcorehdr. 
+ */ + #define DTB_EXTRA_SPACE 0x1000 + +@@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image, + } + } + ++static int prepare_elf_headers(void **addr, unsigned long *sz) ++{ ++ struct crash_mem *cmem; ++ unsigned int nr_ranges; ++ int ret; ++ u64 i; ++ phys_addr_t start, end; ++ ++ nr_ranges = 1; /* for exclusion of crashkernel region */ ++ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, ++ MEMBLOCK_NONE, &start, &end, NULL) ++ nr_ranges++; ++ ++ cmem = kmalloc(sizeof(struct crash_mem) + ++ sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL); ++ if (!cmem) ++ return -ENOMEM; ++ ++ cmem->max_nr_ranges = nr_ranges; ++ cmem->nr_ranges = 0; ++ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, ++ MEMBLOCK_NONE, &start, &end, NULL) { ++ cmem->ranges[cmem->nr_ranges].start = start; ++ cmem->ranges[cmem->nr_ranges].end = end - 1; ++ cmem->nr_ranges++; ++ } ++ ++ /* Exclude crashkernel region */ ++ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); ++ ++ if (!ret) ++ ret = crash_prepare_elf64_headers(cmem, true, addr, sz); ++ ++ kfree(cmem); ++ return ret; ++} ++ + int load_other_segments(struct kimage *image, + unsigned long kernel_load_addr, + unsigned long kernel_size, +@@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image, + char *cmdline) + { + struct kexec_buf kbuf; +- void *dtb = NULL; +- unsigned long initrd_load_addr = 0, dtb_len; ++ void *headers, *dtb = NULL; ++ unsigned long headers_sz, initrd_load_addr = 0, dtb_len; + int ret = 0; + + kbuf.image = image; + /* not allocate anything below the kernel */ + kbuf.buf_min = kernel_load_addr + kernel_size; + ++ /* load elf core header */ ++ if (image->type == KEXEC_TYPE_CRASH) { ++ ret = prepare_elf_headers(&headers, &headers_sz); ++ if (ret) { ++ pr_err("Preparing elf core header failed\n"); ++ goto out_err; ++ } ++ ++ kbuf.buffer = headers; ++ kbuf.bufsz = headers_sz; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ kbuf.memsz = headers_sz; ++ kbuf.buf_align = SZ_64K; /* largest supported page size */ ++ kbuf.buf_max = ULONG_MAX; ++ kbuf.top_down = true; ++ ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) { ++ vfree(headers); ++ goto out_err; ++ } ++ image->arch.elf_headers = headers; ++ image->arch.elf_headers_mem = kbuf.mem; ++ image->arch.elf_headers_sz = headers_sz; ++ ++ pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ image->arch.elf_headers_mem, headers_sz, headers_sz); ++ } ++ + /* load initrd */ + if (initrd) { + kbuf.buffer = initrd; +-- +2.30.2 + diff --git a/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch b/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch new file mode 100644 index 00000000..f2fa541b --- /dev/null +++ b/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch @@ -0,0 +1,44 @@ +From c273a2bd8aa81b72e48736c3aa51f7ffeae39925 Mon Sep 17 00:00:00 2001 +From: AKASHI Takahiro +Date: Mon, 9 Dec 2019 12:03:44 +0900 +Subject: [PATCH] libfdt: include fdt_addresses.c + +In the implementation of kexec_file_loaded-based kdump for arm64, +fdt_appendprop_addrrange() will be needed. + +So include fdt_addresses.c in making libfdt. 
+ +Signed-off-by: AKASHI Takahiro +Cc: Rob Herring +Cc: Frank Rowand +Signed-off-by: Will Deacon +--- + lib/Makefile | 2 +- + lib/fdt_addresses.c | 2 ++ + 2 files changed, 3 insertions(+), 1 deletion(-) + create mode 100644 lib/fdt_addresses.c + +diff --git a/lib/Makefile b/lib/Makefile +index 93217d44237f..c20b1debe9b4 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n + KCOV_INSTRUMENT_stackdepot.o := n + + libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ +- fdt_empty_tree.o ++ fdt_empty_tree.o fdt_addresses.o + $(foreach file, $(libfdt_files), \ + $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt)) + lib-$(CONFIG_LIBFDT) += $(libfdt_files) +diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c +new file mode 100644 +index 000000000000..23610bcf390b +--- /dev/null ++++ b/lib/fdt_addresses.c +@@ -0,0 +1,2 @@ ++#include ++#include "../scripts/dtc/libfdt/fdt_addresses.c" +-- +2.30.2 + diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index a2e0f9bb..11a58ac5 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -12,6 +12,10 @@ Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch +# Required patches for kdump support +Patch0002: 0002-x86-purgatory-Add-fno-stack-protector.patch +Patch0003: 0003-arm64-kexec_file-add-crash-dump-support.patch +Patch0004: 0004-libfdt-include-fdt_addresses.c.patch # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch From dea9702af500f791a0bbdbbaefd906aad77dcf80 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 5 May 2021 17:55:20 +0000 Subject: [PATCH 0478/1356] os: Add kdump support Kdump is a Linux feature that allows to boot to a kernel whenever the system panics. The crash kernel is loaded into a reserved space in memory determined by the `crashkernel` kernel parameter. In Bottlerocket, this parameter is set such that no memory will be reserved if the host has less than 2GB of memory. For Bottlerocket, the crash kernel is loaded from the current active boot partition. The `configure-boot-mount.service` systemd unit determines which is the current active boot partition and mounts it at `/boot`. This mount is set as read-only and with private propagations, so new mount namespaces won't have access to it. As part of this change, SELinux labels are added to the boot partition when it is created by the `rpm2img` tool. The `load-crash-kernel.service` systemd unit loads the crash kernel, only if memory was reserved for it, and the `kexec.kexec_load_disable` setting is `0`. The unit will exit gracefully if no memory was reserved for the crash kernel. For the moment only the aws-dev and vmware variants use that kernel parameter. The `kexec.kexec_load_disable` setting used to be set in the `sysctl.conf` configuration file. With this change, the setting is set using the `disable-kexec-load.service` systemd unit. This unit runs after `load-crash-kernel.service`, even if the latter wasn't executed or excited with a non-zero code. The `capture-kernel-dump.service` systemd unit is set as the target when the crash kernel is executed. 
It captures both the dmesg logs and the kdump-compressed dump excluding: * Pages filled with zero * Non-private cache pages * All cache pages * User process data pages * Free pages All the files generated by the `capture-kernel-dump.service` unit are stored at `/var/log/kdump`, therefore the unit has a strong dependency on the following services to set up the persistent partition: * local-fs.target * systemd-sysusers.service * systemd-udevd.service * systemd-udev-trigger.service * systemd-tmpfiles-setup.service * systemd-tmpfiles-setup-dev.service Since `local-fs.target` is a dependency of `capture-kernel-dump.service`, systemd will attempt to load all the mount units. To prevent this, the mount units will only be loaded during the execution of the `preconfigured` target. No API is provided to enable/disable the dump collection, since the memory space is reserved and it will be a waste if nothing uses that space. Dynamically changing the `crashkernel` cmd line parameter isn't an option since we will provide support for secure boot in the future. --- README.md | 13 ++++++++++++- tools/rpm2img | 5 +++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a6aaa3ac..ae6541e5 100644 --- a/README.md +++ b/README.md @@ -591,6 +591,17 @@ ssh -i YOUR_KEY_FILE \ For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). +### Kdump Support + +Bottlerocket provides support to collect kernel crash dumps whenever the system kernel panics. +Once this happens, both the dmesg log and vmcore dump are stored at `/var/log/kdump`, and the system reboots. + +There are a few important caveats about the provided kdump support: + +* Currently, only vmware variants have kdump support enabled +* The system kernel will reserve 256MB for the crash kernel, only when the host has at least 2GB of memory; the reserved space won't be available for processes running in the host +* The crash kernel will only be loaded when the `crashkernel` parameter is present in the kernel's cmdline and if there is memory reserved for it + ## Details ### Security @@ -665,7 +676,7 @@ For more details, see the [API system documentation](sources/api/). ### Default Volumes -Bottlerocket operates with two default storage volumes. +Bottlerocket operates with two default storage volumes. * The root device, `/dev/xvda`, holds the active and passive [partition sets](#updates-1). It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. * The data device, `/dex/xvdb`, is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). 
diff --git a/tools/rpm2img b/tools/rpm2img index 1fe27f72..95c444c8 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -189,7 +189,12 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { EOF # BOTTLEROCKET-BOOT-A +mkdir -p "${BOOT_MOUNT}/lost+found" +BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ + "${SELINUX_FILE_CONTEXTS}" "${BOOT_MOUNT}" \ + | awk -v root="${BOOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" 40M +echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}" resize2fs -M "${BOOT_IMAGE}" dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=5 From 39dbfe4bf21fa03e889d7e8b4f4cef705cc2de6c Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 24 May 2021 23:34:28 +0000 Subject: [PATCH 0479/1356] host-containers: allow mount propagations from privileged containers This commit adds support to propagate mount points created in bootstrap and superpowered containers, across mount peer groups. The root filesystem of bootstrap and superpowered containers is setup with the `rshared` configuration to allow mounts propagations across peer groups. All mount points attached to the containers are configured as `rprivate` (except for the `mnt` mount). This prevents bootstrap and superpowered containers from remounting directories in the host's root filesystem. The `/.bottlerocket/rootfs/mnt` mount point was added to bootstrap and superpowered containers. This mount point is a bind mount that points to `/mnt` in the host, which itself is a bind mount of `/local/mnt`. This is required to let users create mount points underneath `/mnt`. This mount point is setup with the `rshared` configuration to allow propagations across peer groups. This is the only mount point from which propagations are allowed across peer groups. With this change, bootstrap containers now have access to all the devices in the host. Also, they now have the `CAP_SYS_ADMIN` capability to let users manage ephemeral disks. The logic to build the container specs was refactored to provide a better understanding of what options are set for the containers' spec. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ae6541e5..dbfe27e2 100644 --- a/README.md +++ b/README.md @@ -528,7 +528,10 @@ Bootstrap containers are host containers that can be used to "bootstrap" the hos Bootstrap containers are very similar to normal host containers; they come with persistent storage and with optional user data. Unlike normal host containers, bootstrap containers can't be treated as `superpowered` containers. -However, these containers have access to the underlying root filesystem on `/.bottlerocket/rootfs`. +However, bootstrap containers do have additional permissions that normal host containers do not have. +Bootstrap containers have access to the underlying root filesystem on `/.bottlerocket/rootfs` as well as to all the devices in the host, and they are set up with the `CAP_SYS_ADMIN` capability. +This allows bootstrap containers to create files, directories, and mounts that are visible to the host. + Bootstrap containers are set up to run after the systemd `configured.target` unit is active. 
The containers' systemd unit depends on this target (and not on any of the bootstrap containers' peers) which means that bootstrap containers will not execute in a deterministic order The boot process will "wait" for as long as the bootstrap containers run. @@ -558,6 +561,11 @@ mode = "once" essential = true ``` +##### Mount propagations in bootstrap and superpowered containers +Both bootstrap and superpowered host containers are configured with the `/.bottlerocket/rootfs/mnt` bind mount that points to `/mnt` in the host, which itself is a bind mount of `/local/mnt`. +This bind mount is set up with shared propagations, so any new mount point created underneath `/.bottlerocket/rootfs/mnt` in any bootstrap or superpowered host container will propagate across mount namespaces. +You can use this feature to configure ephemeral disks attached to your hosts that you may want to use on your workloads. + #### Platform-specific settings Platform-specific settings are automatically set at boot time by [early-boot-config](sources/api/early-boot-config) based on metadata available on the running platform. From c553afcd51d0c624e810a7d44f9656c78eb1f7c0 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 17 Jun 2021 14:39:37 -0700 Subject: [PATCH 0480/1356] actions: pin rust toolchain to v1.51.0 temporarily Github actions is failing due to a difference in rust toolchain versions in the sdk and the ci host --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6cc38417..f61ddab4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -26,6 +26,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 + - run: rustup toolchain install 1.51.0 && rustup default 1.51.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From f47961fdbad427f56615ff7c110f3df410858562 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 16 Jun 2021 14:56:39 -0700 Subject: [PATCH 0481/1356] Update kernel-5.4 to 5.4.117 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 012fc5ae..20f124e7 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/30c599278ce31259b6ad8fcfb05d25c9bdbbdce8398f0ca686e70c36e7b4986b/kernel-5.4.110-54.189.amzn2.src.rpm" -sha512 = "ad38a02ec569dcd088e4013f2c9aa50ddf50775b4ded9da5ca367ae19cd141a7d7cd539c986cdcd70656a17e3e9fe874332942bdb027462ef0e029ac1c5fc38b" +url = "https://cdn.amazonlinux.com/blobstore/3166b2c4af7dbb50ef04eedc98aff0020ea1570892d7e01a9dab885e04168afc/kernel-5.4.117-58.216.amzn2.src.rpm" +sha512 = "0d86948018725b4590622a49f27fa7dae03ce06fcef11d39883f7fc421087442fea54c30603c997bd6f519606be596f1e46f33727213c34bd78a85076a47eeef" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 11a58ac5..fd4c9fea 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.110 -Release: 2%{?dist} +Version: 5.4.117 +Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/30c599278ce31259b6ad8fcfb05d25c9bdbbdce8398f0ca686e70c36e7b4986b/kernel-5.4.110-54.189.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/3166b2c4af7dbb50ef04eedc98aff0020ea1570892d7e01a9dab885e04168afc/kernel-5.4.117-58.216.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 9d705ca3ca9de8664b29545a088054bb1ee65f93 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 16 Jun 2021 14:56:39 -0700 Subject: [PATCH 0482/1356] Update kernel-5.10 to 5.10.35 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 68021b52..6ba57bc5 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,5 +13,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/9d3856424e8b2b45e2871c0fd558641435e81650c01a70c2c27c0115c86f04c5/kernel-5.10.29-27.128.amzn2.src.rpm" -sha512 = "372b4fa3f69cea03469b4305adfea13b4f67eece27a5a1847fd12913fd1f42a2c7dccc4569c5781573db6a7044b5e073f32ad57e9954bc0290c5ee1d90fe5640" +url = "https://cdn.amazonlinux.com/blobstore/e02ea3dba6fd0e1fedb847a6bf67a4c990c3d23e128cf632af472d38ce05b3cd/kernel-5.10.35-31.135.amzn2.src.rpm" +sha512 = "7c02f472045321ab92e13d6348ed3e6c879d4423cff87770889718c9974c8871c35a3d7caae1e5ea185d53d9fd71b83e3d0f189c7f01633b33682dfc67654df8" diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 1fe53eb8..a5aa57e2 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.29 +Version: 5.10.35 Release: 2%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/9d3856424e8b2b45e2871c0fd558641435e81650c01a70c2c27c0115c86f04c5/kernel-5.10.29-27.128.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/e02ea3dba6fd0e1fedb847a6bf67a4c990c3d23e128cf632af472d38ce05b3cd/kernel-5.10.35-31.135.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From db08f31a4911424f454255b42bcc43aa997fa02c Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Thu, 17 Jun 2021 11:30:54 -0700 Subject: [PATCH 0483/1356] tools: cargo update --- tools/Cargo.lock | 194 +++++++++++++++++++++-------------------------- 1 file changed, 87 insertions(+), 107 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 68616741..203e5ce8 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a" +checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" dependencies = [ "gimli", ] @@ -92,9 +92,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4717cfcbfaa661a0fd48f8453951837ae7e8f81e481fbb136e3202d72805a744" +checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" dependencies = [ "addr2line", "cc", @@ -163,9 +163,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytes" @@ -190,9 +190,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.67" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" [[package]] name = "cfg-if" @@ -260,17 +260,15 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "regex", "terminal_size", - "unicode-width", "winapi", ] [[package]] name = "const_fn" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402da840495de3f976eaefc3485b7f5eb5b0bf9761f9a47be27fe975b3b8c2ec" +checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" [[package]] name = "core-foundation" @@ -290,9 +288,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpufeatures" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "281f563b2c3a0e535ab12d81d3c5859045795256ad269afa7c19542585b68f93" +checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" dependencies = [ "libc", ] @@ -329,9 +327,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" +checksum = 
"4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ "cfg-if", "crossbeam-utils", @@ -342,11 +340,10 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg", "cfg-if", "lazy_static", ] @@ -579,9 +576,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", "libc", @@ -596,9 +593,9 @@ checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" [[package]] name = "globset" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" +checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" dependencies = [ "aho-corasick", "bstr", @@ -634,9 +631,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] @@ -696,15 +693,15 @@ checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" +checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" dependencies = [ "bytes", "futures-channel", @@ -716,7 +713,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", + "pin-project-lite", "socket2", "tokio", "tower-service", @@ -764,9 +761,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507cf157a0dab3c837bef6e2086466255d9de4a6b1af69e62b62c54cd52f6062" +checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b" dependencies = [ "console", "lazy_static", @@ -785,9 +782,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itoa" @@ -812,9 +809,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.94" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "lock_api" @@ -854,9 +851,9 @@ checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memoffset" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ "autocfg", ] @@ -879,9 +876,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ "libc", "log", @@ -951,9 +948,12 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.24.0" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" +checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +dependencies = [ + "memchr", +] [[package]] name = "olpc-cjson" @@ -968,9 +968,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "opaque-debug" @@ -1054,26 +1054,6 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = "pin-project" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "pin-project-lite" version = "0.2.6" @@ -1130,9 +1110,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] @@ -1220,9 +1200,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha", @@ -1232,9 +1212,9 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ 
"ppv-lite86", "rand_core", @@ -1242,27 +1222,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ "rand_core", ] [[package]] name = "rayon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" dependencies = [ "autocfg", "crossbeam-deque", @@ -1272,9 +1252,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -1285,9 +1265,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ "bitflags", ] @@ -1485,7 +1465,7 @@ dependencies = [ "rustc_version", "serde", "sha2", - "time 0.2.26", + "time 0.2.27", "tokio", ] @@ -1520,9 +1500,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce" +checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" [[package]] name = "rustc_version" @@ -1601,9 +1581,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ "bitflags", "core-foundation", @@ -1614,9 +1594,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" +checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" dependencies = [ "core-foundation-sys", "libc", @@ -1763,9 +1743,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] @@ -1927,9 +1907,9 @@ checksum = 
"1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2", "quote", @@ -1961,9 +1941,9 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ca8ced750734db02076f44132d802af0b33b09942331f4459dde8636fd2406" +checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" dependencies = [ "libc", "winapi", @@ -1990,9 +1970,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" dependencies = [ "const_fn", "libc", @@ -2015,9 +1995,9 @@ dependencies = [ [[package]] name = "time-macros-impl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2053,9 +2033,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.5.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" +checksum = "5fb2ed024293bb19f7a5dc54fe83bf86532a44c12a2bb8ba40d64a4509395ca2" dependencies = [ "autocfg", "bytes", @@ -2073,9 +2053,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" dependencies = [ "proc-macro2", "quote", @@ -2095,9 +2075,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e177a5d8c3bf36de9ebe6d58537d8879e964332f93fb3339e43f618c81361af0" +checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" dependencies = [ "futures-core", "pin-project-lite", @@ -2106,9 +2086,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" +checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ "bytes", "futures-core", @@ -2239,9 +2219,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] From 74a845d1bd3b8dc857c3a9e32ffae8ade36a3d39 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: 
Thu, 17 Jun 2021 11:51:29 -0700 Subject: [PATCH 0484/1356] Update semver to 1.0 This has to be done in the tools and sources workspaces simultaneously because of the updater dependency in pubsys. The semver parser changed, so some version values we considered "good" before (but didn't actually use) now fail to parse, and so were moved to the "bad" value unit test. --- tools/Cargo.lock | 35 +++++------------------------------ tools/pubsys/Cargo.toml | 2 +- tools/pubsys/src/main.rs | 2 +- 3 files changed, 7 insertions(+), 32 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 203e5ce8..302b96de 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1045,15 +1045,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -[[package]] -name = "pest" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = [ - "ucd-trie", -] - [[package]] name = "pin-project-lite" version = "0.2.6" @@ -1141,7 +1132,7 @@ dependencies = [ "rusoto_signature", "rusoto_ssm", "rusoto_sts", - "semver 0.11.0", + "semver 1.0.3", "serde", "serde_json", "simplelog", @@ -1608,16 +1599,15 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", + "semver-parser", ] [[package]] name = "semver" -version = "0.11.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +checksum = "5f3aac57ee7f3272d8395c6e4f502f434f0e289fcd62876f70daa008c20dcabe" dependencies = [ - "semver-parser 0.10.2", "serde", ] @@ -1627,15 +1617,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" version = "1.0.126" @@ -2202,12 +2183,6 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" -[[package]] -name = "ucd-trie" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" - [[package]] name = "unicode-bidi" version = "0.3.5" @@ -2257,7 +2232,7 @@ dependencies = [ "chrono", "parse-datetime", "regex", - "semver 0.11.0", + "semver 1.0.3", "serde", "serde_json", "serde_plain", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index ee2585d2..398a5ed2 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -30,7 +30,7 @@ rusoto_ssm = { version = "0.46.0", default-features = false, features = ["rustls rusoto_sts = { version = "0.46.0", default-features = false, features = ["rustls"] } simplelog = "0.10.0" snafu = "0.6" -semver = "0.11.0" +semver = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 
b4b85b29..3bc0c278 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -120,7 +120,7 @@ enum SubCommand { /// Parses a SemVer, stripping a leading 'v' if present pub(crate) fn friendly_version( mut version_str: &str, -) -> std::result::Result { +) -> std::result::Result { if version_str.starts_with('v') { version_str = &version_str[1..]; }; From af63d8c7d128b3af60b4e4f04929aa63880fbdc2 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 15 Jun 2021 11:31:57 -0700 Subject: [PATCH 0485/1356] Add aws-k8s-1.21 variant Adds aws-k8s-1.21 variant, relinks symlinks in models --- .github/workflows/build.yml | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f61ddab4..10de3f55 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-ecs-1] + variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: diff --git a/README.md b/README.md index dbfe27e2..f0f3824c 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ The following variants support EKS, as described above: - `aws-k8s-1.18` - `aws-k8s-1.19` - `aws-k8s-1.20` +- `aws-k8s-1.21` We also have a variant designed to work with ECS, currently in preview: From fa916137ee726eb152876d32799b93440ffe05c6 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 15 Jun 2021 11:55:19 -0700 Subject: [PATCH 0486/1356] Add vmware-k8s-1.21 variant Adds vmware-k8s-1.21 variant, relinks symlinks in models --- .github/workflows/build.yml | 3 +++ README.md | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 10de3f55..b589028d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,6 +23,9 @@ jobs: - variant: vmware-k8s-1.20 arch: x86_64 supported: true + - variant: vmware-k8s-1.21 + arch: x86_64 + supported: true fail-fast: false steps: - uses: actions/checkout@v2 diff --git a/README.md b/README.md index f0f3824c..e47139cc 100644 --- a/README.md +++ b/README.md @@ -60,9 +60,10 @@ We also have a variant designed to work with ECS, currently in preview: - `aws-ecs-1` -Another variant we have in preview is designed to be a Kubernetes worker node in VMware: +Other variants we have in preview are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.20` +- `vmware-k8s-1.21` The `aws-k8s-1.16` variant is deprecated and will no longer be supported in Bottlerocket releases after June, 2021. The `aws-k8s-1.15` variant is no longer supported. @@ -365,7 +366,7 @@ Static pods can be particularly useful when running in standalone mode. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. -In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.20/defaults.d/). +In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.21/defaults.d/). * `settings.kubernetes.node-ip`: The IPv4 address of this node. 
* `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. From 94ba31fe301d61a8e4b03a147084f08d58a60caf Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 21 Jun 2021 09:55:27 -0700 Subject: [PATCH 0487/1356] Store build artifacts per architecture Previously, switching between architectures would rebuild all artifacts, which can make building and testing changes painful. This change makes cargo and buildsys store their state in arch-specific directories, so when you switch, they can find the last artifacts they built for that arch. --- tools/buildsys/src/builder.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 05e92aac..19afa0fa 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -103,7 +103,7 @@ impl PackageBuilder { arch = arch, ); - build(BuildType::Package, &package, args, &tag, &output_dir)?; + build(BuildType::Package, &package, &arch, args, &tag, &output_dir)?; Ok(Self) } @@ -160,7 +160,7 @@ impl VariantBuilder { arch = arch ); - build(BuildType::Variant, &variant, args, &tag, &output_dir)?; + build(BuildType::Variant, &variant, &arch, args, &tag, &output_dir)?; Ok(Self) } @@ -177,6 +177,7 @@ enum BuildType { fn build( kind: BuildType, what: &str, + arch: &str, build_args: Vec, tag: &str, output_dir: &PathBuf, @@ -200,7 +201,7 @@ fn build( let nocache = rand::thread_rng().gen::(); // Create a directory for tracking outputs before we move them into position. - let build_dir = create_build_dir(&kind, &what)?; + let build_dir = create_build_dir(&kind, &what, &arch)?; // Clean up any previous outputs we have tracked. clean_build_files(&build_dir, &output_dir)?; @@ -318,13 +319,13 @@ enum Retry<'a> { // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= /// Create a directory for build artifacts. -fn create_build_dir(kind: &BuildType, name: &str) -> Result { +fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result { let prefix = match kind { BuildType::Package => "packages", BuildType::Variant => "variants", }; - let path = [&getenv("BUILDSYS_STATE_DIR")?, prefix, name] + let path = [&getenv("BUILDSYS_STATE_DIR")?, arch, prefix, name] .iter() .collect(); From 7a2938b1b0752f3e720f102b83c53e68286ff6cb Mon Sep 17 00:00:00 2001 From: Samuel Karp Date: Tue, 29 Jun 2021 14:04:27 -0700 Subject: [PATCH 0488/1356] docs: update references to the ECS variant for GA The ECS variant is no longer in preview. We've also launched a new optional Bottlerocket ECS Updater, similar to the Bottlerocket Update Operator for Kubernetes, which helps automate Bottlerocket updates. --- README.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index e47139cc..5330bc4c 100644 --- a/README.md +++ b/README.md @@ -56,11 +56,11 @@ The following variants support EKS, as described above: - `aws-k8s-1.20` - `aws-k8s-1.21` -We also have a variant designed to work with ECS, currently in preview: +The following variant supports ECS: - `aws-ecs-1` -Other variants we have in preview are designed to be Kubernetes worker nodes in VMware: +We also have variants in preview status that are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.20` - `vmware-k8s-1.21` @@ -175,11 +175,13 @@ For more details, see the [update system documentation](sources/updater/). 
### Update methods There are several ways of updating your Bottlerocket hosts. +We provide tools for automatically updating hosts, as well as an API for direct control of updates. + +#### Automated updates For EKS variants of Bottlerocket, we recommend using the [Bottlerocket update operator](https://github.com/bottlerocket-os/bottlerocket-update-operator) for automated updates. -You can also use one of the methods below for direct control of updates. -For the ECS preview variant of Bottlerocket, we recommend updating hosts using one of the methods below, until further automation is ready. +For the ECS variant of Bottlerocket, we recommend using the [Bottlerocket ECS updater](https://github.com/bottlerocket-os/bottlerocket-ecs-updater/) for automated updates. #### Update API @@ -213,10 +215,6 @@ apiclient update apply --check --reboot See the [apiclient documentation](sources/api/apiclient/) for more details. -#### Bottlerocket Update Operator - -If you are running the Kubernetes variant of Bottlerocket, you can use the [Bottlerocket update operator](https://github.com/bottlerocket-os/bottlerocket-update-operator) to automate Bottlerocket updates. - ### Update rollback The system will automatically roll back if it's unable to boot. From 9a24c737736a323e2764338b91c6f25928449977 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 25 Jun 2021 22:45:26 +0000 Subject: [PATCH 0489/1356] build: update SDK to 0.22.0 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b589028d..352154a6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,7 +29,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.51.0 && rustup default 1.51.0 + - run: rustup toolchain install 1.53.0 && rustup default 1.53.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From d988b66df87e09845d486e3e9854d4039b09de93 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Tue, 29 Jun 2021 19:17:36 +0000 Subject: [PATCH 0490/1356] kubelet: add setting for configuring cpuManagerPolicy pass cpu-manager-policy argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5330bc4c..3eb2ebcd 100644 --- a/README.md +++ b/README.md @@ -356,6 +356,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. * `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. +* `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. 
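These kubelet CPU manager settings can be supplied like any other Bottlerocket setting. A minimal sketch of the corresponding TOML user data, assuming only the key names documented in the README changes in this patch and the next (the values shown are illustrative, not recommendations):

```toml
# Illustrative user data; cpu-manager-reconcile-period is documented in the
# next patch in this series, and both values here are examples only.
[settings.kubernetes]
cpu-manager-policy = "static"
cpu-manager-reconcile-period = "30s"
```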
From b97fd45c7cbb089c7b15ea811be0eb64c3467dd8 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Thu, 1 Jul 2021 21:07:37 +0000 Subject: [PATCH 0491/1356] kubelet: add setting for configuring cpuManagerReconcilePeriod pass cpu-manager-reconcile-period argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3eb2ebcd..24cb1a84 100644 --- a/README.md +++ b/README.md @@ -357,6 +357,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. +* `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 2744afafec139f4d6efc9676e2229a158a297431 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 1 Jun 2021 15:12:05 +0000 Subject: [PATCH 0492/1356] pubsys-config: Add VMware support This adds everything needed to support VMware specific configuration, including credentials. --- tools/Cargo.lock | 12 ++ tools/pubsys-config/Cargo.toml | 3 + tools/pubsys-config/src/lib.rs | 8 ++ tools/pubsys-config/src/vmware.rs | 221 ++++++++++++++++++++++++++++++ 4 files changed, 244 insertions(+) create mode 100644 tools/pubsys-config/src/vmware.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 302b96de..461c8223 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -663,6 +663,15 @@ dependencies = [ "digest", ] +[[package]] +name = "home" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654" +dependencies = [ + "winapi", +] + [[package]] name = "http" version = "0.2.4" @@ -1155,6 +1164,9 @@ name = "pubsys-config" version = "0.1.0" dependencies = [ "chrono", + "home", + "lazy_static", + "log", "parse-datetime", "serde", "snafu", diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index 031ad0aa..88587682 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -8,6 +8,9 @@ publish = false [dependencies] chrono = "0.4" +home = "0.5" +lazy_static = "1.4" +log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } serde = { version = "1.0", features = ["derive"] } snafu = "0.6" diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index c4e16989..0e15e3ae 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -1,5 +1,7 @@ //! The config module owns the definition and loading process for our configuration sources. 
+pub mod vmware; +use crate::vmware::VmwareConfig; use chrono::Duration; use parse_datetime::parse_offset; use serde::{Deserialize, Deserializer}; @@ -19,6 +21,9 @@ pub struct InfraConfig { // Config for AWS specific subcommands pub aws: Option, + + // Config for VMware specific subcommands + pub vmware: Option, } impl InfraConfig { @@ -168,6 +173,9 @@ mod error { path: PathBuf, source: toml::de::Error, }, + + #[snafu(display("Missing config: {}", what))] + MissingConfig { what: String }, } } pub use error::Error; diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs new file mode 100644 index 00000000..fa3b521b --- /dev/null +++ b/tools/pubsys-config/src/vmware.rs @@ -0,0 +1,221 @@ +//! The vmware module owns the definition and loading process for our VMware configuration sources. +use lazy_static::lazy_static; +use log::debug; +use serde::Deserialize; +use snafu::{OptionExt, ResultExt}; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::{env, fs}; + +lazy_static! { + /// Determine the full path to the Vsphere credentials at runtime. This is an Option because it is + /// possible (however unlikely) that `home_dir()` is unable to find the home directory of the + /// current user + pub static ref VMWARE_CREDS_PATH: Option = home::home_dir().map(|home| home + .join(".config") + .join("pubsys") + .join("vsphere-credentials.toml")); +} + +const GOVC_USERNAME: &str = "GOVC_USERNAME"; +const GOVC_PASSWORD: &str = "GOVC_PASSWORD"; +const GOVC_URL: &str = "GOVC_URL"; +const GOVC_DATACENTER: &str = "GOVC_DATACENTER"; +const GOVC_DATASTORE: &str = "GOVC_DATASTORE"; +const GOVC_NETWORK: &str = "GOVC_NETWORK"; +const GOVC_RESOURCE_POOL: &str = "GOVC_RESOURCE_POOL"; +const GOVC_FOLDER: &str = "GOVC_FOLDER"; + +/// VMware-specific infrastructure configuration +#[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct VmwareConfig { + #[serde(default)] + pub datacenters: Vec, + #[serde(default)] + pub datacenter: HashMap, + pub common: Option, +} + +/// VMware datacenter-specific configuration. +/// +/// Fields are optional here because this struct is used to gather environment variables, common +/// config, and datacenter-specific configuration, each of which may not have the complete set of +/// fields. It is used to build a complete datacenter configuration (hence the "Builder" name). +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DatacenterBuilder { + pub vsphere_url: Option, + pub datacenter: Option, + pub datastore: Option, + pub network: Option, + pub folder: Option, + pub resource_pool: Option, +} + +/// Helper macro for retrieving a field from another struct if the field in `self` is `None` +macro_rules! 
field_or { + ($self:expr, $field:ident, $other:expr) => { + $self + .$field + .as_ref() + .or($other.and_then(|o| o.$field.as_ref())) + .cloned() + }; +} + +impl DatacenterBuilder { + /// Create a DatacenterBuilder from environment variables + pub fn from_env() -> Self { + Self { + vsphere_url: get_env(GOVC_URL), + datacenter: get_env(GOVC_DATACENTER), + datastore: get_env(GOVC_DATASTORE), + network: get_env(GOVC_NETWORK), + folder: get_env(GOVC_FOLDER), + resource_pool: get_env(GOVC_RESOURCE_POOL), + } + } + + /// Creates a new DatacenterBuilder, merging fields from another (Optional) + /// DatacenterBuilder if the field in `self` is None + pub fn take_missing_from(&self, other: Option<&Self>) -> Self { + Self { + vsphere_url: field_or!(self, vsphere_url, other), + datacenter: field_or!(self, datacenter, other), + datastore: field_or!(self, datastore, other), + network: field_or!(self, network, other), + folder: field_or!(self, folder, other), + resource_pool: field_or!(self, resource_pool, other), + } + } + + /// Attempts to create a `Datacenter`, consuming `self` and ensuring that each field contains a + /// value. + pub fn build(self) -> Result { + let get_or_err = + |opt: Option, what: &str| opt.context(error::MissingConfig { what }); + + Ok(Datacenter { + vsphere_url: get_or_err(self.vsphere_url, "vSphere URL")?, + datacenter: get_or_err(self.datacenter, "vSphere datacenter")?, + datastore: get_or_err(self.datastore, "vSphere datastore")?, + network: get_or_err(self.network, "vSphere network")?, + folder: get_or_err(self.folder, "vSphere folder")?, + resource_pool: get_or_err(self.resource_pool, "vSphere resource pool")?, + }) + } +} + +/// A fully configured VMware datacenter, i.e. no optional fields +#[derive(Debug)] +pub struct Datacenter { + pub vsphere_url: String, + pub datacenter: String, + pub datastore: String, + pub network: String, + pub folder: String, + pub resource_pool: String, +} + +/// VMware infrastructure credentials for all datacenters +#[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DatacenterCredsConfig { + #[serde(default)] + pub datacenter: HashMap, +} + +impl DatacenterCredsConfig { + /// Deserializes a DatacenterCredsConfig from a given path + pub fn from_path
<P>
(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let creds_config_str = fs::read_to_string(path).context(error::File { path })?; + toml::from_str(&creds_config_str).context(error::InvalidToml { path }) + } +} + +/// VMware datacenter-specific credentials. Fields are optional here since this struct is used to +/// gather environment variables as well as fields from file, either of which may or may not exist. +/// It is used to build a complete credentials configuration (hence the "Builder" name). +#[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DatacenterCredsBuilder { + pub username: Option, + pub password: Option, +} + +impl DatacenterCredsBuilder { + /// Create a DatacenterCredsBuilder from environment variables + pub fn from_env() -> Self { + Self { + username: get_env(GOVC_USERNAME), + password: get_env(GOVC_PASSWORD), + } + } + + /// Creates a new DatacenterCredsBuilder, merging fields from another (Optional) + /// DatacenterCredsBuilder if the field in `self` is None + pub fn take_missing_from(&self, other: Option<&Self>) -> Self { + Self { + username: field_or!(self, username, other), + password: field_or!(self, password, other), + } + } + /// Attempts to create a `DatacenterCreds`, consuming `self` and ensuring that each field + /// contains a value + pub fn build(self) -> Result { + let get_or_err = + |opt: Option, what: &str| opt.context(error::MissingConfig { what }); + + Ok(DatacenterCreds { + username: get_or_err(self.username, "vSphere username")?, + password: get_or_err(self.password, "vSphere password")?, + }) + } +} + +/// Fully configured datacenter credentials, i.e. no optional fields +#[derive(Debug)] +pub struct DatacenterCreds { + pub username: String, + pub password: String, +} + +/// Attempt to retrieve an environment variable, returning None if it doesn't exist +fn get_env(var: &str) -> Option { + match env::var(var) { + Ok(v) => Some(v), + Err(e) => { + debug!("Unable to read environment variable '{}': {}", var, e); + None + } + } +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub enum Error { + #[snafu(display("Failed to read '{}': {}", path.display(), source))] + File { path: PathBuf, source: io::Error }, + + #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] + InvalidToml { + path: PathBuf, + source: toml::de::Error, + }, + + #[snafu(display("Missing config: {}", what))] + MissingConfig { what: String }, + } +} +pub use error::Error; +pub type Result = std::result::Result; From 2594d3b634f479b73b843f5e92ada6332b31623f Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 17 Jun 2021 20:14:41 +0000 Subject: [PATCH 0493/1356] pubsys: Add VMware 'upload-ova' subcommand This adds the subcommand `upload-ova` to `pubsys`. This subcommand is meant to be used via `cargo make` and allow a user to upload an OVA to one or more VMware datacenters, optionally marking the VM as a template. Under the hood, it accomplishes this by running `govc` in the SDK container via Docker invocation, similar to how `buildsys` accomplishes its task. Like the rest of `pubsys`, it gets much of its configuration from `Infra.toml`. The configuration details all of the values needed to communicate with a vSphere instance, and allows for users to specify "common" configuration that multiple datacenters share. 
Credentials for different datacenters can be specified in a file located at `~/.config/pubsys/vsphere-credentials.toml`. Because we use `govc` under the hood, at runtime we check for GOVC_* environment variables and allow them to override VMware datacenter config in `Infra.toml` or `vsphere-credentials.toml` The subcommand allows a user to override the name of the resulting VM, import spec, and specify a subset of datacenters than what's specified in config. --- tools/Cargo.lock | 1 + tools/pubsys/Cargo.toml | 1 + tools/pubsys/Infra.toml.example | 32 +++ tools/pubsys/src/main.rs | 11 + tools/pubsys/src/vmware/govc.rs | 177 +++++++++++++ tools/pubsys/src/vmware/mod.rs | 2 + tools/pubsys/src/vmware/upload_ova/mod.rs | 245 ++++++++++++++++++ .../support/vmware/import_spec.template | 16 ++ 8 files changed, 485 insertions(+) create mode 100644 tools/pubsys/src/vmware/govc.rs create mode 100644 tools/pubsys/src/vmware/mod.rs create mode 100644 tools/pubsys/src/vmware/upload_ova/mod.rs create mode 100644 tools/pubsys/support/vmware/import_spec.template diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 461c8223..c4241668 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1125,6 +1125,7 @@ dependencies = [ "chrono", "clap", "coldsnap", + "duct", "futures", "indicatif", "lazy_static", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 398a5ed2..8fde46a8 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -11,6 +11,7 @@ async-trait = "0.1.36" chrono = "0.4" clap = "2.33" coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} +duct = "0.13.0" pubsys-config = { path = "../pubsys-config/" } futures = "0.3.5" indicatif = "0.16.0" diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index ce163694..c52a64e2 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -50,3 +50,35 @@ ssm_prefix = "/your/prefix/here" # If specified, we assume this role before making any API calls in this region. # (This is assumed after the "global" aws.role, if that is also specified.) role = "arn:aws:iam::012345678901:role/assume-regional" + +[vmware] +# A list of datacenter names to which you would like to upload an OVA. These +# are "friendly" names, and do not need to be the actual name of the +# software-defined datacenter, but can be. For example, you may have have +# multiple vSphere instances with datacenters that still carry the default +# "SDDC-Datacenter" name; this field allows you to differentiate them. +datacenters = ["north", "south"] + +# *** +# GOVC_* environment variables set in the current environment override any +# configuration set in the sections below! +# *** + +# Optional common configuration +# This configuration allow values to be set in a single place if they are common in +# multiple datacenters. They can be overridden in the datacenter's block below. +[vmware.common] +network = "a_network" + +# Datacenter specific configuration +# This specifies all of the values necessary to communicate with this +# datacenter via `govc`. Each value maps directly to the GOVC_* environment +# variable in the corresponding comment. If any of these values is missing and +# isn't in the environment, we will look for them in `vmware.common`. 
+[vmware.datacenter.north] +vsphere_url = "https://vcenter.1234.vmwarevmc.com" # GOVC_URL +datacenter = "SDDC-Datacenter" # GOVC_DATACENTER +datastore = "WorkloadDatastore" # GOVC_DATASTORE +network = "sddc-cgw-network-1" # GOVC_NETWORK +folder = "my_folder" # GOVC_FOLDER +resource_pool = "/SDDC-Datacenter/host/Cluster/Resources/Compute-ResourcePool" # GOVC_RESOURCE_POOL diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 3bc0c278..48cce8c4 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -25,6 +25,7 @@ Configuration comes from: mod aws; mod repo; +mod vmware; use semver::Version; use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; @@ -77,6 +78,9 @@ fn run() -> Result<()> { .context(error::PromoteSsm) }) } + SubCommand::UploadOva(ref upload_args) => { + vmware::upload_ova::run(&args, &upload_args).context(error::UploadOva) + } } } @@ -115,6 +119,8 @@ enum SubCommand { Ssm(aws::ssm::SsmArgs), PromoteSsm(aws::promote_ssm::PromoteArgs), + + UploadOva(vmware::upload_ova::UploadArgs), } /// Parses a SemVer, stripping a leading 'v' if present @@ -173,6 +179,11 @@ mod error { #[snafu(display("Failed to update SSM: {}", source))] Ssm { source: crate::aws::ssm::Error }, + + #[snafu(display("Failed to upload OVA: {}", source))] + UploadOva { + source: crate::vmware::upload_ova::Error, + }, } } type Result = std::result::Result; diff --git a/tools/pubsys/src/vmware/govc.rs b/tools/pubsys/src/vmware/govc.rs new file mode 100644 index 00000000..8248f2c8 --- /dev/null +++ b/tools/pubsys/src/vmware/govc.rs @@ -0,0 +1,177 @@ +//! The govc module handles the process of building and executing the calls to Docker in order to +//! run specific `govc` commands. +use duct::cmd; +use log::trace; +use pubsys_config::vmware::{Datacenter, DatacenterCreds}; +use snafu::ResultExt; +use std::env; +use std::path::Path; +use std::process::Output; + +pub(crate) struct Govc { + env_config: Vec, +} + +impl Govc { + const GOVC: &'static str = "govc"; + + /// Make a new instance of `Govc`, creating all of the environment variables required to run + /// `govc` as Docker `--env` arguments + pub(crate) fn new(dc: Datacenter, creds: DatacenterCreds) -> Self { + let mut env_config = Vec::new(); + env_config.env_arg("GOVC_USERNAME", creds.username); + env_config.env_arg("GOVC_PASSWORD", creds.password); + env_config.env_arg("GOVC_URL", dc.vsphere_url); + env_config.env_arg("GOVC_DATACENTER", dc.datacenter); + env_config.env_arg("GOVC_DATASTORE", dc.datastore); + env_config.env_arg("GOVC_NETWORK", dc.network); + env_config.env_arg("GOVC_RESOURCE_POOL", dc.resource_pool); + env_config.env_arg("GOVC_FOLDER", dc.folder); + + Self { env_config } + } + + /// Run `govc import.ova` using Docker. + /// + /// Using the given name, OVA path, and import spec path, this function builds the `govc + /// import.ova` command as it will be used in the container. It also builds the necessary bind + /// mount arguments to mount the import spec and OVA into the container. Finally, it calls + /// `govc` via `docker run` invocation using these arguments. 
+ pub(crate) fn upload_ova( + self, + name: S, + ova_path: P1, + import_spec_path: P2, + ) -> Result + where + S: AsRef, + P1: AsRef, + P2: AsRef, + { + let name = name.as_ref(); + let ova_host_path = ova_path.as_ref(); + let import_spec_host_path = import_spec_path.as_ref(); + + // Define the paths to the OVA and import spec we will use for the bind mounts into the + // container + let ova_container_path = "/tmp/bottlerocket.ova"; + let import_spec_container_path = "/tmp/import.spec"; + + //--mount type=bind,source="path/to/thing",target=/tmp/thing,readonly + let mount_config = &[ + // Mount the import spec file + "--mount", + &format!( + "type=bind,source={},target={},readonly", + import_spec_host_path.display(), + import_spec_container_path + ), + // Mount the OVA + "--mount", + &format!( + "type=bind,source={},target={},readonly", + ova_host_path.display(), + ova_container_path + ), + ]; + + // govc import.ova -options=/path/to/spec -name bottlerocket_vm_name /path/to/ova + let govc_cmd = &[ + Self::GOVC, + "import.ova", + &format!("-options={}", import_spec_container_path), + "-name", + &name, + ova_container_path, + ]; + + let env_config: Vec<&str> = self.env_config.iter().map(|s| s.as_ref()).collect(); + + docker_run(&env_config, Some(mount_config), govc_cmd) + } +} + +/// Execute `docker run` using the SDK container with the specified environment, mount, and command +/// arguments. +/// +/// This builds the entire `docker run` command string using a list of Docker `--env FOO=BAR` +/// strings, an optional list of `--mount` strings, and a list of strings meant to be the command +/// to run in the container. +// The arguments are `&[&str]` in an attempt to be as flexible as possible for the caller +fn docker_run(docker_env: &[&str], mount: Option<&[&str]>, command: &[&str]) -> Result { + let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::Environment { + var: "BUILDSYS_SDK_IMAGE", + })?; + trace!("SDK image: {}", sdk); + + let mut args = vec!["run"]; + args.push("--net=host"); + args.extend(docker_env); + + if let Some(mount_cfg) = mount { + args.extend(mount_cfg) + } + + args.push(&sdk); + args.extend(command); + + let output = cmd("docker", args) + .stderr_to_stdout() + .stdout_capture() + .unchecked() + .run() + .context(error::CommandStart)?; + + let stdout = String::from_utf8_lossy(&output.stdout); + trace!("{}", stdout); + if output.status.success() { + Ok(output) + } else { + error::Docker { output: stdout }.fail() + } +} + +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + +/// Helper trait for constructing Docker `--env` arguments +trait EnvArg { + fn env_arg(&mut self, key: S1, value: S2) + where + S1: AsRef, + S2: AsRef; +} + +impl EnvArg for Vec { + fn env_arg(&mut self, key: S1, value: S2) + where + S1: AsRef, + S2: AsRef, + { + self.push("--env".to_string()); + self.push(format!("{}={}", key.as_ref(), value.as_ref())) + } +} + +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + +pub(crate) mod error { + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Failed to start command: {}", source))] + CommandStart { source: std::io::Error }, + + #[snafu(display("Docker invocation failed: {}", output))] + Docker { output: String }, + + #[snafu(display("Missing environment variable '{}'", var))] + Environment { + var: String, + source: std::env::VarError, + }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff 
--git a/tools/pubsys/src/vmware/mod.rs b/tools/pubsys/src/vmware/mod.rs new file mode 100644 index 00000000..3eabc7ed --- /dev/null +++ b/tools/pubsys/src/vmware/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod govc; +pub(crate) mod upload_ova; diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs new file mode 100644 index 00000000..79bfe1ee --- /dev/null +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -0,0 +1,245 @@ +//! The upload_ova module owns the 'upload_ova' subcommand and is responsible for collating all of +//! the config necessary to upload an OVA bundle to VMware datacenters. +use crate::vmware::govc::Govc; +use crate::Args; +use log::{debug, info, trace}; +use pubsys_config::vmware::{ + Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig, + VMWARE_CREDS_PATH, +}; +use pubsys_config::InfraConfig; +use serde::Serialize; +use snafu::{ensure, OptionExt, ResultExt}; +use std::fs; +use std::path::PathBuf; +use structopt::StructOpt; +use tempfile::NamedTempFile; +use tinytemplate::TinyTemplate; + +const SPEC_TEMPLATE_NAME: &str = "spec_template"; + +/// Uploads a Bottlerocket OVA to VMware datacenters +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct UploadArgs { + /// Path to the OVA image + #[structopt(short = "o", long, parse(from_os_str))] + ova: PathBuf, + + /// Path to the import spec + #[structopt(short = "s", long, parse(from_os_str))] + spec: PathBuf, + + /// The desired VM name + #[structopt(short = "n", long)] + name: String, + + /// Make the uploaded OVA a VM template + #[structopt(long)] + mark_as_template: bool, + + /// Datacenters to which you want to upload the OVA + #[structopt(long, use_delimiter = true)] + datacenters: Vec, +} + +/// Common entrypoint from main() +pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { + // Get infra config + info!( + "Checking for infra config at path: {}", + args.infra_config_path.display() + ); + let infra_config = + InfraConfig::from_path_or_default(&args.infra_config_path).context(error::InfraConfig)?; + trace!("Using infra config: {:?}", infra_config); + + let vmware = infra_config + .vmware + .context(error::MissingConfig { missing: "vmware" })?; + + // If the user gave an override list of datacenters, use it, otherwise use what's in the config + let upload_datacenters = if !upload_args.datacenters.is_empty() { + &upload_args.datacenters + } else { + &vmware.datacenters + }; + ensure!( + !upload_datacenters.is_empty(), + error::MissingConfig { + missing: "vmware.datacenters" + } + ); + + // Retrieve credentials from GOVC_ environment variables + let creds_env = DatacenterCredsBuilder::from_env(); + // Retrieve credentials from file. The `home` crate is used to construct the VMWARE_CREDS_PATH, + // and it's possible (however unlikely) that it is unable to determine the user's home folder. + let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH { + if creds_file.exists() { + info!("Using vSphere credentials file at {}", creds_file.display()); + DatacenterCredsConfig::from_path(creds_file).context(error::VmwareConfig)? 
+ } else { + info!("vSphere credentials file not found, will attempt to use environment"); + DatacenterCredsConfig::default() + } + } else { + info!("Unable to determine vSphere credentials file location, will attempt to use environment"); + DatacenterCredsConfig::default() + }; + + // Retrieve datacenter-related GOVC_ environment variables and any common configuration given + // via Infra.toml + let dc_env = DatacenterBuilder::from_env(); + let dc_common = vmware.common.as_ref(); + + // Read the import spec as a template + let import_spec_str = fs::read_to_string(&upload_args.spec).context(error::File { + action: "read", + path: &upload_args.spec, + })?; + let mut tt = TinyTemplate::new(); + tt.add_template(SPEC_TEMPLATE_NAME, &import_spec_str) + .context(error::AddTemplate { + path: &upload_args.spec, + })?; + + info!( + "Uploading to datacenters: {}", + &upload_datacenters.join(", ") + ); + for dc in upload_datacenters { + debug!("Building config for {}", &dc); + // If any specific configuration exists for this datacenter, retrieve it from VMware + // config. Then build out a complete datacenter config with all values necessary to + // interact with VMware. Environment variables trump all others, so start with those, then + // fill in any missing items with datacenter-specific configuration and any common + // configuration. + let dc_config = vmware.datacenter.get(dc); + trace!("{} config: {:?}", &dc, &dc_config); + let datacenter: Datacenter = dc_env + .take_missing_from(dc_config) + .take_missing_from(dc_common) + .build() + .context(error::DatacenterBuild)?; + + // Use a similar pattern here for credentials; start with environment variables and fill in + // any missing items with the datacenter-specific credentials from file. + let dc_creds = creds_file.datacenter.get(dc); + let creds: DatacenterCreds = creds_env + .take_missing_from(dc_creds) + .build() + .context(error::CredsBuild)?; + + // Render the import spec with this datacenter's details and write to temp file + let rendered_spec = render_spec(&tt, &datacenter.network, upload_args.mark_as_template)?; + let import_spec = NamedTempFile::new().context(error::TempFile)?; + fs::write(import_spec.path(), &rendered_spec).context(error::File { + action: "write", + path: import_spec.path(), + })?; + trace!("Import spec: {}", &rendered_spec); + + if upload_args.mark_as_template { + info!( + "Uploading OVA to datacenter '{}' as template with name: '{}'", + &dc, &upload_args.name + ); + } else { + info!( + "Uploading OVA to datacenter '{}' with name '{}'", + &dc, &upload_args.name + ); + } + + Govc::new(datacenter, creds) + .upload_ova(&upload_args.name, &upload_args.ova, import_spec) + .context(error::UploadOva)?; + } + + Ok(()) +} + +/// Render the import spec template given the current network and template setting. +// This exists primarily to abstract the creation of the Context struct that is required by +// TinyTemplate; it's pretty ugly to do inline with the rest of the code. +fn render_spec(tt: &TinyTemplate<'_>, network: S, mark_as_template: bool) -> Result +where + S: AsRef, +{ + #[derive(Debug, Serialize)] + struct Context { + network: String, + mark_as_template: bool, + } + + let context = Context { + network: network.as_ref().to_string(), + mark_as_template, + }; + + Ok(tt + .render(SPEC_TEMPLATE_NAME, &context) + .context(error::RenderTemplate)?) 
+} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility = "pub(super)")] + pub(crate) enum Error { + #[snafu(display("Error building template from '{}': {}", path.display(), source))] + AddTemplate { + path: PathBuf, + source: tinytemplate::error::Error, + }, + + #[snafu(display("Unable to build datacenter credentials: {}", source))] + CredsBuild { + source: pubsys_config::vmware::Error, + }, + + #[snafu(display("Unable to build datacenter config: {}", source))] + DatacenterBuild { + source: pubsys_config::vmware::Error, + }, + + #[snafu(display("Missing environment variable '{}'", var))] + Environment { + var: String, + source: std::env::VarError, + }, + + #[snafu(display("Failed to {} '{}': {}", action, path.display(), source))] + File { + action: String, + path: PathBuf, + source: io::Error, + }, + + #[snafu(display("Error reading config: {}", source))] + InfraConfig { source: pubsys_config::Error }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, + + #[snafu(display("Error rendering template: {}", source))] + RenderTemplate { source: tinytemplate::error::Error }, + + #[snafu(display("Failed to create temporary file: {}", source))] + TempFile { source: io::Error }, + + #[snafu(display("Error reading config: {}", source))] + VmwareConfig { + source: pubsys_config::vmware::Error, + }, + + #[snafu(display("Failed to upload OVA: {}", source))] + UploadOva { source: crate::vmware::govc::Error }, + } +} +pub(crate) use error::Error; +type Result = std::result::Result; diff --git a/tools/pubsys/support/vmware/import_spec.template b/tools/pubsys/support/vmware/import_spec.template new file mode 100644 index 00000000..9b24bfe6 --- /dev/null +++ b/tools/pubsys/support/vmware/import_spec.template @@ -0,0 +1,16 @@ +\{ + "DiskProvisioning": "flat", + "IPAllocationPolicy": "dhcpPolicy", + "IPProtocol": "IPv4", + "NetworkMapping": [ + \{ + "Name": "VM Network", + "Network": "{ network }" + } + ], + "MarkAsTemplate": { mark_as_template }, + "PowerOn": false, + "InjectOvfEnv": false, + "WaitForIP": false, + "Name": null +} From 2bea46c17e2d33b7d47051c3ec7c60ed70d1cc4a Mon Sep 17 00:00:00 2001 From: Shane Tzen Date: Mon, 12 Jul 2021 10:21:54 -0700 Subject: [PATCH 0494/1356] fix typo Fix minor typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 24cb1a84..949858b1 100644 --- a/README.md +++ b/README.md @@ -689,4 +689,4 @@ For more details, see the [API system documentation](sources/api/). Bottlerocket operates with two default storage volumes. * The root device, `/dev/xvda`, holds the active and passive [partition sets](#updates-1). It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. -* The data device, `/dex/xvdb`, is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). +* The data device, `/dev/xvdb`, is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). 
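Returning to the `upload-ova` subcommand added earlier in this series: a minimal sketch of a direct invocation, assuming credentials come from `GOVC_*` environment variables or `~/.config/pubsys/vsphere-credentials.toml`, and using the flag names from `UploadArgs` plus the datacenter names from `Infra.toml.example`. The OVA path and VM name below are hypothetical, and the subcommand is normally driven through `cargo make`:

```sh
# Hypothetical invocation; the OVA path and VM name are placeholders.
# The govc wrapper runs in the SDK container, so BUILDSYS_SDK_IMAGE must be set.
pubsys upload-ova \
  --ova build/bottlerocket-vmware-k8s-1.21.ova \
  --spec tools/pubsys/support/vmware/import_spec.template \
  --name bottlerocket-vmware-k8s-1.21 \
  --datacenters north,south \
  --mark-as-template
```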
From 74864b13caa85234a94244cc286332c91794560d Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 22 Jul 2021 16:23:48 +0000 Subject: [PATCH 0495/1356] packages: update 5.4 kernel --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 20f124e7..75384507 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/3166b2c4af7dbb50ef04eedc98aff0020ea1570892d7e01a9dab885e04168afc/kernel-5.4.117-58.216.amzn2.src.rpm" -sha512 = "0d86948018725b4590622a49f27fa7dae03ce06fcef11d39883f7fc421087442fea54c30603c997bd6f519606be596f1e46f33727213c34bd78a85076a47eeef" +url = "https://cdn.amazonlinux.com/blobstore/d10a345f3b99842f109529ef5520232b1eba2349b667a7a0a18b1f86cb3eebbd/kernel-5.4.129-63.229.amzn2.src.rpm" +sha512 = "852a1ece96a9f7cf65f81848291a00a43f7e2ae426e65b38b72125a627970f9d7e1a5ab60cbd570d17d63911fa4644aff9a1aa84f8f3096b9ca596a90fa99fc1" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index fd4c9fea..04eb4900 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.117 +Version: 5.4.129 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/3166b2c4af7dbb50ef04eedc98aff0020ea1570892d7e01a9dab885e04168afc/kernel-5.4.117-58.216.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/d10a345f3b99842f109529ef5520232b1eba2349b667a7a0a18b1f86cb3eebbd/kernel-5.4.129-63.229.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 26fcacd88e70b4f43402abdcf035673009ec0a22 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 22 Jul 2021 16:24:04 +0000 Subject: [PATCH 0496/1356] packages: update 5.10 kernel --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 6ba57bc5..8a57c08d 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,5 +13,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/e02ea3dba6fd0e1fedb847a6bf67a4c990c3d23e128cf632af472d38ce05b3cd/kernel-5.10.35-31.135.amzn2.src.rpm" -sha512 = "7c02f472045321ab92e13d6348ed3e6c879d4423cff87770889718c9974c8871c35a3d7caae1e5ea185d53d9fd71b83e3d0f189c7f01633b33682dfc67654df8" +url = "https://cdn.amazonlinux.com/blobstore/ffdc72c6cf8a4fcebfe8a3175a3f618f42f6ff2b00a36c0da6e04cf00d258daf/kernel-5.10.50-44.132.amzn2.src.rpm" +sha512 = "ff548cfb49be98f1180c30f0c4f13846a690fb162a09be17a910267ac301b9efafacacbc5d873d699e250d8d1962bb48d7095509b6de3ce36ebf1b930efa92d8" diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index a5aa57e2..44419ef1 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.35 +Version: 5.10.50 Release: 2%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/e02ea3dba6fd0e1fedb847a6bf67a4c990c3d23e128cf632af472d38ce05b3cd/kernel-5.10.35-31.135.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/ffdc72c6cf8a4fcebfe8a3175a3f618f42f6ff2b00a36c0da6e04cf00d258daf/kernel-5.10.50-44.132.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 6181c5c29a4066b6026144602acc413f5efeb7ce Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 19 Jul 2021 16:14:50 -0700 Subject: [PATCH 0497/1356] Remove aws-k8s-1.16 variant --- .github/workflows/build.yml | 2 +- README.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 352154a6..62d1f6f7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.16, aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] + variant: [aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: diff --git a/README.md b/README.md index 949858b1..c67e0f87 100644 --- a/README.md +++ b/README.md @@ -65,8 +65,7 @@ We also have variants in preview status that are designed to be Kubernetes worke - `vmware-k8s-1.20` - `vmware-k8s-1.21` -The `aws-k8s-1.16` variant is deprecated and will no longer be supported in Bottlerocket releases after June, 2021. -The `aws-k8s-1.15` variant is no longer supported. +The `aws-k8s-1.15` and `aws-k8s-1.16` variants are no longer supported. We recommend users replace `aws-k8s-1.15` and `aws-k8s-1.16` nodes with the [latest variant compatible with their cluster](variants/). ## Architectures From 8aa5cdcd743fe6585b5344ca02ef686bfa3ce676 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Mon, 19 Jul 2021 22:16:07 +0000 Subject: [PATCH 0498/1356] README.md: Add hostname setting documentation --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index c67e0f87..75042245 100644 --- a/README.md +++ b/README.md @@ -416,6 +416,13 @@ These settings can be changed at any time. #### Network settings +* `settings.network.hostname`: The desired hostname of the system. 
+ **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. + +Most users don't need to change this setting as the following defaults work for the majority of use cases. +If this setting isn't set we attempt to use DNS reverse lookup for the hostname. +If the lookup is unsuccessful, the IP of the node is used in the format `ip-X-X-X-X`. + ##### Proxy settings These settings will configure the proxying behavior of the following services: From 83e0e35b6d700a1e619ded96df0c575d6cba48f7 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 29 Jul 2021 22:17:15 +0000 Subject: [PATCH 0499/1356] docs: minor fix in host containers documentation Signed-off-by: Arnaldo Garcia Rincon --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 75042245..6f297eef 100644 --- a/README.md +++ b/README.md @@ -522,7 +522,7 @@ There are a few important caveats to understand about host containers: * If you set `superpowered` to true, they'll essentially have root access to the host. Because of these caveats, host containers are only intended for special use cases. -We use it for the control container because it needs to be available early to give you access to the OS, and we use it for the admin container because it needs high levels of privilege and because you need it to debug when orchestration isn't working. +We use them for the control container because it needs to be available early to give you access to the OS, and for the admin container because it needs high levels of privilege and because you need it to debug when orchestration isn't working. Be careful, and make sure you have a similar low-level use case before reaching for host containers. @@ -541,7 +541,7 @@ Bootstrap containers have access to the underlying root filesystem on `/.bottler This allows bootstrap containers to create files, directories, and mounts that are visible to the host. Bootstrap containers are set up to run after the systemd `configured.target` unit is active. -The containers' systemd unit depends on this target (and not on any of the bootstrap containers' peers) which means that bootstrap containers will not execute in a deterministic order +The containers' systemd unit depends on this target (and not on any of the bootstrap containers' peers) which means that bootstrap containers will not execute in a deterministic order. The boot process will "wait" for as long as the bootstrap containers run. Bootstrap containers configured with `essential=true` will stop the boot process if they exit code is a non-zero value. From 95d43173a34a3e60c1fa61d60ef7ad13df2df6c7 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 14 Jul 2021 18:01:38 +0000 Subject: [PATCH 0500/1356] api: add support for custom CA certificates via settings This commit adds support to update the local trusted certificates store through the API using custom CA certificates. 
Signed-off-by: Arnaldo Garcia Rincon --- README.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/README.md b/README.md index 6f297eef..0a6ad55f 100644 --- a/README.md +++ b/README.md @@ -471,6 +471,36 @@ Here are the metrics settings: "vm.max_map_count" = "262144" ``` +#### Custom CA certificates settings + +By defualt, Bottlerocket ships with the Mozilla CA certificate store, but you can add self-signed certificates through the API using these settings: + +* `settings.pki..data`: Base64-encoded PEM-formatted certificates bundle; it can contain more than one certificate +* `settings.pki..trusted`: Whether the certificates in the bundle are trusted; defaults to `false` when not provided + +Here's an example of adding a bundle of self-signed certificates as user data: + +```toml +[settings.pki.my-trusted-bundle] +data="W3N..." +trusted=true + +[settings.pki.dont-trust-these] +data="W3N..." +trusted=false +``` + +Here's the same example but using API calls: + +```sh +apiclient set \ + pki.my-trusted-bundle.data="W3N..." \ + pki.my-trusted-bundle.trusted=true \ + pki.dont-trust-these.data="N3W..." \ + pki.dont-trust-there.trusted=false +``` + +You can use this method from within a [bootstrap container](#bootstrap-containers-settings), if your user data is over the size limit of the platform. #### Host containers settings * `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). From e174577668e15d3215a6b389d2194e07305d4663 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 2 Jul 2021 19:30:28 +0000 Subject: [PATCH 0501/1356] packages: add support for ZSTD compression in kernel 5.4 This commit adds support to compress both kernel images and modules with the Zstandard compression algorithm. 
--- ...-for-preboot-environment-improve-per.patch | 101 ++++ ...1-lib-Add-zstd-support-to-decompress.patch | 460 ++++++++++++++++++ ...d-support-for-zstd-compressed-kernel.patch | 119 +++++ ...ump-ZO_z_extra_bytes-margin-for-zstd.patch | 50 ++ ...d-support-for-ZSTD-compressed-kernel.patch | 175 +++++++ ....gitignore-Add-ZSTD-compressed-files.patch | 34 ++ ...le-strip-compression-code-into-scrip.patch | 184 +++++++ ...-support-for-zstd-compressed-modules.patch | 82 ++++ packages/kernel-5.4/config-bottlerocket | 11 + packages/kernel-5.4/kernel-5.4.spec | 12 + 10 files changed, 1228 insertions(+) create mode 100644 packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch create mode 100644 packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch create mode 100644 packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch create mode 100644 packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch create mode 100644 packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch create mode 100644 packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch create mode 100644 packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch create mode 100644 packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch diff --git a/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch b/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch new file mode 100644 index 00000000..a79dbacc --- /dev/null +++ b/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch @@ -0,0 +1,101 @@ +From bd475ee90b2b4ce6eae2ccbb5ef214557e937145 Mon Sep 17 00:00:00 2001 +From: Nick Terrell +Date: Thu, 30 Jul 2020 12:08:34 -0700 +Subject: [PATCH 2000/2007] lib: Prepare zstd for preboot environment, improve + performance + +These changes are necessary to get the build to work in the preboot +environment, and to get reasonable performance: + +- Remove a double definition of the CHECK_F macro when the zstd + library is amalgamated. + +- Switch ZSTD_copy8() to __builtin_memcpy(), because in the preboot + environment on x86 gcc can't inline `memcpy()` otherwise. + +- Limit the gcc hack in ZSTD_wildcopy() to the broken gcc version. See + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. + +ZSTD_copy8() and ZSTD_wildcopy() are in the core of the zstd hot loop. +So outlining these calls to memcpy(), and having an extra branch are very +detrimental to performance. 
+ +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-2-nickrterrell@gmail.com +(cherry picked from commit 6d25a633ea68a103c7293d16eb69a7d4689075ad) +--- + lib/zstd/fse_decompress.c | 9 +-------- + lib/zstd/zstd_internal.h | 14 ++++++++++++-- + 2 files changed, 13 insertions(+), 10 deletions(-) + +diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c +index a84300e5a013..0b353530fb3f 100644 +--- a/lib/zstd/fse_decompress.c ++++ b/lib/zstd/fse_decompress.c +@@ -47,6 +47,7 @@ + ****************************************************************/ + #include "bitstream.h" + #include "fse.h" ++#include "zstd_internal.h" + #include + #include + #include /* memcpy, memset */ +@@ -60,14 +61,6 @@ + enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ + } /* use only *after* variable declarations */ + +-/* check and forward error code */ +-#define CHECK_F(f) \ +- { \ +- size_t const e = f; \ +- if (FSE_isError(e)) \ +- return e; \ +- } +- + /* ************************************************************** + * Templates + ****************************************************************/ +diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h +index 1a79fab9e13a..dac753397f86 100644 +--- a/lib/zstd/zstd_internal.h ++++ b/lib/zstd/zstd_internal.h +@@ -127,7 +127,14 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; + * Shared functions to include for inlining + *********************************************/ + ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { +- memcpy(dst, src, 8); ++ /* ++ * zstd relies heavily on gcc being able to analyze and inline this ++ * memcpy() call, since it is called in a tight loop. Preboot mode ++ * is compiled in freestanding mode, which stops gcc from analyzing ++ * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a ++ * regular memcpy(). ++ */ ++ __builtin_memcpy(dst, src, 8); + } + /*! ZSTD_wildcopy() : + * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ +@@ -137,13 +144,16 @@ ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + length; +- /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. ++#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200 ++ /* ++ * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. + * Avoid the bad case where the loop only runs once by handling the + * special case separately. This doesn't trigger the bug because it + * doesn't involve pointer/integer overflow. + */ + if (length <= 8) + return ZSTD_copy8(dst, src); ++#endif + do { + ZSTD_copy8(op, ip); + op += 8; +-- +2.30.2 + diff --git a/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch b/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch new file mode 100644 index 00000000..4572db2c --- /dev/null +++ b/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch @@ -0,0 +1,460 @@ +From 30ff8b18827f5fc6c31808a5868324867688cbdd Mon Sep 17 00:00:00 2001 +From: Nick Terrell +Date: Thu, 30 Jul 2020 12:08:35 -0700 +Subject: [PATCH 2001/2007] lib: Add zstd support to decompress + +- Add unzstd() and the zstd decompress interface. + +- Add zstd support to decompress_method(). + +The decompress_method() and unzstd() functions are used to decompress +the initramfs and the initrd. 
The __decompress() function is used in +the preboot environment to decompress a zstd compressed kernel. + +The zstd decompression function allows the input and output buffers to +overlap because that is used by x86 kernel decompression. + +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-3-nickrterrell@gmail.com +(cherry picked from commit 4963bb2b89884bbdb7e33e6a09c159551e9627aa) +--- + include/linux/decompress/unzstd.h | 11 + + lib/Kconfig | 4 + + lib/Makefile | 1 + + lib/decompress.c | 5 + + lib/decompress_unzstd.c | 345 ++++++++++++++++++++++++++++++ + 5 files changed, 366 insertions(+) + create mode 100644 include/linux/decompress/unzstd.h + create mode 100644 lib/decompress_unzstd.c + +diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h +new file mode 100644 +index 000000000000..56d539ae880f +--- /dev/null ++++ b/include/linux/decompress/unzstd.h +@@ -0,0 +1,11 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef LINUX_DECOMPRESS_UNZSTD_H ++#define LINUX_DECOMPRESS_UNZSTD_H ++ ++int unzstd(unsigned char *inbuf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *output, ++ long *pos, ++ void (*error_fn)(char *x)); ++#endif +diff --git a/lib/Kconfig b/lib/Kconfig +index 3321d04dfa5a..ad33691e129c 100644 +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -329,6 +329,10 @@ config DECOMPRESS_LZ4 + select LZ4_DECOMPRESS + tristate + ++config DECOMPRESS_ZSTD ++ select ZSTD_DECOMPRESS ++ tristate ++ + # + # Generic allocator support is selected if needed + # +diff --git a/lib/Makefile b/lib/Makefile +index 6bf453fb731d..f948c1f6534d 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -157,6 +157,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o + lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o + lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o + lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o ++lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o + + obj-$(CONFIG_TEXTSEARCH) += textsearch.o + obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o +diff --git a/lib/decompress.c b/lib/decompress.c +index 857ab1af1ef3..ab3fc90ffc64 100644 +--- a/lib/decompress.c ++++ b/lib/decompress.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -37,6 +38,9 @@ + #ifndef CONFIG_DECOMPRESS_LZ4 + # define unlz4 NULL + #endif ++#ifndef CONFIG_DECOMPRESS_ZSTD ++# define unzstd NULL ++#endif + + struct compress_format { + unsigned char magic[2]; +@@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = { + { {0xfd, 0x37}, "xz", unxz }, + { {0x89, 0x4c}, "lzo", unlzo }, + { {0x02, 0x21}, "lz4", unlz4 }, ++ { {0x28, 0xb5}, "zstd", unzstd }, + { {0, 0}, NULL, NULL } + }; + +diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c +new file mode 100644 +index 000000000000..0ad2c15479ed +--- /dev/null ++++ b/lib/decompress_unzstd.c +@@ -0,0 +1,345 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++/* ++ * Important notes about in-place decompression ++ * ++ * At least on x86, the kernel is decompressed in place: the compressed data ++ * is placed to the end of the output buffer, and the decompressor overwrites ++ * most of the compressed data. There must be enough safety margin to ++ * guarantee that the write position is always behind the read position. ++ * ++ * The safety margin for ZSTD with a 128 KB block size is calculated below. 
++ * Note that the margin with ZSTD is bigger than with GZIP or XZ! ++ * ++ * The worst case for in-place decompression is that the beginning of ++ * the file is compressed extremely well, and the rest of the file is ++ * uncompressible. Thus, we must look for worst-case expansion when the ++ * compressor is encoding uncompressible data. ++ * ++ * The structure of the .zst file in case of a compresed kernel is as follows. ++ * Maximum sizes (as bytes) of the fields are in parenthesis. ++ * ++ * Frame Header: (18) ++ * Blocks: (N) ++ * Checksum: (4) ++ * ++ * The frame header and checksum overhead is at most 22 bytes. ++ * ++ * ZSTD stores the data in blocks. Each block has a header whose size is ++ * a 3 bytes. After the block header, there is up to 128 KB of payload. ++ * The maximum uncompressed size of the payload is 128 KB. The minimum ++ * uncompressed size of the payload is never less than the payload size ++ * (excluding the block header). ++ * ++ * The assumption, that the uncompressed size of the payload is never ++ * smaller than the payload itself, is valid only when talking about ++ * the payload as a whole. It is possible that the payload has parts where ++ * the decompressor consumes more input than it produces output. Calculating ++ * the worst case for this would be tricky. Instead of trying to do that, ++ * let's simply make sure that the decompressor never overwrites any bytes ++ * of the payload which it is currently reading. ++ * ++ * Now we have enough information to calculate the safety margin. We need ++ * - 22 bytes for the .zst file format headers; ++ * - 3 bytes per every 128 KiB of uncompressed size (one block header per ++ * block); and ++ * - 128 KiB (biggest possible zstd block size) to make sure that the ++ * decompressor never overwrites anything from the block it is currently ++ * reading. ++ * ++ * We get the following formula: ++ * ++ * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072 ++ * <= 22 + (uncompressed_size >> 15) + 131072 ++ */ ++ ++/* ++ * Preboot environments #include "path/to/decompress_unzstd.c". ++ * All of the source files we depend on must be #included. ++ * zstd's only source dependeny is xxhash, which has no source ++ * dependencies. ++ * ++ * When UNZSTD_PREBOOT is defined we declare __decompress(), which is ++ * used for kernel decompression, instead of unzstd(). ++ * ++ * Define __DISABLE_EXPORTS in preboot environments to prevent symbols ++ * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro. ++ */ ++#ifdef STATIC ++# define UNZSTD_PREBOOT ++# include "xxhash.c" ++# include "zstd/entropy_common.c" ++# include "zstd/fse_decompress.c" ++# include "zstd/huf_decompress.c" ++# include "zstd/zstd_common.c" ++# include "zstd/decompress.c" ++#endif ++ ++#include ++#include ++#include ++ ++/* 128MB is the maximum window size supported by zstd. */ ++#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX) ++/* ++ * Size of the input and output buffers in multi-call mode. ++ * Pick a larger size because it isn't used during kernel decompression, ++ * since that is single pass, and we have to allocate a large buffer for ++ * zstd's window anyway. The larger size speeds up initramfs decompression. 
++ */ ++#define ZSTD_IOBUF_SIZE (1 << 17) ++ ++static int INIT handle_zstd_error(size_t ret, void (*error)(char *x)) ++{ ++ const int err = ZSTD_getErrorCode(ret); ++ ++ if (!ZSTD_isError(ret)) ++ return 0; ++ ++ switch (err) { ++ case ZSTD_error_memory_allocation: ++ error("ZSTD decompressor ran out of memory"); ++ break; ++ case ZSTD_error_prefix_unknown: ++ error("Input is not in the ZSTD format (wrong magic bytes)"); ++ break; ++ case ZSTD_error_dstSize_tooSmall: ++ case ZSTD_error_corruption_detected: ++ case ZSTD_error_checksum_wrong: ++ error("ZSTD-compressed data is corrupt"); ++ break; ++ default: ++ error("ZSTD-compressed data is probably corrupt"); ++ break; ++ } ++ return -1; ++} ++ ++/* ++ * Handle the case where we have the entire input and output in one segment. ++ * We can allocate less memory (no circular buffer for the sliding window), ++ * and avoid some memcpy() calls. ++ */ ++static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf, ++ long out_len, long *in_pos, ++ void (*error)(char *x)) ++{ ++ const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); ++ void *wksp = large_malloc(wksp_size); ++ ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size); ++ int err; ++ size_t ret; ++ ++ if (dctx == NULL) { ++ error("Out of memory while allocating ZSTD_DCtx"); ++ err = -1; ++ goto out; ++ } ++ /* ++ * Find out how large the frame actually is, there may be junk at ++ * the end of the frame that ZSTD_decompressDCtx() can't handle. ++ */ ++ ret = ZSTD_findFrameCompressedSize(in_buf, in_len); ++ err = handle_zstd_error(ret, error); ++ if (err) ++ goto out; ++ in_len = (long)ret; ++ ++ ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len); ++ err = handle_zstd_error(ret, error); ++ if (err) ++ goto out; ++ ++ if (in_pos != NULL) ++ *in_pos = in_len; ++ ++ err = 0; ++out: ++ if (wksp != NULL) ++ large_free(wksp); ++ return err; ++} ++ ++static int INIT __unzstd(unsigned char *in_buf, long in_len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, long out_len, ++ long *in_pos, ++ void (*error)(char *x)) ++{ ++ ZSTD_inBuffer in; ++ ZSTD_outBuffer out; ++ ZSTD_frameParams params; ++ void *in_allocated = NULL; ++ void *out_allocated = NULL; ++ void *wksp = NULL; ++ size_t wksp_size; ++ ZSTD_DStream *dstream; ++ int err; ++ size_t ret; ++ ++ if (out_len == 0) ++ out_len = LONG_MAX; /* no limit */ ++ ++ if (fill == NULL && flush == NULL) ++ /* ++ * We can decompress faster and with less memory when we have a ++ * single chunk. ++ */ ++ return decompress_single(in_buf, in_len, out_buf, out_len, ++ in_pos, error); ++ ++ /* ++ * If in_buf is not provided, we must be using fill(), so allocate ++ * a large enough buffer. If it is provided, it must be at least ++ * ZSTD_IOBUF_SIZE large. ++ */ ++ if (in_buf == NULL) { ++ in_allocated = large_malloc(ZSTD_IOBUF_SIZE); ++ if (in_allocated == NULL) { ++ error("Out of memory while allocating input buffer"); ++ err = -1; ++ goto out; ++ } ++ in_buf = in_allocated; ++ in_len = 0; ++ } ++ /* Read the first chunk, since we need to decode the frame header. */ ++ if (fill != NULL) ++ in_len = fill(in_buf, ZSTD_IOBUF_SIZE); ++ if (in_len < 0) { ++ error("ZSTD-compressed data is truncated"); ++ err = -1; ++ goto out; ++ } ++ /* Set the first non-empty input buffer. */ ++ in.src = in_buf; ++ in.pos = 0; ++ in.size = in_len; ++ /* Allocate the output buffer if we are using flush(). 
*/ ++ if (flush != NULL) { ++ out_allocated = large_malloc(ZSTD_IOBUF_SIZE); ++ if (out_allocated == NULL) { ++ error("Out of memory while allocating output buffer"); ++ err = -1; ++ goto out; ++ } ++ out_buf = out_allocated; ++ out_len = ZSTD_IOBUF_SIZE; ++ } ++ /* Set the output buffer. */ ++ out.dst = out_buf; ++ out.pos = 0; ++ out.size = out_len; ++ ++ /* ++ * We need to know the window size to allocate the ZSTD_DStream. ++ * Since we are streaming, we need to allocate a buffer for the sliding ++ * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX ++ * (8 MB), so it is important to use the actual value so as not to ++ * waste memory when it is smaller. ++ */ ++ ret = ZSTD_getFrameParams(¶ms, in.src, in.size); ++ err = handle_zstd_error(ret, error); ++ if (err) ++ goto out; ++ if (ret != 0) { ++ error("ZSTD-compressed data has an incomplete frame header"); ++ err = -1; ++ goto out; ++ } ++ if (params.windowSize > ZSTD_WINDOWSIZE_MAX) { ++ error("ZSTD-compressed data has too large a window size"); ++ err = -1; ++ goto out; ++ } ++ ++ /* ++ * Allocate the ZSTD_DStream now that we know how much memory is ++ * required. ++ */ ++ wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize); ++ wksp = large_malloc(wksp_size); ++ dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size); ++ if (dstream == NULL) { ++ error("Out of memory while allocating ZSTD_DStream"); ++ err = -1; ++ goto out; ++ } ++ ++ /* ++ * Decompression loop: ++ * Read more data if necessary (error if no more data can be read). ++ * Call the decompression function, which returns 0 when finished. ++ * Flush any data produced if using flush(). ++ */ ++ if (in_pos != NULL) ++ *in_pos = 0; ++ do { ++ /* ++ * If we need to reload data, either we have fill() and can ++ * try to get more data, or we don't and the input is truncated. ++ */ ++ if (in.pos == in.size) { ++ if (in_pos != NULL) ++ *in_pos += in.pos; ++ in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1; ++ if (in_len < 0) { ++ error("ZSTD-compressed data is truncated"); ++ err = -1; ++ goto out; ++ } ++ in.pos = 0; ++ in.size = in_len; ++ } ++ /* Returns zero when the frame is complete. */ ++ ret = ZSTD_decompressStream(dstream, &out, &in); ++ err = handle_zstd_error(ret, error); ++ if (err) ++ goto out; ++ /* Flush all of the data produced if using flush(). 
*/ ++ if (flush != NULL && out.pos > 0) { ++ if (out.pos != flush(out.dst, out.pos)) { ++ error("Failed to flush()"); ++ err = -1; ++ goto out; ++ } ++ out.pos = 0; ++ } ++ } while (ret != 0); ++ ++ if (in_pos != NULL) ++ *in_pos += in.pos; ++ ++ err = 0; ++out: ++ if (in_allocated != NULL) ++ large_free(in_allocated); ++ if (out_allocated != NULL) ++ large_free(out_allocated); ++ if (wksp != NULL) ++ large_free(wksp); ++ return err; ++} ++ ++#ifndef UNZSTD_PREBOOT ++STATIC int INIT unzstd(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); ++} ++#else ++STATIC int INIT __decompress(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, long out_len, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); ++} ++#endif +-- +2.30.2 + diff --git a/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch b/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch new file mode 100644 index 00000000..9eef1158 --- /dev/null +++ b/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch @@ -0,0 +1,119 @@ +From 306c3246fc07136e55747b9d4016e043bb77b00a Mon Sep 17 00:00:00 2001 +From: Nick Terrell +Date: Thu, 30 Jul 2020 12:08:36 -0700 +Subject: [PATCH 2002/2007] init: Add support for zstd compressed kernel + +- Add the zstd and zstd22 cmds to scripts/Makefile.lib + +- Add the HAVE_KERNEL_ZSTD and KERNEL_ZSTD options + +Architecture specific support is still needed for decompression. 
+ +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-4-nickrterrell@gmail.com +(cherry picked from commit 48f7ddf785af24aa380f3282d8d4400883d0099e) +--- + Makefile | 3 ++- + init/Kconfig | 15 ++++++++++++++- + scripts/Makefile.lib | 22 ++++++++++++++++++++++ + 3 files changed, 38 insertions(+), 2 deletions(-) + +diff --git a/Makefile b/Makefile +index e51077a8080d..3f593214a087 100644 +--- a/Makefile ++++ b/Makefile +@@ -448,6 +448,7 @@ KLZOP = lzop + LZMA = lzma + LZ4 = lz4c + XZ = xz ++ZSTD = zstd + + CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ + -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) +@@ -496,7 +497,7 @@ CLANG_FLAGS := + export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC + export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL + export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +-export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ++export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD + export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE + + export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS +diff --git a/init/Kconfig b/init/Kconfig +index f23e90d9935f..4dc3ea198a2c 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -159,13 +159,16 @@ config HAVE_KERNEL_LZO + config HAVE_KERNEL_LZ4 + bool + ++config HAVE_KERNEL_ZSTD ++ bool ++ + config HAVE_KERNEL_UNCOMPRESSED + bool + + choice + prompt "Kernel compression mode" + default KERNEL_GZIP +- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED ++ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_ZSTD || HAVE_KERNEL_UNCOMPRESSED + help + The linux kernel is a kind of self-extracting executable. + Several compression algorithms are available, which differ +@@ -244,6 +247,16 @@ config KERNEL_LZ4 + is about 8% bigger than LZO. But the decompression speed is + faster than LZO. + ++config KERNEL_ZSTD ++ bool "ZSTD" ++ depends on HAVE_KERNEL_ZSTD ++ help ++ ZSTD is a compression algorithm targeting intermediate compression ++ with fast decompression speed. It will compress better than GZIP and ++ decompress around the same speed as LZO, but slower than LZ4. You ++ will need at least 192 KB RAM or more for booting. The zstd command ++ line tool is required for compression. ++ + config KERNEL_UNCOMPRESSED + bool "None" + depends on HAVE_KERNEL_UNCOMPRESSED +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index a6d0044328b1..698b58774ed7 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -383,6 +383,28 @@ quiet_cmd_xzkern = XZKERN $@ + quiet_cmd_xzmisc = XZMISC $@ + cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@ + ++# ZSTD ++# --------------------------------------------------------------------------- ++# Appends the uncompressed size of the data using size_append. The .zst ++# format has the size information available at the beginning of the file too, ++# but it's in a more complex format and it's good to avoid changing the part ++# of the boot code that reads the uncompressed size. ++# ++# Note that the bytes added by size_append will make the zstd tool think that ++# the file is corrupt. This is expected. 
++# ++# zstd uses a maximum window size of 8 MB. zstd22 uses a maximum window size of ++# 128 MB. zstd22 is used for kernel compression because it is decompressed in a ++# single pass, so zstd doesn't need to allocate a window buffer. When streaming ++# decompression is used, like initramfs decompression, zstd22 should likely not ++# be used because it would require zstd to allocate a 128 MB buffer. ++ ++quiet_cmd_zstd = ZSTD $@ ++ cmd_zstd = { cat $(real-prereqs) | $(ZSTD) -19; $(size_append); } > $@ ++ ++quiet_cmd_zstd22 = ZSTD22 $@ ++ cmd_zstd22 = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@ ++ + # ASM offsets + # --------------------------------------------------------------------------- + +-- +2.30.2 + diff --git a/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch b/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch new file mode 100644 index 00000000..3da5c14a --- /dev/null +++ b/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch @@ -0,0 +1,50 @@ +From 66cad5025a1bbd5a2dec72f706c293ee6ff59243 Mon Sep 17 00:00:00 2001 +From: Nick Terrell +Date: Thu, 30 Jul 2020 12:08:38 -0700 +Subject: [PATCH 2003/2007] x86: Bump ZO_z_extra_bytes margin for zstd + +Bump the ZO_z_extra_bytes margin for zstd. + +Zstd needs 3 bytes per 128 KB, and has a 22 byte fixed overhead. +Zstd needs to maintain 128 KB of space at all times, since that is +the maximum block size. See the comments regarding in-place +decompression added in lib/decompress_unzstd.c for details. + +The existing code is written so that all the compression algorithms use +the same ZO_z_extra_bytes. It is taken to be the maximum of the growth +rate plus the maximum fixed overhead. The comments just above this diff +state that: + +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-6-nickrterrell@gmail.com +(cherry picked from commit 0fe4f4ef8cc8e15a8f29f08f4be6128395f125f6) +--- + arch/x86/boot/header.S | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S +index 2c11c0f45d49..1382b7bb73d2 100644 +--- a/arch/x86/boot/header.S ++++ b/arch/x86/boot/header.S +@@ -536,8 +536,14 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr + # the size-dependent part now grows so fast. + # + # extra_bytes = (uncompressed_size >> 8) + 65536 ++# ++# ZSTD compressed data grows by at most 3 bytes per 128K, and only has a 22 ++# byte fixed overhead but has a maximum block size of 128K, so it needs a ++# larger margin. 
++# ++# extra_bytes = (uncompressed_size >> 8) + 131072 + +-#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536) ++#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 131072) + #if ZO_z_output_len > ZO_z_input_len + # define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \ + ZO_z_input_len) +-- +2.30.2 + diff --git a/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch b/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch new file mode 100644 index 00000000..9507f2e7 --- /dev/null +++ b/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch @@ -0,0 +1,175 @@ +From 31714e3795c54f31f7edbfb1bf1808ab12439347 Mon Sep 17 00:00:00 2001 +From: Nick Terrell +Date: Thu, 30 Jul 2020 12:08:39 -0700 +Subject: [PATCH 2004/2007] x86: Add support for ZSTD compressed kernel + +- Add support for zstd compressed kernel + +- Define __DISABLE_EXPORTS in Makefile + +- Remove __DISABLE_EXPORTS definition from kaslr.c + +- Bump the heap size for zstd. + +- Update the documentation. + +Integrates the ZSTD decompression code to the x86 pre-boot code. + +Zstandard requires slightly more memory during the kernel decompression +on x86 (192 KB vs 64 KB), and the memory usage is independent of the +window size. + +__DISABLE_EXPORTS is now defined in the Makefile, which covers both +the existing use in kaslr.c, and the use needed by the zstd decompressor +in misc.c. + +This patch has been boot tested with both a zstd and gzip compressed +kernel on i386 and x86_64 using buildroot and QEMU. + +Additionally, this has been tested in production on x86_64 devices. +We saw a 2 second boot time reduction by switching kernel compression +from xz to zstd. + +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-7-nickrterrell@gmail.com +(cherry picked from commit fb46d057db824693994b048d3a8c869892afaa3f) +[fixed merge conflict in arch/x86/boot/compressed/Makefile] +Signed-off-by: Arnaldo Garcia Rincon +--- + Documentation/x86/boot.rst | 6 +++--- + arch/x86/Kconfig | 1 + + arch/x86/boot/compressed/Makefile | 6 +++++- + arch/x86/boot/compressed/kaslr.c | 7 ------- + arch/x86/boot/compressed/misc.c | 4 ++++ + arch/x86/include/asm/boot.h | 11 +++++++++-- + 6 files changed, 22 insertions(+), 13 deletions(-) + +diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst +index 08a2f100c0e6..4e6b8ee2978e 100644 +--- a/Documentation/x86/boot.rst ++++ b/Documentation/x86/boot.rst +@@ -767,9 +767,9 @@ Protocol: 2.08+ + uncompressed data should be determined using the standard magic + numbers. The currently supported compression formats are gzip + (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA +- (magic number 5D 00), XZ (magic number FD 37), and LZ4 (magic number +- 02 21). The uncompressed payload is currently always ELF (magic +- number 7F 45 4C 46). ++ (magic number 5D 00), XZ (magic number FD 37), LZ4 (magic number ++ 02 21) and ZSTD (magic number 28 B5). The uncompressed payload is ++ currently always ELF (magic number 7F 45 4C 46). 
+ + ============ ============== + Field name: payload_length +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 36a28b9e46cb..9e7067cdebbf 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -179,6 +179,7 @@ config X86 + select HAVE_KERNEL_LZMA + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ ++ select HAVE_KERNEL_ZSTD + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_FUNCTION_ERROR_INJECTION +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 292b5bc6e3a3..dfe6782d76db 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -24,7 +24,7 @@ OBJECT_FILES_NON_STANDARD := y + KCOV_INSTRUMENT := n + + targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ +- vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 ++ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst + + KBUILD_CFLAGS := -m$(BITS) -O2 + KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) +@@ -40,6 +40,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, gnu) + KBUILD_CFLAGS += -Wno-pointer-sign + # Disable relocation relaxation in case the link is not PIE. + KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) ++KBUILD_CFLAGS += -D__DISABLE_EXPORTS + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +@@ -146,6 +147,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE + $(call if_changed,lzo) + $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE + $(call if_changed,lz4) ++$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE ++ $(call if_changed,zstd22) + + suffix-$(CONFIG_KERNEL_GZIP) := gz + suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +@@ -153,6 +156,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma + suffix-$(CONFIG_KERNEL_XZ) := xz + suffix-$(CONFIG_KERNEL_LZO) := lzo + suffix-$(CONFIG_KERNEL_LZ4) := lz4 ++suffix-$(CONFIG_KERNEL_ZSTD) := zst + + quiet_cmd_mkpiggy = MKPIGGY $@ + cmd_mkpiggy = $(obj)/mkpiggy $< > $@ +diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c +index 2e53c056ba20..ae7e1698587f 100644 +--- a/arch/x86/boot/compressed/kaslr.c ++++ b/arch/x86/boot/compressed/kaslr.c +@@ -19,13 +19,6 @@ + */ + #define BOOT_CTYPE_H + +-/* +- * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. +- * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL +- * which is meaningless and will cause compiling error in some cases. +- */ +-#define __DISABLE_EXPORTS +- + #include "misc.h" + #include "error.h" + #include "../string.h" +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index 9652d5c2afda..39e592d0e0b4 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -77,6 +77,10 @@ static int lines, cols; + #ifdef CONFIG_KERNEL_LZ4 + #include "../../../../lib/decompress_unlz4.c" + #endif ++ ++#ifdef CONFIG_KERNEL_ZSTD ++#include "../../../../lib/decompress_unzstd.c" ++#endif + /* + * NOTE: When adding a new decompressor, please update the analysis in + * ../header.S. 
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h +index 680c320363db..9191280d9ea3 100644 +--- a/arch/x86/include/asm/boot.h ++++ b/arch/x86/include/asm/boot.h +@@ -24,9 +24,16 @@ + # error "Invalid value for CONFIG_PHYSICAL_ALIGN" + #endif + +-#ifdef CONFIG_KERNEL_BZIP2 ++#if defined(CONFIG_KERNEL_BZIP2) + # define BOOT_HEAP_SIZE 0x400000 +-#else /* !CONFIG_KERNEL_BZIP2 */ ++#elif defined(CONFIG_KERNEL_ZSTD) ++/* ++ * Zstd needs to allocate the ZSTD_DCtx in order to decompress the kernel. ++ * The ZSTD_DCtx is ~160KB, so set the heap size to 192KB because it is a ++ * round number and to allow some slack. ++ */ ++# define BOOT_HEAP_SIZE 0x30000 ++#else + # define BOOT_HEAP_SIZE 0x10000 + #endif + +-- +2.30.2 + diff --git a/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch b/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch new file mode 100644 index 00000000..be693c7c --- /dev/null +++ b/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch @@ -0,0 +1,34 @@ +From 9e5deea6a88d5c1038e7d2562224a2bfd1a083f1 Mon Sep 17 00:00:00 2001 +From: Adam Borowski +Date: Thu, 30 Jul 2020 12:08:40 -0700 +Subject: [PATCH 2005/2007] .gitignore: Add ZSTD-compressed files + +For now, that's arch/x86/boot/compressed/vmlinux.bin.zst but probably more +will come, thus let's be consistent with all other compressors. + +Signed-off-by: Adam Borowski +Signed-off-by: Nick Terrell +Signed-off-by: Ingo Molnar +Tested-by: Sedat Dilek +Reviewed-by: Kees Cook +Link: https://lore.kernel.org/r/20200730190841.2071656-8-nickrterrell@gmail.com +(cherry picked from commit 6f3decabaff032e5fcc6cf56f0851ee259359232) +--- + .gitignore | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/.gitignore b/.gitignore +index 70580bdd352c..10faf379482f 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -44,6 +44,7 @@ + *.tab.[ch] + *.tar + *.xz ++*.zst + Module.symvers + modules.builtin + modules.order +-- +2.30.2 + diff --git a/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch b/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch new file mode 100644 index 00000000..cc0c97af --- /dev/null +++ b/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch @@ -0,0 +1,184 @@ +From 1b82a356860716e14a27bc90cc9755caebbdab0b Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Wed, 31 Mar 2021 22:38:08 +0900 +Subject: [PATCH 2006/2007] kbuild: move module strip/compression code into + scripts/Makefile.modinst + +Both mod_strip_cmd and mod_compress_cmd are only used in +scripts/Makefile.modinst, hence there is no good reason to define them +in the top Makefile. Move the relevant code to scripts/Makefile.modinst. + +Also, show separate log messages for each of install, strip, sign, and +compress. 
+ +Signed-off-by: Masahiro Yamada +(cherry picked from commit 65ce9c38326e2588fcd1a3a4817c14b4660f430b) +[fixed a merge conflict in Makefile and script/Makefile.modinst while cherry-picking] +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 32 ------------- + scripts/Makefile.modinst | 98 +++++++++++++++++++++++++++++++++------- + 2 files changed, 81 insertions(+), 49 deletions(-) + +diff --git a/Makefile b/Makefile +index 3f593214a087..ef0da022f0c1 100644 +--- a/Makefile ++++ b/Makefile +@@ -978,38 +978,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE) + MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) + export MODLIB + +-# +-# INSTALL_MOD_STRIP, if defined, will cause modules to be +-# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then +-# the default option --strip-debug will be used. Otherwise, +-# INSTALL_MOD_STRIP value will be used as the options to the strip command. +- +-ifdef INSTALL_MOD_STRIP +-ifeq ($(INSTALL_MOD_STRIP),1) +-mod_strip_cmd = $(STRIP) --strip-debug +-else +-mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) +-endif # INSTALL_MOD_STRIP=1 +-else +-mod_strip_cmd = true +-endif # INSTALL_MOD_STRIP +-export mod_strip_cmd +- +-# CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed +-# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP +-# or CONFIG_MODULE_COMPRESS_XZ. +- +-mod_compress_cmd = true +-ifdef CONFIG_MODULE_COMPRESS +- ifdef CONFIG_MODULE_COMPRESS_GZIP +- mod_compress_cmd = $(KGZIP) -n -f +- endif # CONFIG_MODULE_COMPRESS_GZIP +- ifdef CONFIG_MODULE_COMPRESS_XZ +- mod_compress_cmd = $(XZ) -f +- endif # CONFIG_MODULE_COMPRESS_XZ +-endif # CONFIG_MODULE_COMPRESS +-export mod_compress_cmd +- + ifdef CONFIG_MODULE_SIG_ALL + $(eval $(call config_filename,MODULE_SIG_KEY)) + +diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst +index 5a4579e76485..84696ef99df7 100644 +--- a/scripts/Makefile.modinst ++++ b/scripts/Makefile.modinst +@@ -6,30 +6,94 @@ + PHONY := __modinst + __modinst: + +-include scripts/Kbuild.include ++include include/config/auto.conf ++include $(srctree)/scripts/Kbuild.include + +-modules := $(sort $(shell cat $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order)) ++modules := $(sort $(shell cat $(MODORDER))) ++ ++ifeq ($(KBUILD_EXTMOD),) ++dst := $(MODLIB)/kernel ++else ++INSTALL_MOD_DIR ?= extra ++dst := $(MODLIB)/$(INSTALL_MOD_DIR) ++endif ++ ++suffix-y := ++suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz ++suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz ++ ++modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) + +-PHONY += $(modules) + __modinst: $(modules) + @: + +-# Don't stop modules_install if we can't sign external modules. +-quiet_cmd_modules_install = INSTALL $@ +- cmd_modules_install = \ +- mkdir -p $(2) ; \ +- cp $@ $(2) ; \ +- $(mod_strip_cmd) $(2)/$(notdir $@) ; \ +- $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) ; \ +- $(mod_compress_cmd) $(2)/$(notdir $@) ++quiet_cmd_none = ++ cmd_none = : + +-# Modules built outside the kernel source tree go into extra by default +-INSTALL_MOD_DIR ?= extra +-ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D)) ++# ++# Installation ++# ++quiet_cmd_install = INSTALL $@ ++ cmd_install = mkdir -p $(dir $@); cp $< $@ ++ ++# Strip ++# ++# INSTALL_MOD_STRIP, if defined, will cause modules to be stripped after they ++# are installed. If INSTALL_MOD_STRIP is '1', then the default option ++# --strip-debug will be used. 
Otherwise, INSTALL_MOD_STRIP value will be used ++# as the options to the strip command. ++ifdef INSTALL_MOD_STRIP ++ ++ifeq ($(INSTALL_MOD_STRIP),1) ++strip-option := --strip-debug ++else ++strip-option := $(INSTALL_MOD_STRIP) ++endif ++ ++quiet_cmd_strip = STRIP $@ ++ cmd_strip = $(STRIP) $(strip-option) $@ ++ ++else ++ ++quiet_cmd_strip = ++ cmd_strip = : ++ ++endif ++ ++# ++# Signing ++# Don't stop modules_install even if we can't sign external modules. ++# ++ifeq ($(CONFIG_MODULE_SIG_ALL),y) ++quiet_cmd_sign = SIGN $@ ++$(eval $(call config_filename,MODULE_SIG_KEY)) ++ cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509 $@ \ ++ $(if $(KBUILD_EXTMOD),|| true) ++else ++quiet_cmd_sign := ++ cmd_sign := : ++endif ++ ++$(dst)/%.ko: $(extmod_prefix)%.ko FORCE ++ $(call cmd,install) ++ $(call cmd,strip) ++ $(call cmd,sign) ++ ++# ++# Compression ++# ++quiet_cmd_gzip = GZIP $@ ++ cmd_gzip = $(KGZIP) -n -f $< ++quiet_cmd_xz = XZ $@ ++ cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< ++ ++$(dst)/%.ko.gz: $(dst)/%.ko FORCE ++ $(call cmd,gzip) + +-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) ++$(dst)/%.ko.xz: $(dst)/%.ko FORCE ++ $(call cmd,xz) + +-$(modules): +- $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) ++PHONY += FORCE ++FORCE: + + .PHONY: $(PHONY) +-- +2.30.2 + diff --git a/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch b/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch new file mode 100644 index 00000000..7aebb53f --- /dev/null +++ b/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch @@ -0,0 +1,82 @@ +From ddd6d2cff1af4bccee97a7d939e39f64a8965e50 Mon Sep 17 00:00:00 2001 +From: Piotr Gorski +Date: Wed, 7 Apr 2021 18:09:27 +0200 +Subject: [PATCH 2007/2007] kbuild: add support for zstd compressed modules + +kmod 28 supports modules compressed in zstd format so let's add this +possibility to kernel. + +Signed-off-by: Piotr Gorski +Reviewed-by: Oleksandr Natalenko +Signed-off-by: Masahiro Yamada +(cherry picked from commit c3d7ef377eb2564b165b1e8fdb4646952c90ac17) +[fixed a merge conflict in init/Kconfig] +Signed-off-by: Arnaldo Garcia Rincon +--- + init/Kconfig | 11 +++++++++-- + scripts/Makefile.modinst | 6 ++++++ + 2 files changed, 15 insertions(+), 2 deletions(-) + +diff --git a/init/Kconfig b/init/Kconfig +index 4dc3ea198a2c..c6ffb8b7eec6 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -2121,8 +2121,9 @@ config MODULE_COMPRESS + Out-of-tree kernel modules installed using Kbuild will also be + compressed upon installation. + +- Note: for modules inside an initrd or initramfs, it's more efficient +- to compress the whole initrd or initramfs instead. ++ Please note that the tool used to load modules needs to support the ++ corresponding algorithm. module-init-tools MAY support gzip, and kmod ++ MAY support gzip, xz and zstd. + + Note: This is fully compatible with signed modules. + +@@ -2144,6 +2145,12 @@ config MODULE_COMPRESS_GZIP + config MODULE_COMPRESS_XZ + bool "XZ" + ++config MODULE_COMPRESS_ZSTD ++ bool "ZSTD" ++ help ++ Compress modules with ZSTD. The installed modules are suffixed ++ with .ko.zst. 
++ + endchoice + + config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS +diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst +index 84696ef99df7..59f613aa08b4 100644 +--- a/scripts/Makefile.modinst ++++ b/scripts/Makefile.modinst +@@ -21,6 +21,7 @@ endif + suffix-y := + suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz + suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz ++suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst + + modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) + +@@ -86,6 +87,8 @@ quiet_cmd_gzip = GZIP $@ + cmd_gzip = $(KGZIP) -n -f $< + quiet_cmd_xz = XZ $@ + cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< ++quiet_cmd_zstd = ZSTD $@ ++ cmd_zstd = $(ZSTD) -T0 --rm -f -q $< + + $(dst)/%.ko.gz: $(dst)/%.ko FORCE + $(call cmd,gzip) +@@ -93,6 +96,9 @@ $(dst)/%.ko.gz: $(dst)/%.ko FORCE + $(dst)/%.ko.xz: $(dst)/%.ko FORCE + $(call cmd,xz) + ++$(dst)/%.ko.zst: $(dst)/%.ko FORCE ++ $(call cmd,zstd) ++ + PHONY += FORCE + FORCE: + +-- +2.30.2 + diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 26cce3d6..baf23abd 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -61,3 +61,14 @@ CONFIG_DEBUG_INFO_BTF=y # We don't want to extend the kernel command line with any upstream defaults; # Bottlerocket uses a fairly custom setup that needs tight control over it. CONFIG_CMDLINE_EXTEND=n + +# Enable ZSTD kernel image compression +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_ZSTD=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_DECOMPRESS_ZSTD=y + +# Enable ZSTD modules compression +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 04eb4900..94e5fea6 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -12,6 +12,7 @@ Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch + # Required patches for kdump support Patch0002: 0002-x86-purgatory-Add-fno-stack-protector.patch Patch0003: 0003-arm64-kexec_file-add-crash-dump-support.patch @@ -20,6 +21,17 @@ Patch0004: 0004-libfdt-include-fdt_addresses.c.patch # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch +# Add zstd support for compressed kernel +Patch2000: 2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch +Patch2001: 2001-lib-Add-zstd-support-to-decompress.patch +Patch2002: 2002-init-Add-support-for-zstd-compressed-kernel.patch +Patch2003: 2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch +Patch2004: 2004-x86-Add-support-for-ZSTD-compressed-kernel.patch +Patch2005: 2005-.gitignore-Add-ZSTD-compressed-files.patch +# Add zstd support for compressed kernel modules +Patch2006: 2006-kbuild-move-module-strip-compression-code-into-scrip.patch +Patch2007: 2007-kbuild-add-support-for-zstd-compressed-modules.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 3407c1d66b3cf3963eb4a1bcab02dbd3dc299566 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 2 Jul 2021 19:30:50 +0000 Subject: [PATCH 0502/1356] packages: add support for ZSTD compression in kernel 5.10 This commit adds support to compress both kernel images and modules with the Zstandard compression algorithm. 
--- ...le-strip-compression-code-into-scrip.patch | 184 ++++++++++++++++++ ...-support-for-zstd-compressed-modules.patch | 82 ++++++++ packages/kernel-5.10/config-bottlerocket | 11 ++ packages/kernel-5.10/kernel-5.10.spec | 3 + 4 files changed, 280 insertions(+) create mode 100644 packages/kernel-5.10/2000-kbuild-move-module-strip-compression-code-into-scrip.patch create mode 100644 packages/kernel-5.10/2001-kbuild-add-support-for-zstd-compressed-modules.patch diff --git a/packages/kernel-5.10/2000-kbuild-move-module-strip-compression-code-into-scrip.patch b/packages/kernel-5.10/2000-kbuild-move-module-strip-compression-code-into-scrip.patch new file mode 100644 index 00000000..7e0050bd --- /dev/null +++ b/packages/kernel-5.10/2000-kbuild-move-module-strip-compression-code-into-scrip.patch @@ -0,0 +1,184 @@ +From 4fc601b61092b4a7608a3d79d32663d9d167caf4 Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Wed, 31 Mar 2021 22:38:08 +0900 +Subject: [PATCH 2000/2001] kbuild: move module strip/compression code into + scripts/Makefile.modinst + +Both mod_strip_cmd and mod_compress_cmd are only used in +scripts/Makefile.modinst, hence there is no good reason to define them +in the top Makefile. Move the relevant code to scripts/Makefile.modinst. + +Also, show separate log messages for each of install, strip, sign, and +compress. + +Signed-off-by: Masahiro Yamada +(cherry picked from commit 65ce9c38326e2588fcd1a3a4817c14b4660f430b) +[fixed a merge conflict in Makefile and script/Makefile.modinst while cherry-picking] +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 32 ------------- + scripts/Makefile.modinst | 98 +++++++++++++++++++++++++++++++++------- + 2 files changed, 81 insertions(+), 49 deletions(-) + +diff --git a/Makefile b/Makefile +index 2931104182e7..bf6f6e3074da 100644 +--- a/Makefile ++++ b/Makefile +@@ -1028,38 +1028,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE) + MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) + export MODLIB + +-# +-# INSTALL_MOD_STRIP, if defined, will cause modules to be +-# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then +-# the default option --strip-debug will be used. Otherwise, +-# INSTALL_MOD_STRIP value will be used as the options to the strip command. +- +-ifdef INSTALL_MOD_STRIP +-ifeq ($(INSTALL_MOD_STRIP),1) +-mod_strip_cmd = $(STRIP) --strip-debug +-else +-mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) +-endif # INSTALL_MOD_STRIP=1 +-else +-mod_strip_cmd = true +-endif # INSTALL_MOD_STRIP +-export mod_strip_cmd +- +-# CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed +-# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP +-# or CONFIG_MODULE_COMPRESS_XZ. 
+- +-mod_compress_cmd = true +-ifdef CONFIG_MODULE_COMPRESS +- ifdef CONFIG_MODULE_COMPRESS_GZIP +- mod_compress_cmd = $(KGZIP) -n -f +- endif # CONFIG_MODULE_COMPRESS_GZIP +- ifdef CONFIG_MODULE_COMPRESS_XZ +- mod_compress_cmd = $(XZ) -f +- endif # CONFIG_MODULE_COMPRESS_XZ +-endif # CONFIG_MODULE_COMPRESS +-export mod_compress_cmd +- + ifdef CONFIG_MODULE_SIG_ALL + $(eval $(call config_filename,MODULE_SIG_KEY)) + +diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst +index 5a4579e76485..84696ef99df7 100644 +--- a/scripts/Makefile.modinst ++++ b/scripts/Makefile.modinst +@@ -6,30 +6,94 @@ + PHONY := __modinst + __modinst: + +-include scripts/Kbuild.include ++include include/config/auto.conf ++include $(srctree)/scripts/Kbuild.include + +-modules := $(sort $(shell cat $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order)) ++modules := $(sort $(shell cat $(MODORDER))) ++ ++ifeq ($(KBUILD_EXTMOD),) ++dst := $(MODLIB)/kernel ++else ++INSTALL_MOD_DIR ?= extra ++dst := $(MODLIB)/$(INSTALL_MOD_DIR) ++endif ++ ++suffix-y := ++suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz ++suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz ++ ++modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) + +-PHONY += $(modules) + __modinst: $(modules) + @: + +-# Don't stop modules_install if we can't sign external modules. +-quiet_cmd_modules_install = INSTALL $@ +- cmd_modules_install = \ +- mkdir -p $(2) ; \ +- cp $@ $(2) ; \ +- $(mod_strip_cmd) $(2)/$(notdir $@) ; \ +- $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) ; \ +- $(mod_compress_cmd) $(2)/$(notdir $@) ++quiet_cmd_none = ++ cmd_none = : + +-# Modules built outside the kernel source tree go into extra by default +-INSTALL_MOD_DIR ?= extra +-ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D)) ++# ++# Installation ++# ++quiet_cmd_install = INSTALL $@ ++ cmd_install = mkdir -p $(dir $@); cp $< $@ ++ ++# Strip ++# ++# INSTALL_MOD_STRIP, if defined, will cause modules to be stripped after they ++# are installed. If INSTALL_MOD_STRIP is '1', then the default option ++# --strip-debug will be used. Otherwise, INSTALL_MOD_STRIP value will be used ++# as the options to the strip command. ++ifdef INSTALL_MOD_STRIP ++ ++ifeq ($(INSTALL_MOD_STRIP),1) ++strip-option := --strip-debug ++else ++strip-option := $(INSTALL_MOD_STRIP) ++endif ++ ++quiet_cmd_strip = STRIP $@ ++ cmd_strip = $(STRIP) $(strip-option) $@ ++ ++else ++ ++quiet_cmd_strip = ++ cmd_strip = : ++ ++endif ++ ++# ++# Signing ++# Don't stop modules_install even if we can't sign external modules. 
++# ++ifeq ($(CONFIG_MODULE_SIG_ALL),y) ++quiet_cmd_sign = SIGN $@ ++$(eval $(call config_filename,MODULE_SIG_KEY)) ++ cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509 $@ \ ++ $(if $(KBUILD_EXTMOD),|| true) ++else ++quiet_cmd_sign := ++ cmd_sign := : ++endif ++ ++$(dst)/%.ko: $(extmod_prefix)%.ko FORCE ++ $(call cmd,install) ++ $(call cmd,strip) ++ $(call cmd,sign) ++ ++# ++# Compression ++# ++quiet_cmd_gzip = GZIP $@ ++ cmd_gzip = $(KGZIP) -n -f $< ++quiet_cmd_xz = XZ $@ ++ cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< ++ ++$(dst)/%.ko.gz: $(dst)/%.ko FORCE ++ $(call cmd,gzip) + +-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) ++$(dst)/%.ko.xz: $(dst)/%.ko FORCE ++ $(call cmd,xz) + +-$(modules): +- $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) ++PHONY += FORCE ++FORCE: + + .PHONY: $(PHONY) +-- +2.30.2 + diff --git a/packages/kernel-5.10/2001-kbuild-add-support-for-zstd-compressed-modules.patch b/packages/kernel-5.10/2001-kbuild-add-support-for-zstd-compressed-modules.patch new file mode 100644 index 00000000..9cd6a36b --- /dev/null +++ b/packages/kernel-5.10/2001-kbuild-add-support-for-zstd-compressed-modules.patch @@ -0,0 +1,82 @@ +From 35822be50c3068a660331b82a6d37db42bc78126 Mon Sep 17 00:00:00 2001 +From: Piotr Gorski +Date: Wed, 7 Apr 2021 18:09:27 +0200 +Subject: [PATCH 2001/2001] kbuild: add support for zstd compressed modules + +kmod 28 supports modules compressed in zstd format so let's add this +possibility to kernel. + +Signed-off-by: Piotr Gorski +Reviewed-by: Oleksandr Natalenko +Signed-off-by: Masahiro Yamada +(cherry picked from commit c3d7ef377eb2564b165b1e8fdb4646952c90ac17) +[fixed a merge conflict in init/Kconfig] +Signed-off-by: Arnaldo Garcia Rincon +--- + init/Kconfig | 11 +++++++++-- + scripts/Makefile.modinst | 6 ++++++ + 2 files changed, 15 insertions(+), 2 deletions(-) + +diff --git a/init/Kconfig b/init/Kconfig +index fc4c9f416fad..113481826737 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -2234,8 +2234,9 @@ config MODULE_COMPRESS + Out-of-tree kernel modules installed using Kbuild will also be + compressed upon installation. + +- Note: for modules inside an initrd or initramfs, it's more efficient +- to compress the whole initrd or initramfs instead. ++ Please note that the tool used to load modules needs to support the ++ corresponding algorithm. module-init-tools MAY support gzip, and kmod ++ MAY support gzip, xz and zstd. + + Note: This is fully compatible with signed modules. + +@@ -2257,6 +2258,12 @@ config MODULE_COMPRESS_GZIP + config MODULE_COMPRESS_XZ + bool "XZ" + ++config MODULE_COMPRESS_ZSTD ++ bool "ZSTD" ++ help ++ Compress modules with ZSTD. The installed modules are suffixed ++ with .ko.zst. 
++ + endchoice + + config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS +diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst +index 84696ef99df7..59f613aa08b4 100644 +--- a/scripts/Makefile.modinst ++++ b/scripts/Makefile.modinst +@@ -21,6 +21,7 @@ endif + suffix-y := + suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz + suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz ++suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst + + modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) + +@@ -86,6 +87,8 @@ quiet_cmd_gzip = GZIP $@ + cmd_gzip = $(KGZIP) -n -f $< + quiet_cmd_xz = XZ $@ + cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< ++quiet_cmd_zstd = ZSTD $@ ++ cmd_zstd = $(ZSTD) -T0 --rm -f -q $< + + $(dst)/%.ko.gz: $(dst)/%.ko FORCE + $(call cmd,gzip) +@@ -93,6 +96,9 @@ $(dst)/%.ko.gz: $(dst)/%.ko FORCE + $(dst)/%.ko.xz: $(dst)/%.ko FORCE + $(call cmd,xz) + ++$(dst)/%.ko.zst: $(dst)/%.ko FORCE ++ $(call cmd,zstd) ++ + PHONY += FORCE + FORCE: + +-- +2.30.2 + diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 26cce3d6..baf23abd 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -61,3 +61,14 @@ CONFIG_DEBUG_INFO_BTF=y # We don't want to extend the kernel command line with any upstream defaults; # Bottlerocket uses a fairly custom setup that needs tight control over it. CONFIG_CMDLINE_EXTEND=n + +# Enable ZSTD kernel image compression +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_ZSTD=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_DECOMPRESS_ZSTD=y + +# Enable ZSTD modules compression +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 44419ef1..8d4e05a5 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -12,6 +12,9 @@ Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch +# Add zstd support for compressed kernel modules +Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch +Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch BuildRequires: bc BuildRequires: elfutils-devel From 383582feab7e99c440475d93ac6ec560755d6340 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Mon, 19 Jul 2021 18:25:42 +0000 Subject: [PATCH 0503/1356] kubelet: add setting for configuring TopologyManagerScope pass topology-manager-scope argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 0a6ad55f..efecd2b3 100644 --- a/README.md +++ b/README.md @@ -357,6 +357,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. 
The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. +* `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From a72a81bd826578e1b7f73d87bfa1fea8d0816748 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Mon, 19 Jul 2021 21:10:37 +0000 Subject: [PATCH 0504/1356] kubelet: add setting for configuring TopologyManagerPolicy pass topology-manager-policy argument to kubelet --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index efecd2b3..b4bdfb5a 100644 --- a/README.md +++ b/README.md @@ -357,6 +357,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. +* `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. * `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. You can also optionally specify static pods for your node with the following settings. 
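Taken together, the two patches above document how the new topology manager settings sit alongside the existing CPU manager settings. A minimal user-data sketch, assuming the TOML settings format shown elsewhere in the README and using purely illustrative values, might look like:

```toml
# Illustrative sketch only: the keys come from the README entries added above;
# the values are examples of the documented options, not defaults.
[settings.kubernetes]
cpu-manager-policy = "static"                 # grant eligible pods exclusive CPUs
topology-manager-policy = "single-numa-node"  # one of: none, restricted, best-effort, single-numa-node
topology-manager-scope = "pod"                # group all containers in a pod onto a common NUMA node set
```
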
From 8a098c916f98e1a08fb1650f96e5a8ff15b25d4f Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 30 Jul 2021 10:58:48 -0700 Subject: [PATCH 0505/1356] tools: use tokio LTS release series 1.8.x --- tools/Cargo.lock | 4 ++-- tools/pubsys/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c4241668..c75250eb 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2027,9 +2027,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.7.1" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb2ed024293bb19f7a5dc54fe83bf86532a44c12a2bb8ba40d64a4509395ca2" +checksum = "00a287ce596d527f273dea7638a044739234740dbad141e7ed0c62c7d0c9c55a" dependencies = [ "autocfg", "bytes", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 8fde46a8..6ee4cdd0 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -36,7 +36,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } tinytemplate = "1.1" -tokio = { version = "1", features = ["full"] } +tokio = { version = "~1.8", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" tough = { version = "0.11", features = ["http"] } From b44605599e3114b063ba40df767579b497baee6d Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 30 Jul 2021 16:16:36 -0700 Subject: [PATCH 0506/1356] tools: update rusoto to 0.47.0 This brings in tough and coldsnap updates to use a consistent version. --- tools/Cargo.lock | 287 ++++++++++------------------------------ tools/pubsys/Cargo.toml | 14 +- 2 files changed, 77 insertions(+), 224 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c75250eb..51140fae 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -105,12 +105,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base-x" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" - [[package]] name = "base64" version = "0.13.0" @@ -210,7 +204,7 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time 0.1.43", + "time", "winapi", ] @@ -231,9 +225,9 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d79a8cb3e52be4d3c7651152de3a712441c519c0b1757d1e3be77aae63c90d4" +checksum = "e0abab1b9dde1257595c242aca970d5b3af6f569d979f4a9a571a392942db87e" dependencies = [ "argh", "base64", @@ -264,12 +258,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "const_fn" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" - [[package]] name = "core-foundation" version = "0.9.1" @@ -350,9 +338,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ "generic-array", "subtle", @@ -397,24 +385,12 @@ dependencies = [ "winapi", ] -[[package]] -name = "discard" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "doc-comment" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "duct" version = "0.13.5" @@ -472,9 +448,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "1adc00f486adfc9ce99f77d717836f0c5aa84965eb0b4f051f4e83f7cab53f8b" dependencies = [ "futures-channel", "futures-core", @@ -487,9 +463,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" +checksum = "74ed2411805f6e4e3d9bc904c95d5d423b89b3b25dc0250aa74729de20629ff9" dependencies = [ "futures-core", "futures-sink", @@ -497,15 +473,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" [[package]] name = "futures-executor" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" +checksum = "4d0d535a57b87e1ae31437b892713aee90cd2d7b0ee48727cd11fc72ef54761c" dependencies = [ "futures-core", "futures-task", @@ -514,15 +490,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "0b0e06c393068f3a6ef246c75cdca793d6a46347e75286933e5e75fd2fd11582" [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "c54913bae956fb8df7f4dc6fc90362aa72e69148e3f39041fbe8742d21e0ac57" dependencies = [ "autocfg", "proc-macro-hack", @@ -533,21 +509,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "c0f30aaa67363d119812743aa5f33c201a7a66329f97d1a887022971feea4b53" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" 
+checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" dependencies = [ "autocfg", "futures-channel", @@ -655,9 +631,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", "digest", @@ -847,10 +823,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] -name = "md5" -version = "0.7.0" +name = "md-5" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" +checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" +dependencies = [ + "block-buffer", + "digest", + "opaque-debug", +] [[package]] name = "memchr" @@ -966,9 +947,9 @@ dependencies = [ [[package]] name = "olpc-cjson" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9409e2493366c8f19387c98c5189ab9c937541b5bf48f11390d038a59fdfd9c1" +checksum = "72ca49fe685014bbf124ee547da94ed7bb65a6eb9dc9c4711773c081af96a39c" dependencies = [ "serde", "serde_json", @@ -1142,7 +1123,7 @@ dependencies = [ "rusoto_signature", "rusoto_ssm", "rusoto_sts", - "semver 1.0.3", + "semver", "serde", "serde_json", "simplelog", @@ -1336,7 +1317,7 @@ dependencies = [ "pin-project-lite", "rustls", "serde", - "serde_urlencoded 0.7.0", + "serde_urlencoded", "tokio", "tokio-rustls", "url", @@ -1364,9 +1345,9 @@ dependencies = [ [[package]] name = "rusoto_core" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02aff20978970d47630f08de5f0d04799497818d16cafee5aec90c4b4d0806cf" +checksum = "5b4f000e8934c1b4f70adde180056812e7ea6b1a247952db8ee98c94cd3116cc" dependencies = [ "async-trait", "base64", @@ -1389,9 +1370,9 @@ dependencies = [ [[package]] name = "rusoto_credential" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e91e4c25ea8bfa6247684ff635299015845113baaa93ba8169b9e565701b58e" +checksum = "6a46b67db7bb66f5541e44db22b0a02fed59c9603e146db3a9e633272d3bac2f" dependencies = [ "async-trait", "chrono", @@ -1407,9 +1388,9 @@ dependencies = [ [[package]] name = "rusoto_ebs" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d42287c611c85d1ad06ac41fb91bad8a87fd8607836c5135ae3254be36c19402" +checksum = "618ce34e8ec52dfd0f597608ec21049e4d7379d737b5f2cc339c92b61d096e0d" dependencies = [ "async-trait", "bytes", @@ -1422,23 +1403,23 @@ dependencies = [ [[package]] name = "rusoto_ec2" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83733a43d1369fb58e5d7198e31d6196de22539f2cd87c4168f5e25a794612d6" +checksum = "92315363c2f2acda29029ce0ce0e58e1c32caf10c9719068a1ec102add3d4878" dependencies = [ "async-trait", "bytes", "futures", "rusoto_core", - "serde_urlencoded 0.6.1", + "serde_urlencoded", "xml-rs", ] [[package]] name = "rusoto_kms" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e5655f80886a4b0f6f57ca0921e38b4f96e5c70135dd8d6d2a7ee8e70f0e013" +checksum = "d7892cd2cca7644d33bd6fafdb2236efd3659162fd7b73ca68d3877f0528399c" dependencies = [ "async-trait", "bytes", @@ -1450,34 +1431,35 @@ dependencies = [ [[package]] name = "rusoto_signature" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486e6b1673ab3e0ba1ded284fb444845fe1b7f41d13989a54dd60f62a7b2baa" +checksum = "6264e93384b90a747758bcc82079711eacf2e755c3a8b5091687b5349d870bcc" dependencies = [ "base64", "bytes", + "chrono", + "digest", "futures", "hex", "hmac", "http", "hyper", "log", - "md5", + "md-5", "percent-encoding", "pin-project-lite", "rusoto_credential", "rustc_version", "serde", "sha2", - "time 0.2.27", "tokio", ] [[package]] name = "rusoto_ssm" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08d672711b9e8dad45565c86ecc93a0a1bfbab57a2547a428fda5486473405d" +checksum = "050304a18997ab01994d4a452472199088dc0376e61d1f50e2d9675227a0fe0c" dependencies = [ "async-trait", "bytes", @@ -1489,16 +1471,16 @@ dependencies = [ [[package]] name = "rusoto_sts" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f93005e0c3b9e40a424b50ca71886d2445cc19bb6cdac3ac84c2daff482eb59" +checksum = "4e7edd42473ac006fd54105f619e480b0a94136e7f53cf3fb73541363678fd92" dependencies = [ "async-trait", "bytes", "chrono", "futures", "rusoto_core", - "serde_urlencoded 0.6.1", + "serde_urlencoded", "xml-rs", ] @@ -1510,11 +1492,11 @@ checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 0.9.0", + "semver", ] [[package]] @@ -1606,15 +1588,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.3" @@ -1624,12 +1597,6 @@ dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" version = "1.0.126" @@ -1670,18 +1637,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -dependencies = [ - "dtoa", - "itoa", - "serde", - "url", -] - [[package]] name = "serde_urlencoded" version = "0.7.0" @@ -1694,12 +1649,6 @@ dependencies = [ "serde", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" version = "0.9.5" @@ -1731,9 +1680,9 @@ checksum = "b6fa3938c99da4914afedd13bf3d79bcb6c277d1b2c398d23257a304d9e1b074" [[package]] name = "shlex" -version = "0.1.1" +version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" [[package]] name = "signal-hook-registry" @@ -1805,64 +1754,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - [[package]] name = "strsim" version = "0.8.0" @@ -1962,44 +1853,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -2103,9 +1956,9 @@ dependencies = [ [[package]] name = "tough" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "203cc46930159ea049e9308ca0e31815bb57f761e6345fa5d42dbcbd06c1809c" +checksum = "25e4c7428cbe9c2989bd5b7c1fef62e574106020d8e3a8168455fb19b43c81e4" dependencies = [ "chrono", "dyn-clone", @@ -2128,9 +1981,9 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b99dba0d219a96733df48372dfe315b4d5bbfa9a151649c6bba9d32a0274aed8" +checksum = 
"87de009112dcd35d79f594074e719d8c89a533ba321ce523bdc88cf4c456bd45" dependencies = [ "pem", "ring", @@ -2144,9 +1997,9 @@ dependencies = [ [[package]] name = "tough-ssm" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abe301282442a697b40ae02f845bbf0d340e959a610c02d03d032258d0ce0fe" +checksum = "6a09388d82c02fe77ac4aac43d8d5d2766b43736c7f8d0b1b7fe8c7aa713a093" dependencies = [ "rusoto_core", "rusoto_credential", @@ -2245,7 +2098,7 @@ dependencies = [ "chrono", "parse-datetime", "regex", - "semver 1.0.3", + "semver", "serde", "serde_json", "serde_plain", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 6ee4cdd0..01f69d76 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -22,13 +22,13 @@ parse-datetime = { path = "../../sources/parse-datetime" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } -rusoto_core = { version = "0.46.0", default-features = false, features = ["rustls"] } -rusoto_credential = "0.46.0" -rusoto_ebs = { version = "0.46.0", default-features = false, features = ["rustls"] } -rusoto_ec2 = { version = "0.46.0", default-features = false, features = ["rustls"] } -rusoto_signature = "0.46.0" -rusoto_ssm = { version = "0.46.0", default-features = false, features = ["rustls"] } -rusoto_sts = { version = "0.46.0", default-features = false, features = ["rustls"] } +rusoto_core = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_credential = "0.47.0" +rusoto_ebs = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_ec2 = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_signature = "0.47.0" +rusoto_ssm = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_sts = { version = "0.47.0", default-features = false, features = ["rustls"] } simplelog = "0.10.0" snafu = "0.6" semver = "1.0" From 5f9064eafbb07dca7774019caa36dccb3c54387c Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 30 Jul 2021 16:23:49 -0700 Subject: [PATCH 0507/1356] tools: cargo update --- tools/Cargo.lock | 120 +++++++++++++++++++++++------------------------ 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 51140fae..28c5cf53 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. 
[[package]] name = "addr2line" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ "gimli", ] @@ -64,9 +64,9 @@ checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" [[package]] name = "async-trait" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ "proc-macro2", "quote", @@ -92,9 +92,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ "addr2line", "cc", @@ -184,9 +184,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cfg-if" @@ -276,9 +276,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpufeatures" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" dependencies = [ "libc", ] @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -563,9 +563,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "globset" @@ -601,9 +601,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "heck" @@ -616,9 +616,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -684,9 +684,9 @@ checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.9" 
+version = "0.14.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" +checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" dependencies = [ "bytes", "futures-channel", @@ -736,9 +736,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", "hashbrown", @@ -758,9 +758,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ "cfg-if", ] @@ -794,9 +794,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.97" +version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "lock_api" @@ -938,9 +938,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.25.3" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" dependencies = [ "memchr", ] @@ -1037,9 +1037,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -1091,9 +1091,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" dependencies = [ "unicode-xid", ] @@ -1295,9 +1295,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" +checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" dependencies = [ "base64", "bytes", @@ -1590,9 +1590,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f3aac57ee7f3272d8395c6e4f502f434f0e289fcd62876f70daa008c20dcabe" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" dependencies = [ "serde", ] @@ -1619,9 +1619,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.66" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" dependencies = [ "itoa", "ryu", @@ -1740,9 +1740,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" dependencies = [ "libc", "winapi", @@ -1762,9 +1762,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +checksum = "69b041cdcb67226aca307e6e7be44c8806423d83e018bd662360a93dabce4d71" dependencies = [ "clap", "lazy_static", @@ -1773,9 +1773,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" +checksum = "7813934aecf5f51a54775e00068c237de98489463968231a51746bbbc03f9c10" dependencies = [ "heck", "proc-macro-error", @@ -1786,15 +1786,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" dependencies = [ "proc-macro2", "quote", @@ -1865,9 +1865,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" dependencies = [ "tinyvec_macros", ] @@ -1900,9 +1900,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", @@ -1922,9 +1922,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", "pin-project-lite", @@ -2069,9 +2069,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = 
"8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" @@ -2297,12 +2297,12 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" +checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" [[package]] name = "zeroize" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" From 6bc608dd69c5c6a0bc5f57307d42d21e829e729e Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 22 Jun 2021 15:12:14 -0700 Subject: [PATCH 0508/1356] docker, containerd: container image registry mirrors Adds a new setting `settings.container-registry.mirrors` that lets users set pull-through caches for container image registries. --- README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/README.md b/README.md index b4bdfb5a..25b90abe 100644 --- a/README.md +++ b/README.md @@ -408,6 +408,19 @@ These settings can be changed at any time. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. +#### Container image registry settings + +The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your orchestrated containers. +* `settings.container-registry.mirrors`: A mapping of container image registry to a list of image registry URL endpoints. When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. + (Docker and containerd will still try the default registry URL if the mirrors fail.) + * Example user data for setting up image registry mirrors: + ``` + [settings.container-registry.mirrors] + "docker.io" = ["https://"] + "gcr.io" = ["https://","http://"] + ``` + If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. + #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. From 4c0a76b6a335809aae5528988ef636ac6508969d Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 26 Jul 2021 22:52:09 -0700 Subject: [PATCH 0509/1356] host-ctr: add support for container image registries Adds support for configuring container image registrys for host-containers and bootstrap containers. A lot of the logic is borrowed from containerd-cri plugin's implementation of image registry mirrors. --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 25b90abe..7edd76b1 100644 --- a/README.md +++ b/README.md @@ -410,7 +410,7 @@ These settings can be changed at any time. #### Container image registry settings -The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your orchestrated containers. 
+The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. * `settings.container-registry.mirrors`: A mapping of container image registry to a list of image registry URL endpoints. When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. (Docker and containerd will still try the default registry URL if the mirrors fail.) * Example user data for setting up image registry mirrors: @@ -421,6 +421,8 @@ The following setting is optional and allows you to configure image registry mir ``` If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. +For [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) images from Amazon ECR private repositories, registry mirrors are currently unsupported. + #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. From 5ef365f3ed96baba69a36843c241340446058e89 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 3 Aug 2021 02:57:05 +0000 Subject: [PATCH 0510/1356] packages: remove kernel module compression for 5.4 kernel --- packages/kernel-5.4/config-bottlerocket | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index baf23abd..27a1ad9b 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -68,7 +68,3 @@ CONFIG_KERNEL_ZSTD=y CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y - -# Enable ZSTD modules compression -CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_ZSTD=y From d01fab0b57520ec105c13ab0de2e6186ca9652b5 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 3 Aug 2021 02:57:27 +0000 Subject: [PATCH 0511/1356] packages: remove kernel modules compression for 5.10 kernel --- packages/kernel-5.10/config-bottlerocket | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index baf23abd..27a1ad9b 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -68,7 +68,3 @@ CONFIG_KERNEL_ZSTD=y CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y - -# Enable ZSTD modules compression -CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_ZSTD=y From 59a0d194a7c43a469ee0af8c338ad9537fe5f7a2 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 2 Aug 2021 10:47:02 -0700 Subject: [PATCH 0512/1356] Update default variant to aws-k8s-1.21 --- BUILDING.md | 2 +- README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index ccc596ee..4c4aeabf 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -71,7 +71,7 @@ To build an image, run: cargo make ``` -This will build an image for the default variant, `aws-k8s-1.19`. +This will build an image for the default variant, `aws-k8s-1.21`. All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. 
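This patch only changes the documented default, so builds of other variants work the same way. A sketch of overriding the variant at build time, assuming the `BUILDSYS_VARIANT` variable used by the project's cargo-make based build, would be:

```bash
# Hypothetical invocation: builds the aws-ecs-1 variant instead of the
# default aws-k8s-1.21; BUILDSYS_VARIANT selects the variant to build.
cargo make -e BUILDSYS_VARIANT=aws-ecs-1
```
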
diff --git a/README.md b/README.md index 7edd76b1..ab599759 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s-1.19` variant will produce an image named `bottlerocket-aws-k8s-1.19-x86_64--.img`. +For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an image named `bottlerocket-aws-k8s-1.21-x86_64--.img`. The following variants support EKS, as described above: @@ -694,7 +694,7 @@ We currently package the following major third-party components: * systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) * wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) -* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.19/)) +* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.21/)) * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) * Amazon ECS agent ([background](https://github.com/aws/amazon-ecs-agent), [packaging](packages/ecs-agent/)) From 2be71fedd92030643656eb1dc0abfc9f10d649c7 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 3 Aug 2021 17:50:05 -0700 Subject: [PATCH 0513/1356] netdog: remove ip string formatting when setting as hostname This removes the "ip-X-X-X-X" formatting of the IP address when we're using it to set as our hostname. This fixes upgrades and downgrades to and from versions of Bottlerocket that uses their plain IP as their hostname when registering with the K8s control plane. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ab599759..0da1da56 100644 --- a/README.md +++ b/README.md @@ -438,7 +438,7 @@ For [host-container](#host-containers-settings) and [bootstrap-container](#boots Most users don't need to change this setting as the following defaults work for the majority of use cases. If this setting isn't set we attempt to use DNS reverse lookup for the hostname. -If the lookup is unsuccessful, the IP of the node is used in the format `ip-X-X-X-X`. +If the lookup is unsuccessful, the IP of the node is used. ##### Proxy settings From 85e7face9ce79329c74f12dc7a1c3203151ca6fc Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 6 Aug 2021 23:44:36 +0000 Subject: [PATCH 0514/1356] logdog: change default output directory for tarball This commit changes the default output directory for the tarball created by logdog, from `/tmp` to `/var/log/support`. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0da1da56..baedad9e 100644 --- a/README.md +++ b/README.md @@ -642,14 +642,14 @@ sudo sheltie logdog ``` -This will write an archive of the logs to `/tmp/bottlerocket-logs.tar.gz`. +This will write an archive of the logs to `/var/log/support/bottlerocket-logs.tar.gz`. You can use SSH to retrieve the file. 
Once you have exited from the Bottlerocket host, run a command like: ```bash ssh -i YOUR_KEY_FILE \ ec2-user@YOUR_HOST \ - "cat /.bottlerocket/rootfs/tmp/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz + "cat /.bottlerocket/rootfs/var/log/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz ``` For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). From 8dbfc78ee8cb5501afb07dcdcfb8c8ffa0a0723c Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 9 Aug 2021 22:01:55 +0000 Subject: [PATCH 0515/1356] docs: fix kernel lockdown documentation This fixes the kernel lockdown's documentation, since the default values changed in newer variants Signed-off-by: Arnaldo Garcia Rincon --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index baedad9e..f0b0b387 100644 --- a/README.md +++ b/README.md @@ -476,7 +476,7 @@ Here are the metrics settings: #### Kernel settings * `settings.kernel.lockdown`: This allows further restrictions on what the Linux kernel will allow, for example preventing the loading of unsigned modules. - May be set to "none" (the default), "integrity", or "confidentiality". + May be set to "none" (the default in older [variants](variants/), up through aws-k8s-1.19), "integrity" (the default for newer [variants](variants/)), or "confidentiality". **Important note:** this setting cannot be lowered (toward 'none') at runtime. You must reboot for a change to a lower level to take effect. * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. From 3704660f1b1f6e4beefedd789b7518be44cf669d Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 10 Aug 2021 14:15:39 -0700 Subject: [PATCH 0516/1356] pubsys: raise messages to 'warn' if AMI exists or repo doesn't During development, if you make changes without committing, pubsys will see that an AMI exists for your (-dirty) commit and not register a new one. Similarly, if you have a repo configured in Infra.toml and expect to be updating it, but it doesn't exist, pubsys will create a new one. You may not want to continue testing in these cases if you thought a new AMI was going to be built, or an existing repo updated, with your recent changes. Raise these to 'warn' level so they're more obvious. 
--- tools/pubsys/src/aws/ami/mod.rs | 4 ++-- tools/pubsys/src/repo.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 3985e899..47aec810 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -10,7 +10,7 @@ use crate::aws::{client::build_client, parse_arch, region_from_string}; use crate::Args; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; -use log::{error, info, trace}; +use log::{error, info, trace, warn}; use pubsys_config::{AwsConfig, InfraConfig}; use register::{get_ami_id, register_image, RegisteredIds}; use rusoto_core::{Region, RusotoError}; @@ -149,7 +149,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> })?; let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { - info!( + warn!( "Found '{}' already registered in {}: {}", ami_args.name, base_region.name(), diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 385b6ac1..6c9630a1 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -7,7 +7,7 @@ pub(crate) mod validate_repo; use crate::{friendly_version, Args}; use chrono::{DateTime, Utc}; use lazy_static::lazy_static; -use log::{debug, info, trace}; +use log::{debug, info, trace, warn}; use parse_datetime::parse_datetime; use pubsys_config::{InfraConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig}; use semver::Version; @@ -460,7 +460,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { match load_editor_and_manifest(&repo_args.root_role_path, &metadata_url, &targets_url)? { Some((editor, manifest)) => (editor, manifest), None => { - info!( + warn!( "Did not find repo at '{}', starting a new one", metadata_url ); From 740b36fb52f315757246e02dbfe4fa025b486941 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 6 Aug 2021 04:57:29 +0000 Subject: [PATCH 0517/1356] kernel: enable EFI for x86_64 Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 5 +++++ packages/kernel-5.4/config-bottlerocket | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 27a1ad9b..e6eb3312 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -22,6 +22,11 @@ CONFIG_DAX=y CONFIG_DM_INIT=y CONFIG_DM_VERITY=y +# Enable EFI. +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y + # yama LSM for ptrace restrictions CONFIG_SECURITY_YAMA=y diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 27a1ad9b..e6eb3312 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -22,6 +22,11 @@ CONFIG_DAX=y CONFIG_DM_INIT=y CONFIG_DM_VERITY=y +# Enable EFI. +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y + # yama LSM for ptrace restrictions CONFIG_SECURITY_YAMA=y From 9e1c834d9deeae62e42b31d2e118d76b1215cfec Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 4 Aug 2021 17:09:45 +0000 Subject: [PATCH 0518/1356] grub: build EFI and BIOS images for x86_64 Previously we built x86_64 images for BIOS only; now we want to build them for EFI as well. This cleans up the GRUB related macros, which assumed a single target per architecture, and special cases the BIOS build for x86_64. 
Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 83 +++++++++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 23 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index dd9e830d..a369dcec 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -1,6 +1,9 @@ %global debug_package %{nil} %global __strip %{_bindir}/true +%global efidir /boot/efi/EFI/BOOT +%global biosdir /boot/grub + Name: %{_cross_os}grub Version: 2.04 Release: 1%{?dist} @@ -77,9 +80,11 @@ Summary: Tools for the bootloader with support for Linux and more %prep %autosetup -n grub-%{version} -p1 cp unicode/COPYING COPYING.unicode +./autogen.sh %global grub_cflags -pipe -fno-stack-protector -fno-strict-aliasing %global grub_ldflags -static +%global _configure ../configure %build export \ @@ -93,52 +98,85 @@ export \ TARGET_STRIP="%{_cross_target}-strip" \ PYTHON="python3" \ -./autogen.sh +%if "%{_cross_arch}" == "x86_64" +mkdir bios-build +pushd bios-build + %cross_configure \ CFLAGS="" \ LDFLAGS="" \ --host="%{_build}" \ - --target="%{_cross_grub_target}" \ - --with-platform="%{_cross_grub_platform}" \ + --target="i386" \ + --with-platform="pc" \ --disable-grub-mkfont \ + --disable-werror \ --enable-efiemu=no \ --enable-device-mapper=no \ --enable-libzfs=no \ + +%make_build +popd +%endif + +mkdir efi-build +pushd efi-build + +%cross_configure \ + CFLAGS="" \ + LDFLAGS="" \ + --host="%{_build}" \ + --target="%{_cross_arch}" \ + --with-platform="efi" \ + --disable-grub-mkfont \ --disable-werror \ + --enable-efiemu=no \ + --enable-device-mapper=no \ + --enable-libzfs=no \ %make_build +popd %install -%make_install - -mkdir -p %{buildroot}%{_cross_grubdir} +MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep" +%if "%{_cross_arch}" == "x86_64" +pushd bios-build +%make_install +mkdir -p %{buildroot}%{biosdir} grub2-mkimage \ -c %{SOURCE1} \ -d ./grub-core/ \ - -O "%{_cross_grub_tuple}" \ - -o "%{buildroot}%{_cross_grubdir}/%{_cross_grub_image}" \ - -p "%{_cross_grub_prefix}" \ -%if "%{_cross_arch}" == "x86_64" - biosdisk \ -%else - efi_gop \ -%endif - configfile echo ext2 gptprio linux normal part_gpt reboot sleep - -%if "%{_cross_arch}" == "x86_64" + -O "i386-pc" \ + -o "%{buildroot}%{biosdir}/core.img" \ + -p "(hd0,gpt2)/boot/grub" \ + biosdisk ${MODS} install -m 0644 ./grub-core/boot.img \ - %{buildroot}%{_cross_grubdir}/boot.img + %{buildroot}%{biosdir}/boot.img +popd %endif +pushd efi-build +%make_install +mkdir -p %{buildroot}%{efidir} +grub2-mkimage \ + -c %{SOURCE1} \ + -d ./grub-core/ \ + -O "%{_cross_grub_efi_format}" \ + -o "%{buildroot}%{efidir}/%{_cross_grub_efi_image}" \ + -p "/EFI/BOOT" \ + efi_gop ${MODS} +popd + %files %license COPYING COPYING.unicode %{_cross_attribution_file} -%dir %{_cross_grubdir} %if "%{_cross_arch}" == "x86_64" -%{_cross_grubdir}/boot.img +%dir %{biosdir} +%{biosdir}/boot.img +%{biosdir}/core.img %endif -%{_cross_grubdir}/%{_cross_grub_image} +%dir %{efidir} +%{efidir}/%{_cross_grub_efi_image} %{_cross_sbindir}/grub-bios-setup %exclude %{_cross_infodir} %exclude %{_cross_localedir} @@ -146,8 +184,7 @@ install -m 0644 ./grub-core/boot.img \ %files modules %dir %{_cross_libdir}/grub -%dir %{_cross_libdir}/grub/%{_cross_grub_tuple} -%{_cross_libdir}/grub/%{_cross_grub_tuple}/* +%{_cross_libdir}/grub/* %files tools %{_cross_bindir}/grub-editenv From b34433b8f09d9476e85ad77e01d6d2453ecc1537 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 4 Aug 2021 18:00:25 +0000 Subject: [PATCH 0519/1356] grub: update to 2.06 
This moves to Amazon Linux as our upstream, by way of Fedora. Fedora's patches include an implementation of Secure Boot support, as well as many other fixes that are not yet upstream. Amazon Linux's patches include fixes for console handling in EC2, and on metal instances in particular. They also add an option to disable module loading, which eliminates a potential vector for altering the boot process. Signed-off-by: Ben Cressey --- ...d-root-device-argument-to-grub-setup.patch | 65 ++++++++++--------- .../grub/0002-gpt-start-new-GPT-module.patch | 21 +++--- ...name-misnamed-header-location-fields.patch | 11 ++-- ...-record-size-of-of-the-entries-table.patch | 9 ++- ...t-consolidate-crc32-computation-code.patch | 7 +- ...ir-function-to-sync-up-primary-and-b.patch | 11 ++-- ...write-function-and-gptrepair-command.patch | 21 +++--- ...0008-gpt-add-a-new-generic-GUID-type.patch | 9 ++- ...next-command-for-selecting-priority-.patch | 21 +++--- ...gpt-split-out-checksum-recomputation.patch | 9 ++- ...d-printing-function-to-common-librar.patch | 11 ++-- ...tch-partition-names-to-a-16-bit-type.patch | 9 ++- ...partitions-to-the-gpt-unit-test-data.patch | 7 +- ...by-partition-label-and-uuid-commands.patch | 29 +++++---- ...n-up-little-endian-crc32-computation.patch | 7 +- packages/grub/0016-gpt-minor-cleanup.patch | 9 ++- ...-gpt-add-search-by-disk-uuid-command.patch | 27 ++++---- ...isk-sizes-GRUB-will-reject-as-invali.patch | 7 +- .../0019-gpt-add-verbose-debug-logging.patch | 7 +- ...pt-improve-validation-of-GPT-headers.patch | 7 +- ...0021-gpt-refuse-to-write-to-sector-0.patch | 7 +- ...rly-detect-and-repair-invalid-tables.patch | 9 ++- ...repair_test-fix-typo-in-cleanup-trap.patch | 7 +- ...eck-GPT-is-repaired-when-appropriate.patch | 7 +- ...tition-table-indexing-and-validation.patch | 13 ++-- ...-disk-size-from-header-over-firmware.patch | 9 ++- ...dd-helper-for-picking-a-valid-header.patch | 7 +- .../0028-gptrepair-fix-status-checking.patch | 7 +- ...e-functions-for-checking-status-bits.patch | 13 ++-- ...30-gpt-allow-repair-function-to-noop.patch | 9 ++- ...ot-use-an-enum-for-status-bit-values.patch | 7 +- ...der-and-entries-status-bits-together.patch | 7 +- ...reful-about-relocating-backup-header.patch | 7 +- ...ectively-update-fields-during-repair.patch | 7 +- ...evalidate-when-recomputing-checksums.patch | 7 +- ...backup-in-sync-check-in-revalidation.patch | 7 +- ...-table-at-the-same-time-as-the-heade.patch | 7 +- ...8-gpt-report-all-revalidation-errors.patch | 7 +- ...pdate-documentation-for-grub_gpt_upd.patch | 11 ++-- ...ackup-GPT-first-skip-if-inaccessible.patch | 7 +- ...ottlerocket-boot-partition-type-GUID.patch | 11 ++-- packages/grub/Cargo.toml | 4 +- packages/grub/grub.spec | 47 +++++++++++++- packages/grub/latest-srpm-url.sh | 2 + 44 files changed, 350 insertions(+), 180 deletions(-) create mode 100755 packages/grub/latest-srpm-url.sh diff --git a/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch b/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch index 7d15b4b7..797be263 100644 --- a/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch +++ b/packages/grub/0001-setup-Add-root-device-argument-to-grub-setup.patch @@ -1,4 +1,4 @@ -From 9ddb865df4ea2fb79edaf6c3a8b0122796800014 Mon Sep 17 00:00:00 2001 +From fa856e9d6cce8cac8f55bb00392725961c264b8f Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 6 Aug 2019 17:37:19 +0000 Subject: [PATCH] setup: Add root device argument to grub-setup @@ -12,50 +12,53 @@ 
https://github.com/openwrt/openwrt/blob/65c8f2890ca4f41f5b933b5bc1e43de86cc1bd54 util/setup.c | 10 +++++++--- 4 files changed, 22 insertions(+), 8 deletions(-) +[bcressey: updated for grub 2.06] +Signed-off-by: Ben Cressey + diff --git a/include/grub/util/install.h b/include/grub/util/install.h -index 2631b1074..ff02c365c 100644 +index 51f3b13..18cd28f 100644 --- a/include/grub/util/install.h +++ b/include/grub/util/install.h -@@ -191,13 +191,13 @@ grub_install_get_image_target (const char *arg); +@@ -205,13 +205,13 @@ grub_install_get_image_target (const char *arg); void grub_util_bios_setup (const char *dir, const char *boot_file, const char *core_file, - const char *dest, int force, + const char *root, const char *dest, int force, int fs_probe, int allow_floppy, - int add_rs_codes); + int add_rs_codes, int warn_short_mbr_gap); void grub_util_sparc_setup (const char *dir, const char *boot_file, const char *core_file, - const char *dest, int force, + const char *root, const char *dest, int force, int fs_probe, int allow_floppy, - int add_rs_codes); + int add_rs_codes, int warn_short_mbr_gap); diff --git a/util/grub-install.c b/util/grub-install.c -index 8a55ad4b8..c0a2c5ec0 100644 +index 5babc7a..4233ac8 100644 --- a/util/grub-install.c +++ b/util/grub-install.c -@@ -1712,7 +1712,7 @@ main (int argc, char *argv[]) - /* Now perform the installation. */ +@@ -1770,7 +1770,7 @@ main (int argc, char *argv[]) if (install_bootsector) - grub_util_bios_setup (platdir, "boot.img", "core.img", -- install_drive, force, -+ NULL, install_drive, force, - fs_probe, allow_floppy, add_rs_codes); - break; - } -@@ -1738,7 +1738,7 @@ main (int argc, char *argv[]) - /* Now perform the installation. */ + { + grub_util_bios_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, add_rs_codes, + !grub_install_is_short_mbrgap_supported ()); + +@@ -1801,7 +1801,7 @@ main (int argc, char *argv[]) if (install_bootsector) - grub_util_sparc_setup (platdir, "boot.img", "core.img", -- install_drive, force, -+ NULL, install_drive, force, - fs_probe, allow_floppy, - 0 /* unused */ ); - break; + { + grub_util_sparc_setup (platdir, "boot.img", "core.img", +- install_drive, force, ++ NULL, install_drive, force, + fs_probe, allow_floppy, + 0 /* unused */, 0 /* unused */ ); + diff --git a/util/grub-setup.c b/util/grub-setup.c -index 42b98ad3c..ae1f98f75 100644 +index 1783224..48cde49 100644 --- a/util/grub-setup.c +++ b/util/grub-setup.c @@ -87,6 +87,8 @@ static struct argp_option options[] = { @@ -96,20 +99,21 @@ index 42b98ad3c..ae1f98f75 100644 - dest_dev, arguments.force, + arguments.root_dev, dest_dev, arguments.force, arguments.fs_probe, arguments.allow_floppy, - arguments.add_rs_codes); + arguments.add_rs_codes, 0); diff --git a/util/setup.c b/util/setup.c -index 6f88f3cc4..bd7bb9c86 100644 +index 8b22bb8..960aeda 100644 --- a/util/setup.c +++ b/util/setup.c -@@ -252,13 +252,12 @@ identify_partmap (grub_disk_t disk __attribute__ ((unused)), +@@ -252,14 +252,13 @@ identify_partmap (grub_disk_t disk __attribute__ ((unused)), void SETUP (const char *dir, const char *boot_file, const char *core_file, - const char *dest, int force, + const char *root, const char *dest, int force, int fs_probe, int allow_floppy, - int add_rs_codes __attribute__ ((unused))) /* unused on sparc64 */ + int add_rs_codes __attribute__ ((unused)), /* unused on sparc64 */ + int warn_small) { char *core_path; char *boot_img, *core_img, *boot_path; @@ -117,7 +121,7 @@ index 
6f88f3cc4..bd7bb9c86 100644 size_t boot_size, core_size; grub_uint16_t core_sectors; grub_device_t root_dev = 0, dest_dev, core_dev; -@@ -307,7 +306,10 @@ SETUP (const char *dir, +@@ -311,7 +310,10 @@ SETUP (const char *dir, core_dev = dest_dev; @@ -129,7 +133,7 @@ index 6f88f3cc4..bd7bb9c86 100644 char **root_devices = grub_guess_root_devices (dir); char **cur; int found = 0; -@@ -320,6 +322,8 @@ SETUP (const char *dir, +@@ -324,6 +326,8 @@ SETUP (const char *dir, char *drive; grub_device_t try_dev; @@ -138,3 +142,6 @@ index 6f88f3cc4..bd7bb9c86 100644 drive = grub_util_get_grub_dev (*cur); if (!drive) continue; +-- +2.21.3 + diff --git a/packages/grub/0002-gpt-start-new-GPT-module.patch b/packages/grub/0002-gpt-start-new-GPT-module.patch index d850692f..896d62ab 100644 --- a/packages/grub/0002-gpt-start-new-GPT-module.patch +++ b/packages/grub/0002-gpt-start-new-GPT-module.patch @@ -1,4 +1,4 @@ -From 46217d1569b652fe38169a788a079b373cf6c91f Mon Sep 17 00:00:00 2001 +From 6635d2ce6f2b2a0be4049af3ce271cdab3fbd97b Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sun, 28 Sep 2014 21:26:21 -0700 Subject: [PATCH] gpt: start new GPT module @@ -20,10 +20,10 @@ The current code does nothing more than read and verify the table. create mode 100644 tests/gpt_unit_test.c diff --git a/Makefile.util.def b/Makefile.util.def -index 969d32f00..af8a008e2 100644 +index 3f191aa..c7efe17 100644 --- a/Makefile.util.def +++ b/Makefile.util.def -@@ -1270,6 +1270,22 @@ program = { +@@ -1389,6 +1389,22 @@ program = { ldadd = '$(LIBDEVMAPPER) $(LIBZFS) $(LIBNVPAIR) $(LIBGEOM)'; }; @@ -47,10 +47,10 @@ index 969d32f00..af8a008e2 100644 name = grub-menulst2cfg; mansection = 1; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 474a63e68..95dba2d26 100644 +index 9cff83f..3443e9c 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -893,6 +893,11 @@ module = { +@@ -932,6 +932,11 @@ module = { common = commands/gptsync.c; }; @@ -64,7 +64,7 @@ index 474a63e68..95dba2d26 100644 nopc = commands/halt.c; diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c new file mode 100644 -index 000000000..a308e8537 +index 0000000..a308e85 --- /dev/null +++ b/grub-core/lib/gpt.c @@ -0,0 +1,288 @@ @@ -357,7 +357,7 @@ index 000000000..a308e8537 + grub_free (gpt); +} diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 7a93f4329..ee9eb0b95 100644 +index 8212697..8cffe16 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -21,6 +21,7 @@ @@ -383,7 +383,7 @@ index 7a93f4329..ee9eb0b95 100644 grub_uint8_t magic[8]; @@ -78,10 +85,63 @@ struct grub_gpt_partentry char name[72]; - } GRUB_PACKED; + } GRUB_PACKED __attribute__ ((aligned(8))); +/* Basic GPT partmap module. */ grub_err_t @@ -447,7 +447,7 @@ index 7a93f4329..ee9eb0b95 100644 #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c new file mode 100644 -index 000000000..a824cd967 +index 0000000..a824cd9 --- /dev/null +++ b/tests/gpt_unit_test.c @@ -0,0 +1,467 @@ @@ -918,3 +918,6 @@ index 000000000..a824cd967 + grub_test_unregister ("gpt_read_fallback_test"); + grub_fini_all (); +} +-- +2.21.3 + diff --git a/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch b/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch index ff06baf7..d6351a56 100644 --- a/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch +++ b/packages/grub/0003-gpt-rename-misnamed-header-location-fields.patch @@ -1,4 +1,4 @@ -From eb194ecfc9137233703e530fa0411bc86405469b Mon Sep 17 00:00:00 2001 +From 7bf95046b9123cd7d57df6dee81cbbd75ca722f6 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 18 Oct 2014 15:39:13 -0700 Subject: [PATCH] gpt: rename misnamed header location fields @@ -13,7 +13,7 @@ field names are backwards for the backup header. 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index a308e8537..705bd77f9 100644 +index a308e85..705bd77 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -137,7 +137,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) @@ -26,7 +26,7 @@ index a308e8537..705bd77f9 100644 return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "Unable to locate backup GPT"); diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index ee9eb0b95..6d678fae2 100644 +index 8cffe16..1101d85 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -64,8 +64,8 @@ struct grub_gpt_header @@ -41,7 +41,7 @@ index ee9eb0b95..6d678fae2 100644 grub_uint64_t end; grub_uint8_t guid[16]; diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index a824cd967..4d70868af 100644 +index a824cd9..4d70868 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -94,8 +94,8 @@ static const struct grub_gpt_header example_primary = { @@ -66,3 +66,6 @@ index a824cd967..4d70868af 100644 .start = grub_cpu_to_le64_compile_time (DATA_START_SECTOR), .end = grub_cpu_to_le64_compile_time (DATA_END_SECTOR), .guid = {0xad, 0x31, 0xc1, 0x69, 0xd6, 0x67, 0xc6, 0x46, +-- +2.21.3 + diff --git a/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch b/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch index dabd5064..e0c931b8 100644 --- a/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch +++ b/packages/grub/0004-gpt-record-size-of-of-the-entries-table.patch @@ -1,4 +1,4 @@ -From b78d543b0ab493f6bd20f2e5d101fc11c9357faf Mon Sep 17 00:00:00 2001 +From 42ad151307ad12ca85ae1865f4abc79d25673ae3 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 18 Oct 2014 16:46:17 -0700 Subject: [PATCH] gpt: record size of of the entries table @@ -11,7 +11,7 @@ to disk. Restructure the entries reading code to flow a little better. 
2 files changed, 27 insertions(+), 31 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 705bd77f9..01df7f3e8 100644 +index 705bd77..01df7f3 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -153,7 +153,7 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) @@ -110,7 +110,7 @@ index 705bd77f9..01df7f3e8 100644 fail: grub_gpt_free (gpt); diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 6d678fae2..451b02a89 100644 +index 1101d85..21ea08d 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -106,7 +106,9 @@ typedef enum grub_gpt_status @@ -132,3 +132,6 @@ index 6d678fae2..451b02a89 100644 /* Logarithm of sector size, in case GPT and disk driver disagree. */ unsigned int log_sector_size; +-- +2.21.3 + diff --git a/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch b/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch index 8246377c..9c9918dd 100644 --- a/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch +++ b/packages/grub/0005-gpt-consolidate-crc32-computation-code.patch @@ -1,4 +1,4 @@ -From 39351efcb1aab82bb86052d21f07308429414a83 Mon Sep 17 00:00:00 2001 +From a0481ec20cd8f99f973385b6dcacff6201aa5661 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 18 Oct 2014 18:18:17 -0700 Subject: [PATCH] gpt: consolidate crc32 computation code @@ -10,7 +10,7 @@ keep this rather common operation easy to use. 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 01df7f3e8..43a150942 100644 +index 01df7f3..43a1509 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -32,22 +32,17 @@ static grub_uint8_t grub_gpt_magic[] = GRUB_GPT_HEADER_MAGIC; @@ -114,3 +114,6 @@ index 01df7f3e8..43a150942 100644 return grub_errno; } +-- +2.21.3 + diff --git a/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch b/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch index 8bf78901..f683671b 100644 --- a/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch +++ b/packages/grub/0006-gpt-add-new-repair-function-to-sync-up-primary-and-b.patch @@ -1,4 +1,4 @@ -From 5e9049af888cd344990fd031f93d7189ef340805 Mon Sep 17 00:00:00 2001 +From 36fc577048523438ffbf51291a40fe59eb479724 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 18 Oct 2014 18:21:07 -0700 Subject: [PATCH] gpt: add new repair function to sync up primary and backup @@ -11,7 +11,7 @@ Subject: [PATCH] gpt: add new repair function to sync up primary and backup 3 files changed, 142 insertions(+) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 43a150942..2d61df488 100644 +index 43a1509..2d61df4 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -31,6 +31,20 @@ GRUB_MOD_LICENSE ("GPLv3+"); @@ -119,7 +119,7 @@ index 43a150942..2d61df488 100644 grub_gpt_free (grub_gpt_t gpt) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 451b02a89..f367fe50d 100644 +index 21ea08d..b45acbd 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -141,6 +141,9 @@ grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) @@ -133,7 +133,7 @@ index 451b02a89..f367fe50d 100644 grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 4d70868af..83198bebf 100644 +index 4d70868..83198be 100644 --- a/tests/gpt_unit_test.c +++ 
b/tests/gpt_unit_test.c @@ -24,6 +24,7 @@ @@ -212,3 +212,6 @@ index 4d70868af..83198bebf 100644 + grub_test_unregister ("gpt_repair_test"); grub_fini_all (); } +-- +2.21.3 + diff --git a/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch index ae331c19..1366729d 100644 --- a/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch +++ b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch @@ -1,4 +1,4 @@ -From cb285d07b1b20c2ae081913cd3002cd97c8dd385 Mon Sep 17 00:00:00 2001 +From b6fdf2155967411d35ebf7667f0495ec741ad055 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sun, 19 Oct 2014 14:21:29 -0700 Subject: [PATCH] gpt: add write function and gptrepair command @@ -19,10 +19,10 @@ not support writing. create mode 100644 tests/gptrepair_test.in diff --git a/Makefile.util.def b/Makefile.util.def -index af8a008e2..6ed541c1c 100644 +index c7efe17..a2ca51d 100644 --- a/Makefile.util.def +++ b/Makefile.util.def -@@ -1175,6 +1175,12 @@ script = { +@@ -1288,6 +1288,12 @@ script = { common = tests/grub_cmd_tr.in; }; @@ -36,10 +36,10 @@ index af8a008e2..6ed541c1c 100644 testcase; name = file_filter_test; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 95dba2d26..43ce166db 100644 +index 3443e9c..72d485f 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -893,6 +893,11 @@ module = { +@@ -932,6 +932,11 @@ module = { common = commands/gptsync.c; }; @@ -53,7 +53,7 @@ index 95dba2d26..43ce166db 100644 common = lib/gpt.c; diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c new file mode 100644 -index 000000000..38392fd8f +index 0000000..38392fd --- /dev/null +++ b/grub-core/commands/gptrepair.c @@ -0,0 +1,116 @@ @@ -174,7 +174,7 @@ index 000000000..38392fd8f + grub_unregister_command (cmd); +} diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 2d61df488..67ffdf703 100644 +index 2d61df4..67ffdf7 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -357,10 +357,46 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -229,7 +229,7 @@ index 2d61df488..67ffdf703 100644 return GRUB_ERR_NONE; } diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index f367fe50d..a483f710a 100644 +index b45acbd..5c2b535 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -103,6 +103,11 @@ typedef enum grub_gpt_status @@ -256,7 +256,7 @@ index f367fe50d..a483f710a 100644 grub_err_t grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr); diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in new file mode 100644 -index 000000000..80b2de633 +index 0000000..80b2de6 --- /dev/null +++ b/tests/gptrepair_test.in @@ -0,0 +1,102 @@ @@ -362,3 +362,6 @@ index 000000000..80b2de633 +do_repair +cmp "${img1}" "${img2}" +echo +-- +2.21.3 + diff --git a/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch b/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch index 5f57e963..c68642a7 100644 --- a/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch +++ b/packages/grub/0008-gpt-add-a-new-generic-GUID-type.patch @@ -1,4 +1,4 @@ -From 14d7fb113f2c89b3df8b6ed7b0fa001084704d6b Mon Sep 17 00:00:00 2001 +From d5520178578153d5b1c79bfba014d388acc6cf75 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 30 Oct 2014 20:55:21 -0700 Subject: [PATCH] gpt: add a new generic GUID type @@ -15,7 +15,7 @@ Signed-off-by: iliana destroyer of worlds 2 files 
changed, 24 insertions(+), 22 deletions(-) diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index a483f710a..8183a1f30 100644 +index 5c2b535..d13ea0b 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -23,33 +23,35 @@ @@ -79,7 +79,7 @@ index a483f710a..8183a1f30 100644 grub_uint32_t maxpart; grub_uint32_t partentry_size; diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 83198bebf..86e4364a5 100644 +index 83198be..86e4364 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -99,8 +99,8 @@ static const struct grub_gpt_header example_primary = { @@ -120,3 +120,6 @@ index 83198bebf..86e4364a5 100644 grub_gpt_header_check (&backup, GRUB_DISK_SECTOR_BITS); grub_test_assert (grub_errno == GRUB_ERR_BAD_PART_TABLE, "unexpected error: %s", grub_errmsg); +-- +2.21.3 + diff --git a/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch index 6de6693a..97928b0c 100644 --- a/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch +++ b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch @@ -1,4 +1,4 @@ -From 821cdddd98bdc3879fb7580c225c3a89282873e7 Mon Sep 17 00:00:00 2001 +From ad528de92162a7aaa2666782c5cd18a9ab14d7e2 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Mon, 3 Nov 2014 17:14:37 -0800 Subject: [PATCH] gpt: new gptprio.next command for selecting priority based @@ -25,10 +25,10 @@ Signed-off-by: iliana destroyer of worlds create mode 100644 tests/gptprio_test.in diff --git a/Makefile.util.def b/Makefile.util.def -index 6ed541c1c..a2b84ec4b 100644 +index a2ca51d..eb4bc90 100644 --- a/Makefile.util.def +++ b/Makefile.util.def -@@ -1181,6 +1181,12 @@ script = { +@@ -1294,6 +1294,12 @@ script = { common = tests/gptrepair_test.in; }; @@ -42,10 +42,10 @@ index 6ed541c1c..a2b84ec4b 100644 testcase; name = file_filter_test; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 43ce166db..615b00226 100644 +index 72d485f..78c64a8 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -898,6 +898,11 @@ module = { +@@ -937,6 +937,11 @@ module = { common = commands/gptrepair.c; }; @@ -59,7 +59,7 @@ index 43ce166db..615b00226 100644 common = lib/gpt.c; diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c new file mode 100644 -index 000000000..1e2e06cef +index 0000000..1e2e06c --- /dev/null +++ b/grub-core/commands/gptprio.c @@ -0,0 +1,238 @@ @@ -302,7 +302,7 @@ index 000000000..1e2e06cef + grub_unregister_extcmd (cmd_next); +} diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 8183a1f30..8a6e56af4 100644 +index d13ea0b..fc4f0f5 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -53,6 +53,10 @@ typedef struct grub_gpt_guid grub_gpt_part_guid_t; @@ -318,7 +318,7 @@ index 8183a1f30..8a6e56af4 100644 @@ -87,6 +91,51 @@ struct grub_gpt_partentry char name[72]; - } GRUB_PACKED; + } GRUB_PACKED __attribute__ ((aligned(8))); +enum grub_gpt_part_attr_offset +{ @@ -370,7 +370,7 @@ index 8183a1f30..8a6e56af4 100644 grub_gpt_partition_map_iterate (grub_disk_t disk, diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in new file mode 100644 -index 000000000..f4aea0dc9 +index 0000000..f4aea0d --- /dev/null +++ b/tests/gptprio_test.in @@ -0,0 +1,150 @@ @@ -524,3 +524,6 @@ index 000000000..f4aea0dc9 +check_next 4 1 0 1 +check_prio 2 3 0 0 +check_prio 3 2 0 0 
+-- +2.21.3 + diff --git a/packages/grub/0010-gpt-split-out-checksum-recomputation.patch b/packages/grub/0010-gpt-split-out-checksum-recomputation.patch index 335541da..b7798588 100644 --- a/packages/grub/0010-gpt-split-out-checksum-recomputation.patch +++ b/packages/grub/0010-gpt-split-out-checksum-recomputation.patch @@ -1,4 +1,4 @@ -From 344f0a5304c6962bbafaf218380d6d00cc18a252 Mon Sep 17 00:00:00 2001 +From f6e5e2723069e684b13bc90c7d252f49037e505e Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 15 Nov 2014 13:27:13 -0800 Subject: [PATCH] gpt: split out checksum recomputation @@ -10,7 +10,7 @@ For basic data modifications the full repair function is overkill. 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 67ffdf703..198234071 100644 +index 67ffdf7..1982340 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -293,7 +293,6 @@ grub_err_t @@ -71,7 +71,7 @@ index 67ffdf703..198234071 100644 } diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 8a6e56af4..f5197b8ae 100644 +index fc4f0f5..7fbdf4c 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -200,6 +200,9 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); @@ -84,3 +84,6 @@ index 8a6e56af4..f5197b8ae 100644 /* Write headers and entry tables back to disk. */ grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); +-- +2.21.3 + diff --git a/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch b/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch index 2fa53107..b91b5ae5 100644 --- a/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch +++ b/packages/grub/0011-gpt-move-gpt-guid-printing-function-to-common-librar.patch @@ -1,4 +1,4 @@ -From 415fc53d5282e848e64096aeb90a628488ab2c94 Mon Sep 17 00:00:00 2001 +From 44c916ad1a8ae8fbd3743f269fd13cc677a932da Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 27 Nov 2014 12:55:53 -0800 Subject: [PATCH] gpt: move gpt guid printing function to common library @@ -10,7 +10,7 @@ Subject: [PATCH] gpt: move gpt guid printing function to common library 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 1e2e06cef..24157477c 100644 +index 1e2e06c..2415747 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -141,20 +141,8 @@ grub_find_next (const char *disk_name, @@ -37,7 +37,7 @@ index 1e2e06cef..24157477c 100644 grub_errno = GRUB_ERR_NONE; diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 198234071..9a1835b84 100644 +index 1982340..9a1835b 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -31,6 +31,19 @@ GRUB_MOD_LICENSE ("GPLv3+"); @@ -61,7 +61,7 @@ index 198234071..9a1835b84 100644 grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index f5197b8ae..f2b3630e4 100644 +index 7fbdf4c..dc1bcca 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -33,6 +33,10 @@ struct grub_gpt_guid @@ -75,3 +75,6 @@ index f5197b8ae..f2b3630e4 100644 #define GRUB_GPT_GUID_INIT(a, b, c, d1, d2, d3, d4, d5, d6, d7, d8) \ { \ grub_cpu_to_le32_compile_time (a), \ +-- +2.21.3 + diff --git a/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch b/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch index b23a4ae9..7383f4f7 100644 --- 
a/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch +++ b/packages/grub/0012-gpt-switch-partition-names-to-a-16-bit-type.patch @@ -1,4 +1,4 @@ -From de8ed5eacc471e74e32a70257261ef83cb8b9adc Mon Sep 17 00:00:00 2001 +From d424d55607bdfbbd0f5f0a220ea582ff88106931 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 27 Nov 2014 14:54:27 -0800 Subject: [PATCH] gpt: switch partition names to a 16 bit type @@ -10,7 +10,7 @@ string practical. 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index f2b3630e4..1d065df99 100644 +index dc1bcca..a44c0d5 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -92,7 +92,7 @@ struct grub_gpt_partentry @@ -19,6 +19,9 @@ index f2b3630e4..1d065df99 100644 grub_uint64_t attrib; - char name[72]; + grub_uint16_t name[36]; - } GRUB_PACKED; + } GRUB_PACKED __attribute__ ((aligned(8))); enum grub_gpt_part_attr_offset +-- +2.21.3 + diff --git a/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch b/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch index a787e50b..aefdd71f 100644 --- a/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch +++ b/packages/grub/0013-tests-add-some-partitions-to-the-gpt-unit-test-data.patch @@ -1,4 +1,4 @@ -From 19f4319c4f68b416433265a104e49520486419cc Mon Sep 17 00:00:00 2001 +From 564616c70c7a914f385cfa36e0bb214cf75723a3 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 27 Nov 2014 15:49:57 -0800 Subject: [PATCH] tests: add some partitions to the gpt unit test data @@ -8,7 +8,7 @@ Subject: [PATCH] tests: add some partitions to the gpt unit test data 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 86e4364a5..5692a5a52 100644 +index 86e4364..5692a5a 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -89,12 +89,12 @@ struct test_data @@ -122,3 +122,6 @@ index 86e4364a5..5692a5a52 100644 return gpt; } +-- +2.21.3 + diff --git a/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch index b4563654..bb4a151c 100644 --- a/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch +++ b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch @@ -1,4 +1,4 @@ -From 5ca51d1d60b6a692fc679b3cc0236cec0a66e1aa Mon Sep 17 00:00:00 2001 +From 0fceffa51b087c1f3f19f470f08e9378d71ddb2d Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 27 Nov 2014 16:34:21 -0800 Subject: [PATCH] gpt: add search by partition label and uuid commands @@ -20,10 +20,10 @@ Builds on the existing filesystem search code. Only for GPT right now. 
create mode 100644 grub-core/commands/search_part_uuid.c diff --git a/Makefile.util.def b/Makefile.util.def -index a2b84ec4b..b63a2963c 100644 +index eb4bc90..8f74405 100644 --- a/Makefile.util.def +++ b/Makefile.util.def -@@ -1287,6 +1287,8 @@ program = { +@@ -1406,6 +1406,8 @@ program = { name = gpt_unit_test; common = tests/gpt_unit_test.c; common = tests/lib/unit_test.c; @@ -33,10 +33,10 @@ index a2b84ec4b..b63a2963c 100644 common = grub-core/kern/emu/hostfs.c; common = grub-core/lib/gpt.c; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 615b00226..9964d42fe 100644 +index 78c64a8..fb4b1a1 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -1095,6 +1095,16 @@ module = { +@@ -1136,6 +1136,16 @@ module = { common = commands/search_label.c; }; @@ -54,7 +54,7 @@ index 615b00226..9964d42fe 100644 name = setpci; common = commands/setpci.c; diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index ed090b3af..4ad72c5b4 100644 +index ed090b3..4ad72c5 100644 --- a/grub-core/commands/search.c +++ b/grub-core/commands/search.c @@ -30,6 +30,9 @@ @@ -136,7 +136,7 @@ index ed090b3af..4ad72c5b4 100644 #else diff --git a/grub-core/commands/search_part_label.c b/grub-core/commands/search_part_label.c new file mode 100644 -index 000000000..ca906cbd9 +index 0000000..ca906cb --- /dev/null +++ b/grub-core/commands/search_part_label.c @@ -0,0 +1,5 @@ @@ -147,7 +147,7 @@ index 000000000..ca906cbd9 +#include "search.c" diff --git a/grub-core/commands/search_part_uuid.c b/grub-core/commands/search_part_uuid.c new file mode 100644 -index 000000000..2d1d3d0d7 +index 0000000..2d1d3d0 --- /dev/null +++ b/grub-core/commands/search_part_uuid.c @@ -0,0 +1,5 @@ @@ -157,7 +157,7 @@ index 000000000..2d1d3d0d7 +#define HELP_MESSAGE N_("Search devices by partition UUID. If VARIABLE is specified, the first device found is set to a variable.") +#include "search.c" diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index d7fd26b94..e3ff756df 100644 +index 47fc8eb..d357454 100644 --- a/grub-core/commands/search_wrap.c +++ b/grub-core/commands/search_wrap.c @@ -36,6 +36,10 @@ static const struct grub_arg_option options[] = @@ -194,7 +194,7 @@ index d7fd26b94..e3ff756df 100644 grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, hints, nhints); diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 9a1835b84..10a4b852d 100644 +index 9a1835b..10a4b85 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -18,7 +18,9 @@ @@ -277,7 +277,7 @@ index 9a1835b84..10a4b852d 100644 grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 1d065df99..dc2dec43a 100644 +index a44c0d5..7b04080 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -49,6 +49,10 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); @@ -309,7 +309,7 @@ index 1d065df99..dc2dec43a 100644 + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ diff --git a/include/grub/search.h b/include/grub/search.h -index d80347df3..c2f40abe9 100644 +index d80347d..c2f40ab 100644 --- a/include/grub/search.h +++ b/include/grub/search.h @@ -25,5 +25,9 @@ void grub_search_fs_uuid (const char *key, const char *var, int no_floppy, @@ -323,7 +323,7 @@ index d80347df3..c2f40abe9 100644 #endif diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 5692a5a52..deb55a926 100644 +index 5692a5a..deb55a9 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -21,10 +21,12 @@ @@ -441,3 +441,6 @@ index 5692a5a52..deb55a926 100644 + grub_test_unregister ("gpt_search_uuid_test"); grub_fini_all (); } +-- +2.21.3 + diff --git a/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch b/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch index 079c7428..82831f0d 100644 --- a/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch +++ b/packages/grub/0015-gpt-clean-up-little-endian-crc32-computation.patch @@ -1,4 +1,4 @@ -From ea2059a5db3f2c74216d30e4d509f477edaf0a42 Mon Sep 17 00:00:00 2001 +From 7ed137907dfccfcd738ec7371ded51af39d48006 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Fri, 31 Jul 2015 15:03:11 -0700 Subject: [PATCH] gpt: clean up little-endian crc32 computation @@ -11,7 +11,7 @@ Subject: [PATCH] gpt: clean up little-endian crc32 computation 1 file changed, 13 insertions(+), 38 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 10a4b852d..aedc4f7a1 100644 +index 10a4b85..aedc4f7 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -122,45 +122,29 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) @@ -111,3 +111,6 @@ index 10a4b852d..aedc4f7a1 100644 return GRUB_ERR_NONE; } +-- +2.21.3 + diff --git a/packages/grub/0016-gpt-minor-cleanup.patch b/packages/grub/0016-gpt-minor-cleanup.patch index 940106cd..ca7aec89 100644 --- a/packages/grub/0016-gpt-minor-cleanup.patch +++ b/packages/grub/0016-gpt-minor-cleanup.patch @@ -1,4 +1,4 @@ -From 025b3591c95200132256b44d15048a26bf558c40 Mon Sep 17 00:00:00 2001 +From 6563a1739f3ae9e06002682b4455f78df67fbf32 Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Mon, 31 Aug 2015 15:23:39 -0700 Subject: [PATCH] gpt: minor cleanup @@ -9,7 +9,7 @@ Subject: [PATCH] gpt: minor cleanup 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index dc2dec43a..ae72b026c 100644 +index 7b04080..1eb2f7b 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -229,7 +229,7 @@ grub_err_t grub_gpt_header_check (struct grub_gpt_header *gpt, @@ -22,7 +22,7 @@ index dc2dec43a..ae72b026c 100644 #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index deb55a926..7a1af46e1 100644 +index deb55a9..7a1af46 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -538,7 +538,7 @@ repair_test (void) @@ -64,3 +64,6 @@ index deb55a926..7a1af46e1 100644 + grub_test_unregister ("gpt_search_part_uuid_test"); grub_fini_all (); } +-- +2.21.3 + diff --git a/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch index cd4b4cbc..76a334d2 100644 --- a/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch +++ b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch @@ -1,4 +1,4 @@ -From aa6b435ee13658a7eced13ebbe9be25567ee019a Mon Sep 17 00:00:00 2001 +From 9963416869afbe7fa9e671fa6ffd0871aec994f8 Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Mon, 31 Aug 2015 15:15:48 -0700 Subject: [PATCH] gpt: add search by disk uuid command @@ -17,10 +17,10 @@ Subject: [PATCH] gpt: add search by disk uuid command create mode 100644 grub-core/commands/search_disk_uuid.c diff --git a/Makefile.util.def b/Makefile.util.def -index b63a2963c..65cbfc081 100644 +index 8f74405..33ce60d 100644 --- a/Makefile.util.def +++ b/Makefile.util.def -@@ -1289,6 +1289,7 @@ program = { +@@ -1408,6 +1408,7 @@ program = { common = tests/lib/unit_test.c; common = grub-core/commands/search_part_label.c; common = grub-core/commands/search_part_uuid.c; @@ -29,10 +29,10 @@ index b63a2963c..65cbfc081 100644 common = grub-core/kern/emu/hostfs.c; common = grub-core/lib/gpt.c; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 9964d42fe..79b24c187 100644 +index fb4b1a1..b9c0494 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -1105,6 +1105,11 @@ module = { +@@ -1146,6 +1146,11 @@ module = { common = commands/search_part_label.c; }; @@ -45,7 +45,7 @@ index 9964d42fe..79b24c187 100644 name = setpci; common = commands/setpci.c; diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index 4ad72c5b4..fd411ce3e 100644 +index 4ad72c5..fd411ce 100644 --- a/grub-core/commands/search.c +++ b/grub-core/commands/search.c @@ -30,7 +30,8 @@ @@ -113,7 +113,7 @@ index 4ad72c5b4..fd411ce3e 100644 #endif diff --git a/grub-core/commands/search_disk_uuid.c b/grub-core/commands/search_disk_uuid.c new file mode 100644 -index 000000000..fba96f6b8 +index 0000000..fba96f6 --- /dev/null +++ b/grub-core/commands/search_disk_uuid.c @@ -0,0 +1,5 @@ @@ -123,7 +123,7 @@ index 000000000..fba96f6b8 +#define HELP_MESSAGE N_("Search devices by disk UUID. 
If VARIABLE is specified, the first device found is set to a variable.") +#include "search.c" diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index e3ff756df..d931c56c5 100644 +index d357454..fc149cd 100644 --- a/grub-core/commands/search_wrap.c +++ b/grub-core/commands/search_wrap.c @@ -40,6 +40,8 @@ static const struct grub_arg_option options[] = @@ -154,7 +154,7 @@ index e3ff756df..d931c56c5 100644 grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, hints, nhints); diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index aedc4f7a1..e162bafd3 100644 +index aedc4f7..e162baf 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -108,6 +108,27 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) @@ -186,7 +186,7 @@ index aedc4f7a1..e162bafd3 100644 grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index ae72b026c..2ca063ad9 100644 +index 1eb2f7b..16fdd7f 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -232,4 +232,8 @@ grub_err_t grub_gpt_part_label (grub_device_t device, char **label); @@ -199,7 +199,7 @@ index ae72b026c..2ca063ad9 100644 + #endif /* ! GRUB_GPT_PARTITION_HEADER */ diff --git a/include/grub/search.h b/include/grub/search.h -index c2f40abe9..7f69d25d1 100644 +index c2f40ab..7f69d25 100644 --- a/include/grub/search.h +++ b/include/grub/search.h @@ -29,5 +29,7 @@ void grub_search_part_uuid (const char *key, const char *var, int no_floppy, @@ -211,7 +211,7 @@ index c2f40abe9..7f69d25d1 100644 #endif diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 7a1af46e1..60f601729 100644 +index 7a1af46..60f6017 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -614,6 +614,37 @@ search_part_uuid_test (void) @@ -267,3 +267,6 @@ index 7a1af46e1..60f601729 100644 + grub_test_unregister ("gpt_search_disk_uuid_test"); grub_fini_all (); } +-- +2.21.3 + diff --git a/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch b/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch index 9bb3bac0..c8087ee6 100644 --- a/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch +++ b/packages/grub/0018-gpt-do-not-use-disk-sizes-GRUB-will-reject-as-invali.patch @@ -1,4 +1,4 @@ -From 2009646dc28e1a20b71d176040e9222f5c62d231 Mon Sep 17 00:00:00 2001 +From 2d1291ab5c4c814b961c1dc8f3a6541bf9aa8d32 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Mon, 25 Jul 2016 14:59:29 -0700 Subject: [PATCH] gpt: do not use disk sizes GRUB will reject as invalid later @@ -12,7 +12,7 @@ the usual way with the special GRUB_DISK_SIZE_UNKNOWN value. 
1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index e162bafd3..3e17f2771 100644 +index e162baf..3e17f27 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -143,6 +143,28 @@ grub_gpt_size_to_sectors (grub_gpt_t gpt, grub_size_t size) @@ -62,3 +62,6 @@ index e162bafd3..3e17f2771 100644 backup_header = disk->total_sectors - 1; backup_entries = backup_header - +-- +2.21.3 + diff --git a/packages/grub/0019-gpt-add-verbose-debug-logging.patch b/packages/grub/0019-gpt-add-verbose-debug-logging.patch index e1a4039c..40a24543 100644 --- a/packages/grub/0019-gpt-add-verbose-debug-logging.patch +++ b/packages/grub/0019-gpt-add-verbose-debug-logging.patch @@ -1,4 +1,4 @@ -From 8fb347e03236e927e3a2a1134923854a39a03c4b Mon Sep 17 00:00:00 2001 +From acc91512edec9cae77efc51df8418ae9fdba4a0e Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 10 Aug 2016 18:26:03 -0700 Subject: [PATCH] gpt: add verbose debug logging @@ -8,7 +8,7 @@ Subject: [PATCH] gpt: add verbose debug logging 1 file changed, 109 insertions(+), 8 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3e17f2771..c2821b563 100644 +index 3e17f27..c2821b5 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -207,6 +207,18 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) @@ -249,3 +249,6 @@ index 3e17f2771..c2821b563 100644 if (grub_gpt_write_table (disk, gpt, &gpt->backup)) return grub_errno; +-- +2.21.3 + diff --git a/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch b/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch index 6d45c729..dbc80463 100644 --- a/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch +++ b/packages/grub/0020-gpt-improve-validation-of-GPT-headers.patch @@ -1,4 +1,4 @@ -From 3670a0019adb31fd9f849b5128a629e0e89a9bba Mon Sep 17 00:00:00 2001 +From 4a037b26a09886e33f281e968f0be4545a6d2c57 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 10 Aug 2016 18:26:03 -0700 Subject: [PATCH] gpt: improve validation of GPT headers @@ -10,7 +10,7 @@ the chance of corrupting weird locations on disk. 
1 file changed, 48 insertions(+) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index c2821b563..f83fe29ac 100644 +index c2821b5..f83fe29 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -224,6 +224,7 @@ grub_gpt_header_check (struct grub_gpt_header *gpt, @@ -99,3 +99,6 @@ index c2821b563..f83fe29ac 100644 gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; return GRUB_ERR_NONE; } +-- +2.21.3 + diff --git a/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch b/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch index cd8869cc..787d0dbb 100644 --- a/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch +++ b/packages/grub/0021-gpt-refuse-to-write-to-sector-0.patch @@ -1,4 +1,4 @@ -From 2ebc9802edf1656597d8f6d7f9cc557dae08d4b2 Mon Sep 17 00:00:00 2001 +From d81aa092c0eb91ec618a6f0879426e8fcbdae0d4 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 11 Aug 2016 15:02:21 -0700 Subject: [PATCH] gpt: refuse to write to sector 0 @@ -8,7 +8,7 @@ Subject: [PATCH] gpt: refuse to write to sector 0 1 file changed, 7 insertions(+) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f83fe29ac..b7449911a 100644 +index f83fe29..b744991 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -626,10 +626,17 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, @@ -29,3 +29,6 @@ index f83fe29ac..b7449911a 100644 if (grub_disk_write (disk, addr, 0, gpt->entries_size, gpt->entries)) return grub_errno; +-- +2.21.3 + diff --git a/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch b/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch index 3b69a5e8..fbee57b8 100644 --- a/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch +++ b/packages/grub/0022-gpt-properly-detect-and-repair-invalid-tables.patch @@ -1,4 +1,4 @@ -From 6ca9e261944a004668f9416f0ce9dad19846155e Mon Sep 17 00:00:00 2001 +From 29d1ef938cc64d87b09955645b46f947bfcf7b60 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sat, 20 Aug 2016 17:42:12 -0700 Subject: [PATCH] gpt: properly detect and repair invalid tables @@ -12,7 +12,7 @@ headers that were marked invalid causing arbitrary disk corruption. 
2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 24157477c..2021cb200 100644 +index 2415747..2021cb2 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, @@ -25,7 +25,7 @@ index 24157477c..2021cb200 100644 goto done; diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index b7449911a..0daf3f8de 100644 +index b744991..0daf3f8 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -648,7 +648,7 @@ grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt) @@ -37,3 +37,6 @@ index b7449911a..0daf3f8de 100644 return grub_error (GRUB_ERR_BAD_PART_TABLE, "Invalid GPT data"); grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); +-- +2.21.3 + diff --git a/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch b/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch index 1d719da9..1fe747da 100644 --- a/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch +++ b/packages/grub/0023-gptrepair_test-fix-typo-in-cleanup-trap.patch @@ -1,4 +1,4 @@ -From b2fabac1e1c8eb2beb83b5cafb45daed9a3eef64 Mon Sep 17 00:00:00 2001 +From 725982e848e600ed6f5028fb2197a5ffe0070806 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Mon, 22 Aug 2016 16:44:30 -0700 Subject: [PATCH] gptrepair_test: fix typo in cleanup trap @@ -8,7 +8,7 @@ Subject: [PATCH] gptrepair_test: fix typo in cleanup trap 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/gptrepair_test.in b/tests/gptrepair_test.in -index 80b2de633..805dc171a 100644 +index 80b2de6..805dc17 100644 --- a/tests/gptrepair_test.in +++ b/tests/gptrepair_test.in @@ -53,7 +53,7 @@ case "${grub_modinfo_target_cpu}-${grub_modinfo_platform}" in @@ -20,3 +20,6 @@ index 80b2de633..805dc171a 100644 create_disk_image () { size=$1 +-- +2.21.3 + diff --git a/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch b/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch index cab276af..b2176e0a 100644 --- a/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch +++ b/packages/grub/0024-gptprio_test-check-GPT-is-repaired-when-appropriate.patch @@ -1,4 +1,4 @@ -From 999e6bc69145e936a5172d4f3f64374c8087d43e Mon Sep 17 00:00:00 2001 +From 9b3da48085a4437f6915fee3e0d6b74de4ee714c Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Mon, 22 Aug 2016 16:45:10 -0700 Subject: [PATCH] gptprio_test: check GPT is repaired when appropriate @@ -8,7 +8,7 @@ Subject: [PATCH] gptprio_test: check GPT is repaired when appropriate 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in -index f4aea0dc9..c5cf0f3b7 100644 +index f4aea0d..c5cf0f3 100644 --- a/tests/gptprio_test.in +++ b/tests/gptprio_test.in @@ -66,8 +66,9 @@ prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" @@ -105,3 +105,6 @@ index f4aea0dc9..c5cf0f3b7 100644 # Try two partitions before falling before falling back to a third create_disk_image 100 set_prio 2 3 3 0 +-- +2.21.3 + diff --git a/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch b/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch index a2683a82..13bbf901 100644 --- a/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch +++ b/packages/grub/0025-gpt-fix-partition-table-indexing-and-validation.patch @@ -1,4 +1,4 @@ -From 266c58dedddb5b927528052db1e3b21a892cfc0d Mon Sep 17 00:00:00 
2001 +From f5639101bd80e1246a155b7d1b576efb6fe716f4 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 24 Aug 2016 16:14:20 -0700 Subject: [PATCH] gpt: fix partition table indexing and validation @@ -16,7 +16,7 @@ for the sake of safety we need to do this by the spec. 4 files changed, 176 insertions(+), 12 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 2021cb200..eebca7a09 100644 +index 2021cb2..eebca7a 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -78,7 +78,7 @@ grub_find_next (const char *disk_name, @@ -41,7 +41,7 @@ index 2021cb200..eebca7a09 100644 { unsigned int priority, tries_left, successful, old_priority = 0; diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 0daf3f8de..205779192 100644 +index 0daf3f8..2057791 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -207,6 +207,13 @@ grub_gpt_pmbr_check (struct grub_msdos_partition_mbr *mbr) @@ -141,7 +141,7 @@ index 0daf3f8de..205779192 100644 grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 2ca063ad9..b354fd5f4 100644 +index 16fdd7f..1a215f8 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -186,8 +186,10 @@ struct grub_gpt @@ -170,7 +170,7 @@ index 2ca063ad9..b354fd5f4 100644 grub_err_t grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt); diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 60f601729..9cf3414c2 100644 +index 60f6017..9cf3414 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -40,6 +40,13 @@ @@ -321,3 +321,6 @@ index 60f601729..9cf3414c2 100644 grub_test_unregister ("gpt_search_part_label_test"); grub_test_unregister ("gpt_search_part_uuid_test"); grub_test_unregister ("gpt_search_disk_uuid_test"); +-- +2.21.3 + diff --git a/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch b/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch index 2eb64f29..2772aad9 100644 --- a/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch +++ b/packages/grub/0026-gpt-prefer-disk-size-from-header-over-firmware.patch @@ -1,4 +1,4 @@ -From 81126be2c4ee13b747586e15797a9c5505c0ce0b Mon Sep 17 00:00:00 2001 +From 09b0254a01a4433bd6d58e0c7386e750834b19d5 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Tue, 23 Aug 2016 13:09:14 -0700 Subject: [PATCH] gpt: prefer disk size from header over firmware @@ -15,7 +15,7 @@ the error as best we can and move on. 
2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 205779192..f0c71bde1 100644 +index 2057791..f0c71bd 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -394,13 +394,21 @@ grub_gpt_read_backup (grub_disk_t disk, grub_gpt_t gpt) @@ -46,7 +46,7 @@ index 205779192..f0c71bde1 100644 grub_dprintf ("gpt", "reading backup GPT from sector 0x%llx\n", (unsigned long long) sector); diff --git a/tests/gpt_unit_test.c b/tests/gpt_unit_test.c -index 9cf3414c2..218b18697 100644 +index 9cf3414..218b186 100644 --- a/tests/gpt_unit_test.c +++ b/tests/gpt_unit_test.c @@ -544,6 +544,46 @@ repair_test (void) @@ -112,3 +112,6 @@ index 9cf3414c2..218b18697 100644 grub_test_unregister ("gpt_search_part_label_test"); grub_test_unregister ("gpt_search_part_uuid_test"); grub_test_unregister ("gpt_search_disk_uuid_test"); +-- +2.21.3 + diff --git a/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch b/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch index 899a0405..0d429c4f 100644 --- a/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch +++ b/packages/grub/0027-gpt-add-helper-for-picking-a-valid-header.patch @@ -1,4 +1,4 @@ -From e1db8ebe4d10b34817f1e9837f2eb8d6e29a0dd6 Mon Sep 17 00:00:00 2001 +From 974f79ff5cfff8b37f6377d55c2e090f9c5c6251 Mon Sep 17 00:00:00 2001 From: Vito Caputo Date: Thu, 25 Aug 2016 17:21:18 -0700 Subject: [PATCH] gpt: add helper for picking a valid header @@ -9,7 +9,7 @@ Eliminate some repetition in primary vs. backup header acquisition. 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f0c71bde1..2550ed87c 100644 +index f0c71bd..2550ed8 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -108,21 +108,32 @@ grub_gpt_part_uuid (grub_device_t device, char **uuid) @@ -66,3 +66,6 @@ index f0c71bde1..2550ed87c 100644 return NULL; if (n >= grub_le_to_cpu32 (header->maxpart)) +-- +2.21.3 + diff --git a/packages/grub/0028-gptrepair-fix-status-checking.patch b/packages/grub/0028-gptrepair-fix-status-checking.patch index 60b6a6e6..5fe003af 100644 --- a/packages/grub/0028-gptrepair-fix-status-checking.patch +++ b/packages/grub/0028-gptrepair-fix-status-checking.patch @@ -1,4 +1,4 @@ -From 1edd283385b1c07a0fd88e7db46d89490de0648d Mon Sep 17 00:00:00 2001 +From 0969dfbba2274221ece397dc4dedfb9b03f8577b Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Tue, 20 Sep 2016 13:06:05 -0700 Subject: [PATCH] gptrepair: fix status checking @@ -9,7 +9,7 @@ None of these status bit checks were correct. Fix and simplify. 
1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c -index 38392fd8f..66ac3f7c7 100644 +index 38392fd..66ac3f7 100644 --- a/grub-core/commands/gptrepair.c +++ b/grub-core/commands/gptrepair.c @@ -46,8 +46,6 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), @@ -62,3 +62,6 @@ index 38392fd8f..66ac3f7c7 100644 done: if (gpt) +-- +2.21.3 + diff --git a/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch b/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch index bd12660f..1c2bf9b7 100644 --- a/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch +++ b/packages/grub/0029-gpt-use-inline-functions-for-checking-status-bits.patch @@ -1,4 +1,4 @@ -From babffa21b389bdb7cca50df69ac94d623890a70e Mon Sep 17 00:00:00 2001 +From 7f6fb19999b3b825a9c04cee20c8c1dee1590813 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Tue, 20 Sep 2016 12:43:01 -0700 Subject: [PATCH] gpt: use inline functions for checking status bits @@ -12,7 +12,7 @@ This should prevent bugs like 6078f836 and 4268f3da. 4 files changed, 39 insertions(+), 13 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index eebca7a09..59bc4fd09 100644 +index eebca7a..59bc4fd 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -91,7 +91,7 @@ grub_find_next (const char *disk_name, @@ -25,7 +25,7 @@ index eebca7a09..59bc4fd09 100644 goto done; diff --git a/grub-core/commands/gptrepair.c b/grub-core/commands/gptrepair.c -index 66ac3f7c7..c17c7346c 100644 +index 66ac3f7..c17c734 100644 --- a/grub-core/commands/gptrepair.c +++ b/grub-core/commands/gptrepair.c @@ -65,16 +65,16 @@ grub_cmd_gptrepair (grub_command_t cmd __attribute__ ((unused)), @@ -49,7 +49,7 @@ index 66ac3f7c7..c17c7346c 100644 if (grub_gpt_repair (dev->disk, gpt)) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 2550ed87c..3e077c497 100644 +index 2550ed8..3e077c4 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -638,10 +638,15 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -79,7 +79,7 @@ index 2550ed87c..3e077c497 100644 grub_dprintf ("gpt", "writing primary GPT to %s\n", disk->name); diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index b354fd5f4..226e09978 100644 +index 1a215f8..b7fbb7a 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -161,13 +161,6 @@ typedef enum grub_gpt_status @@ -131,3 +131,6 @@ index b354fd5f4..226e09978 100644 /* Translate GPT sectors to GRUB's 512 byte block addresses. */ static inline grub_disk_addr_t grub_gpt_sector_to_addr (grub_gpt_t gpt, grub_uint64_t sector) +-- +2.21.3 + diff --git a/packages/grub/0030-gpt-allow-repair-function-to-noop.patch b/packages/grub/0030-gpt-allow-repair-function-to-noop.patch index f02b79b6..5e8c07b7 100644 --- a/packages/grub/0030-gpt-allow-repair-function-to-noop.patch +++ b/packages/grub/0030-gpt-allow-repair-function-to-noop.patch @@ -1,4 +1,4 @@ -From 1c073b3844ef82f73df06659a533dbb276514420 Mon Sep 17 00:00:00 2001 +From 661ed965cb87ede5c83956cc5710dbeeb97e4abf Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Tue, 20 Sep 2016 13:40:11 -0700 Subject: [PATCH] gpt: allow repair function to noop @@ -10,7 +10,7 @@ Simplifies usage a little. 
2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 59bc4fd09..b799faa37 100644 +index 59bc4fd..b799faa 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -91,9 +91,8 @@ grub_find_next (const char *disk_name, @@ -26,7 +26,7 @@ index 59bc4fd09..b799faa37 100644 for (i = 0; (part = grub_gpt_get_partentry (gpt, i)) != NULL; i++) { diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3e077c497..9bb19678d 100644 +index 3e077c4..9bb1967 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -586,6 +586,10 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -40,3 +40,6 @@ index 3e077c497..9bb19678d 100644 grub_dprintf ("gpt", "repairing GPT for %s\n", disk->name); if (disk->log_sector_size != gpt->log_sector_size) +-- +2.21.3 + diff --git a/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch b/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch index 9a108605..1d712234 100644 --- a/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch +++ b/packages/grub/0031-gpt-do-not-use-an-enum-for-status-bit-values.patch @@ -1,4 +1,4 @@ -From 603feacb39091f9f4e01fe4a4160f0f320b03fe5 Mon Sep 17 00:00:00 2001 +From 16c295c98aebfe855b7cd169420fa659dfb8d327 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 13:22:06 -0700 Subject: [PATCH] gpt: do not use an enum for status bit values @@ -8,7 +8,7 @@ Subject: [PATCH] gpt: do not use an enum for status bit values 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 226e09978..92b606cd9 100644 +index b7fbb7a..d94b93a 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -151,15 +151,14 @@ grub_gpt_partition_map_iterate (grub_disk_t disk, @@ -44,3 +44,6 @@ index 226e09978..92b606cd9 100644 /* Protective or hybrid MBR. */ struct grub_msdos_partition_mbr mbr; +-- +2.21.3 + diff --git a/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch b/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch index 693b7c46..f37eaad4 100644 --- a/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch +++ b/packages/grub/0032-gpt-check-header-and-entries-status-bits-together.patch @@ -1,4 +1,4 @@ -From 3ffa128b0a04cc7c111cdfa2332e07e0977e3073 Mon Sep 17 00:00:00 2001 +From ac788c40bb54691731a317217c9d5a7e241888ca Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 13:44:11 -0700 Subject: [PATCH] gpt: check header and entries status bits together @@ -11,7 +11,7 @@ entries bits to mismatch so don't allow for it. 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 9bb19678d..3c6ff3540 100644 +index 9bb1967..3c6ff35 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -596,24 +596,20 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -44,3 +44,6 @@ index 9bb19678d..3c6ff3540 100644 /* Relocate backup to end if disk whenever possible. 
*/ if (grub_gpt_disk_size_valid(disk)) +-- +2.21.3 + diff --git a/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch b/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch index 35f9e93d..1edb3716 100644 --- a/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch +++ b/packages/grub/0033-gpt-be-more-careful-about-relocating-backup-header.patch @@ -1,4 +1,4 @@ -From 527a976a1f18bf536d460b6c7e6b3704ad52ecac Mon Sep 17 00:00:00 2001 +From 53e4e887d6ca11fc302bae2c417ecab94ced1252 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 13:52:52 -0700 Subject: [PATCH] gpt: be more careful about relocating backup header @@ -15,7 +15,7 @@ location is good enough and leave it as-is. 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3c6ff3540..35e65d8d9 100644 +index 3c6ff35..35e65d8 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -599,7 +599,17 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -49,3 +49,6 @@ index 3c6ff3540..35e65d8d9 100644 backup_entries = backup_header - grub_gpt_size_to_sectors (gpt, gpt->entries_size); grub_dprintf ("gpt", "backup GPT entries will be located at 0x%llx\n", +-- +2.21.3 + diff --git a/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch b/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch index 1c2f5c08..d6819890 100644 --- a/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch +++ b/packages/grub/0034-gpt-selectively-update-fields-during-repair.patch @@ -1,4 +1,4 @@ -From 2fd4959a6ca0575e4f69042c2034705ec460702a Mon Sep 17 00:00:00 2001 +From d1eb127676eb180eaf211d3a6cd9806a3e30c734 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 14:33:48 -0700 Subject: [PATCH] gpt: selectively update fields during repair @@ -9,7 +9,7 @@ Just a little cleanup/refactor to skip touching data we don't need to. 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 35e65d8d9..03e807b25 100644 +index 35e65d8..03e807b 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -584,8 +584,6 @@ grub_gpt_get_partentry (grub_gpt_t gpt, grub_uint32_t n) @@ -73,3 +73,6 @@ index 35e65d8d9..03e807b25 100644 /* Recompute checksums. */ if (grub_gpt_update_checksums (gpt)) return grub_errno; +-- +2.21.3 + diff --git a/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch b/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch index 8bdf2f36..b535efd8 100644 --- a/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch +++ b/packages/grub/0035-gpt-always-revalidate-when-recomputing-checksums.patch @@ -1,4 +1,4 @@ -From 04deacdc8d85855b3bb85bcd7c02f71b05ee2bec Mon Sep 17 00:00:00 2001 +From 6065f2a049bfe5f8ef6f90b77d2f8ab738004c50 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 14:55:19 -0700 Subject: [PATCH] gpt: always revalidate when recomputing checksums @@ -11,7 +11,7 @@ appropriate state. 
1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 03e807b25..3ac2987c6 100644 +index 03e807b..3ac2987 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -630,23 +630,9 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -70,3 +70,6 @@ index 03e807b25..3ac2987c6 100644 return GRUB_ERR_NONE; } +-- +2.21.3 + diff --git a/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch b/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch index abb2b67b..86c6ba38 100644 --- a/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch +++ b/packages/grub/0036-gpt-include-backup-in-sync-check-in-revalidation.patch @@ -1,4 +1,4 @@ -From a17e4e799fbcb64ea2379a8886a97de74a521bf6 Mon Sep 17 00:00:00 2001 +From 23ce2d3a2b0422c725b1cfb0c61fccba595f8b9b Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 15:01:09 -0700 Subject: [PATCH] gpt: include backup-in-sync check in revalidation @@ -8,7 +8,7 @@ Subject: [PATCH] gpt: include backup-in-sync check in revalidation 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 3ac2987c6..c27bcc510 100644 +index 3ac2987..c27bcc5 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -372,6 +372,11 @@ grub_gpt_check_backup (grub_gpt_t gpt) @@ -35,3 +35,6 @@ index 3ac2987c6..c27bcc510 100644 gpt->status |= GRUB_GPT_BACKUP_HEADER_VALID; return GRUB_ERR_NONE; } +-- +2.21.3 + diff --git a/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch b/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch index da5a4973..ffade3c2 100644 --- a/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch +++ b/packages/grub/0037-gpt-read-entries-table-at-the-same-time-as-the-heade.patch @@ -1,4 +1,4 @@ -From d3661baf7287febdf4afef788f71bc654653de2b Mon Sep 17 00:00:00 2001 +From d9f966bfcdfbe8fe1ccb08c512e26a9deae4f60c Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 15:29:55 -0700 Subject: [PATCH] gpt: read entries table at the same time as the header @@ -11,7 +11,7 @@ they are equal if the crc32 matches. 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index c27bcc510..b93cedea1 100644 +index c27bcc5..b93cede 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -32,6 +32,11 @@ GRUB_MOD_LICENSE ("GPLv3+"); @@ -129,3 +129,6 @@ index c27bcc510..b93cedea1 100644 grub_errno = GRUB_ERR_NONE; else goto fail; +-- +2.21.3 + diff --git a/packages/grub/0038-gpt-report-all-revalidation-errors.patch b/packages/grub/0038-gpt-report-all-revalidation-errors.patch index d2c0115f..d89937e9 100644 --- a/packages/grub/0038-gpt-report-all-revalidation-errors.patch +++ b/packages/grub/0038-gpt-report-all-revalidation-errors.patch @@ -1,4 +1,4 @@ -From 6c9c0a2011b748d239e6f8f8426426323920f50a Mon Sep 17 00:00:00 2001 +From 48243011b5cffdbfb49c2b3d377754459a0a46a3 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Wed, 21 Sep 2016 16:02:53 -0700 Subject: [PATCH] gpt: report all revalidation errors @@ -10,7 +10,7 @@ the existing error onto the stack so the user will be told what is bad. 
1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index b93cedea1..f6f853309 100644 +index b93cede..f6f8533 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -676,13 +676,19 @@ grub_gpt_update_checksums (grub_gpt_t gpt) @@ -35,3 +35,6 @@ index b93cedea1..f6f853309 100644 gpt->status |= (GRUB_GPT_BACKUP_HEADER_VALID | GRUB_GPT_BACKUP_ENTRIES_VALID); +-- +2.21.3 + diff --git a/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch b/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch index eabe4394..7490f1d5 100644 --- a/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch +++ b/packages/grub/0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch @@ -1,4 +1,4 @@ -From 6bd9c7881cc30773f7d89506bfe131764ffd1cb1 Mon Sep 17 00:00:00 2001 +From ed3b92445adb3f2b73d22f5c0ca65447944b023f Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 22 Sep 2016 10:00:27 -0700 Subject: [PATCH] gpt: rename and update documentation for grub_gpt_update @@ -12,7 +12,7 @@ more general name to reflect that. 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index b799faa37..8908d8bed 100644 +index b799faa..8908d8b 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -127,7 +127,7 @@ grub_find_next (const char *disk_name, @@ -25,7 +25,7 @@ index b799faa37..8908d8bed 100644 if (grub_gpt_write (dev->disk, gpt)) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index f6f853309..430404848 100644 +index f6f8533..4304048 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -643,7 +643,7 @@ grub_gpt_repair (grub_disk_t disk, grub_gpt_t gpt) @@ -47,7 +47,7 @@ index f6f853309..430404848 100644 grub_uint32_t crc; diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 92b606cd9..726b98c00 100644 +index d94b93a..797692c 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -232,11 +232,12 @@ grub_gpt_t grub_gpt_read (grub_disk_t disk); @@ -66,3 +66,6 @@ index 92b606cd9..726b98c00 100644 /* Write headers and entry tables back to disk. */ grub_err_t grub_gpt_write (grub_disk_t disk, grub_gpt_t gpt); +-- +2.21.3 + diff --git a/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch b/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch index 29be913e..e723c0eb 100644 --- a/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch +++ b/packages/grub/0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch @@ -1,4 +1,4 @@ -From a789bc5ac9133b0c25dfbfa6d39c7c35a1115dfd Mon Sep 17 00:00:00 2001 +From 114dc2c20130ebe232fb52f02ddb14d06a6ac890 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 22 Sep 2016 11:18:42 -0700 Subject: [PATCH] gpt: write backup GPT first, skip if inaccessible. @@ -20,7 +20,7 @@ will take care of repairing the backup. 
1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c -index 430404848..c3e3a25f9 100644 +index 4304048..c3e3a25 100644 --- a/grub-core/lib/gpt.c +++ b/grub-core/lib/gpt.c @@ -729,19 +729,39 @@ grub_gpt_write_table (grub_disk_t disk, grub_gpt_t gpt, @@ -67,3 +67,6 @@ index 430404848..c3e3a25f9 100644 return GRUB_ERR_NONE; } +-- +2.21.3 + diff --git a/packages/grub/0041-gptprio-Use-Bottlerocket-boot-partition-type-GUID.patch b/packages/grub/0041-gptprio-Use-Bottlerocket-boot-partition-type-GUID.patch index 7b436e9d..57f8aeda 100644 --- a/packages/grub/0041-gptprio-Use-Bottlerocket-boot-partition-type-GUID.patch +++ b/packages/grub/0041-gptprio-Use-Bottlerocket-boot-partition-type-GUID.patch @@ -1,4 +1,4 @@ -From 9e925f4ccaf5d7137f58152598d75381b6bbefe8 Mon Sep 17 00:00:00 2001 +From b6ae405132d795bfbb405dbf15fcb5721428b435 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Thu, 28 Mar 2019 16:28:41 -0700 Subject: [PATCH] gptprio: Use Bottlerocket boot partition type GUID @@ -11,7 +11,7 @@ Signed-off-by: iliana destroyer of worlds 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/grub-core/commands/gptprio.c b/grub-core/commands/gptprio.c -index 8908d8bed..3678d1018 100644 +index 8908d8b..761aea4 100644 --- a/grub-core/commands/gptprio.c +++ b/grub-core/commands/gptprio.c @@ -162,7 +162,7 @@ grub_cmd_next (grub_extcmd_context_t ctxt, int argc, char **args) @@ -24,7 +24,7 @@ index 8908d8bed..3678d1018 100644 if (!state[NEXT_SET_DEVICE].set || !state[NEXT_SET_UUID].set) { diff --git a/include/grub/gpt_partition.h b/include/grub/gpt_partition.h -index 726b98c00..88ad02a44 100644 +index 797692c..2bbfd33 100644 --- a/include/grub/gpt_partition.h +++ b/include/grub/gpt_partition.h @@ -61,9 +61,9 @@ char * grub_gpt_guid_to_str (grub_gpt_guid_t *guid); @@ -41,7 +41,7 @@ index 726b98c00..88ad02a44 100644 #define GRUB_GPT_HEADER_MAGIC \ { 0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54 } diff --git a/tests/gptprio_test.in b/tests/gptprio_test.in -index c5cf0f3b7..9df4dd350 100644 +index c5cf0f3..9df4dd3 100644 --- a/tests/gptprio_test.in +++ b/tests/gptprio_test.in @@ -59,7 +59,7 @@ esac @@ -53,3 +53,6 @@ index c5cf0f3b7..9df4dd350 100644 declare -a prio_uuid prio_uuid[2]="9b003904-d006-4ab3-97f1-73f547b7af1a" prio_uuid[3]="1aa5a658-5b02-414d-9b71-f7e6c151f0cd" +-- +2.21.3 + diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 3f89ba03..290eb60e 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -9,8 +9,8 @@ build = "build.rs" path = "pkg.rs" [[package.metadata.build-package.external-files]] -url = "https://ftp.gnu.org/gnu/grub/grub-2.04.tar.xz" -sha512 = "9c15c42d0cf5d61446b752194e3b628bb04be0fe6ea0240ab62b3d753784712744846e1f7c3651d8e0968d22012e6d713c38c44936d4004ded3ca4d4007babbb" +url = "https://cdn.amazonlinux.com/blobstore/a2f920abd554c7ab22af43c720198abcf5f78828c0543a0d7c65c654610eab26/grub2-2.06-2.amzn2.0.1.src.rpm" +sha512 = "0a30a75426f9030b9bab489b824d4cc51c864f2fef87df298ca4a725ecfb49dbd310f276740fadab64879ee1dfc60b35f52957b7cfc5ff023d856b536b0af04d" [build-dependencies] glibc = { path = "../glibc" } diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index a369dcec..ba845357 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -4,13 +4,17 @@ %global efidir /boot/efi/EFI/BOOT %global biosdir /boot/grub +# This is specific to the upstream source RPM, and will likely need to be +# updated for each new version. 
+%global gnulib_fixes gnulib-fixes-0e9febb5e + Name: %{_cross_os}grub -Version: 2.04 +Version: 2.06 Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ -Source0: https://ftp.gnu.org/gnu/grub/grub-%{version}.tar.xz +Source0: https://cdn.amazonlinux.com/blobstore/a2f920abd554c7ab22af43c720198abcf5f78828c0543a0d7c65c654610eab26/grub2-2.06-2.amzn2.0.1.src.rpm Source1: core.cfg Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch Patch0002: 0002-gpt-start-new-GPT-module.patch @@ -78,8 +82,34 @@ Summary: Tools for the bootloader with support for Linux and more %{summary}. %prep -%autosetup -n grub-%{version} -p1 +rpm2cpio %{S:0} | cpio -iu grub-%{version}.tar.xz \ + bootstrap bootstrap.conf \ + gitignore %{gnulib_fixes}.tar.gz \ + "*.patch" + +# Mimic prep from upstream spec to prepare for patching. +tar -xof grub-%{version}.tar.xz; rm grub-%{version}.tar.xz +%setup -TDn grub-%{version} +mv ../bootstrap{,.conf} . +mv ../gitignore .gitignore +tar -xof ../%{gnulib_fixes}.tar.gz; rm ../%{gnulib_fixes}.tar.gz +mv %{gnulib_fixes} gnulib +pushd gnulib +patch -p1 < ../../gnulib-amzn2-cflags.patch +rm ../../gnulib-amzn2-cflags.patch +popd cp unicode/COPYING COPYING.unicode +rm -f configure + +# Apply upstream and local patches. +git init +git config user.email 'user@localhost' +git config user.name 'user' +git add . +git commit -a -q -m "base" +git am --whitespace=nowarn ../*.patch %{patches} + +./bootstrap ./autogen.sh %global grub_cflags -pipe -fno-stack-protector -fno-strict-aliasing @@ -108,7 +138,9 @@ pushd bios-build --host="%{_build}" \ --target="i386" \ --with-platform="pc" \ + --with-utils=host \ --disable-grub-mkfont \ + --disable-rpm-sort \ --disable-werror \ --enable-efiemu=no \ --enable-device-mapper=no \ @@ -127,7 +159,9 @@ pushd efi-build --host="%{_build}" \ --target="%{_cross_arch}" \ --with-platform="efi" \ + --with-utils=host \ --disable-grub-mkfont \ + --disable-rpm-sort \ --disable-werror \ --enable-efiemu=no \ --enable-device-mapper=no \ @@ -178,9 +212,12 @@ popd %dir %{efidir} %{efidir}/%{_cross_grub_efi_image} %{_cross_sbindir}/grub-bios-setup +%exclude %{_cross_bashdir} %exclude %{_cross_infodir} +%exclude %{_cross_libexecdir} %exclude %{_cross_localedir} %exclude %{_cross_sysconfdir} +%exclude %{_cross_unitdir} %files modules %dir %{_cross_libdir}/grub @@ -203,14 +240,18 @@ popd %{_cross_bindir}/grub-render-label %{_cross_bindir}/grub-script-check %{_cross_bindir}/grub-syslinux2cfg +%{_cross_sbindir}/grub-get-kernel-settings %{_cross_sbindir}/grub-install %{_cross_sbindir}/grub-macbless %{_cross_sbindir}/grub-mkconfig %{_cross_sbindir}/grub-ofpathname %{_cross_sbindir}/grub-probe %{_cross_sbindir}/grub-reboot +%{_cross_sbindir}/grub-set-bootflag %{_cross_sbindir}/grub-set-default +%{_cross_sbindir}/grub-set-password %{_cross_sbindir}/grub-sparc64-setup +%{_cross_sbindir}/grub-switch-to-blscfg %dir %{_cross_datadir}/grub %{_cross_datadir}/grub/grub-mkconfig_lib diff --git a/packages/grub/latest-srpm-url.sh b/packages/grub/latest-srpm-url.sh new file mode 100755 index 00000000..7d5be9aa --- /dev/null +++ b/packages/grub/latest-srpm-url.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'yum install -q -y yum-utils && yumdownloader -q --source --urls grub2 | grep ^http' From fac8745816c62f1dfb655443d7f671d3b612afd0 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 4 Aug 2021 18:45:18 +0000 Subject: [PATCH 0520/1356] grub: set terminal output 
for EFI and BIOS Split the configs so we can change the default terminal output based on whether we're booting from the BIOS or EFI image. Signed-off-by: Ben Cressey --- packages/grub/bios.cfg | 11 +++++++++++ packages/grub/{core.cfg => efi.cfg} | 1 + packages/grub/grub.spec | 9 +++++---- 3 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 packages/grub/bios.cfg rename packages/grub/{core.cfg => efi.cfg} (90%) diff --git a/packages/grub/bios.cfg b/packages/grub/bios.cfg new file mode 100644 index 00000000..b5c750dc --- /dev/null +++ b/packages/grub/bios.cfg @@ -0,0 +1,11 @@ +serial +terminal_output serial +gptprio.next -d boot_dev -u boot_uuid +set root=$boot_dev +set prefix=($root)/grub +export boot_uuid +configfile /grub/grub.cfg +echo "boot failed (device ($boot_dev), uuid $boot_uuid)" +echo "rebooting in 30 seconds..." +sleep 30 +reboot diff --git a/packages/grub/core.cfg b/packages/grub/efi.cfg similarity index 90% rename from packages/grub/core.cfg rename to packages/grub/efi.cfg index 00a12e90..701822ca 100644 --- a/packages/grub/core.cfg +++ b/packages/grub/efi.cfg @@ -1,3 +1,4 @@ +terminal_output console gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index ba845357..1767ae36 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -15,7 +15,8 @@ Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ Source0: https://cdn.amazonlinux.com/blobstore/a2f920abd554c7ab22af43c720198abcf5f78828c0543a0d7c65c654610eab26/grub2-2.06-2.amzn2.0.1.src.rpm -Source1: core.cfg +Source1: bios.cfg +Source2: efi.cfg Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch Patch0002: 0002-gpt-start-new-GPT-module.patch Patch0003: 0003-gpt-rename-misnamed-header-location-fields.patch @@ -178,12 +179,12 @@ pushd bios-build %make_install mkdir -p %{buildroot}%{biosdir} grub2-mkimage \ - -c %{SOURCE1} \ + -c %{S:1} \ -d ./grub-core/ \ -O "i386-pc" \ -o "%{buildroot}%{biosdir}/core.img" \ -p "(hd0,gpt2)/boot/grub" \ - biosdisk ${MODS} + biosdisk serial ${MODS} install -m 0644 ./grub-core/boot.img \ %{buildroot}%{biosdir}/boot.img popd @@ -193,7 +194,7 @@ pushd efi-build %make_install mkdir -p %{buildroot}%{efidir} grub2-mkimage \ - -c %{SOURCE1} \ + -c %{S:2} \ -d ./grub-core/ \ -O "%{_cross_grub_efi_format}" \ -o "%{buildroot}%{efidir}/%{_cross_grub_efi_image}" \ From 28d945e10753b36c725ae7c93087b621a8427718 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 4 Aug 2021 18:47:54 +0000 Subject: [PATCH 0521/1356] grub: disable module loading The modules we need are all built into the firmware image. 
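For reference, roughly how those modules end up in the image: the spec's
grub2-mkimage invocation embeds the config file and links the listed
modules into the core image, so nothing has to be loaded from disk at
boot. The sketch below is illustrative only (paths shortened, module
list as defined in grub.spec at this point in the series), not a new
command added by this change:

    # Illustrative sketch, not part of the build:
    grub2-mkimage \
      -c bios.cfg \
      -O "i386-pc" \
      -o core.img \
      -p "(hd0,gpt2)/boot/grub" \
      biosdisk serial configfile echo ext2 gptprio linux normal part_gpt reboot sleep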
Signed-off-by: Ben Cressey --- packages/grub/bios.cfg | 1 + packages/grub/efi.cfg | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/grub/bios.cfg b/packages/grub/bios.cfg index b5c750dc..3cf5fb99 100644 --- a/packages/grub/bios.cfg +++ b/packages/grub/bios.cfg @@ -1,3 +1,4 @@ +set no_modules=y serial terminal_output serial gptprio.next -d boot_dev -u boot_uuid diff --git a/packages/grub/efi.cfg b/packages/grub/efi.cfg index 701822ca..cde1542b 100644 --- a/packages/grub/efi.cfg +++ b/packages/grub/efi.cfg @@ -1,3 +1,4 @@ +set no_modules=y terminal_output console gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev From e6763c49c36eb4a9b407f768e875810532ac456f Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 6 Aug 2021 04:02:44 +0000 Subject: [PATCH 0522/1356] grub: add zstd module This allows us to boot compressed kernels for platforms like aarch64 where the kernel does not support decompressing itself. Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 1767ae36..89692eac 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -172,7 +172,7 @@ pushd efi-build popd %install -MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep" +MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd" %if "%{_cross_arch}" == "x86_64" pushd bios-build From 529d5bd49acd550bbfb83713b5c80b0b21b08642 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 6 Aug 2021 04:55:52 +0000 Subject: [PATCH 0523/1356] create hybrid images for x86_64 Instead of a single "firmware" partition for either BIOS or EFI, we now create the BIOS partition for all architectures, and ignore it except for the x86_64 use case. Two EFI partitions are allocated out of previously reserved space. Only the first one is used today; using the second one would require a scheme to swap the partition types on upgrade, since EFI firmware will not understand the significance of the GPT priority bits. The result is a hybrid x86_64 image capable of booting under legacy BIOS or EFI. With BIOS, the MBR will point to the next GRUB stage in the BIOS boot partition. With EFI, the firmware will look for the EFI system partition. In either case, the GRUB image will find the right boot partition by checking GPT priorities, and load the configuration file from there. Signed-off-by: Ben Cressey --- tools/rpm2img | 96 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 56 insertions(+), 40 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 95c444c8..9d837b86 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -60,36 +60,53 @@ VERITY_HASH_BLOCK_SIZE=4096 # for the boot partition, where we set gptprio bits in the GUID-specific use # field, but we might as well do it for all of them. 
BOTTLEROCKET_BOOT_TYPECODE="6b636168-7420-6568-2070-6c616e657421" -EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" -if [[ "${ARCH}" == "x86_64" ]]; then - FIRM_NAME="BIOS-BOOT" - BOTTLEROCKET_FIRM_TYPECODE="ef02" -else - FIRM_NAME="EFI_BOOT" - BOTTLEROCKET_FIRM_TYPECODE="${EFI_SYSTEM_TYPECODE}" -fi +# Under BIOS, the firmware will transfer control to the MBR on the boot device, +# which will pass control to the GRUB stage 2 binary written to the BIOS boot +# partition. The BIOS does not attach any significance to this partition type, +# but GRUB knows to install itself there when we run `grub-bios-setup`. +BIOS_BOOT_TYPECODE="ef02" + +# Under EFI, the firmware will find the EFI system partition and execute the +# program at a platform-defined path like `bootx64.efi`. The partition type +# must match what the firmware expects. +EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" + +# Whichever entry point is used for booting the system, it's important to note +# that only one build of GRUB is involved - the one that's installed below when +# we run this script. GRUB understands the GPT priorities scheme we use to find +# the active boot partition; EFI and BIOS firmware does not. This is why we do +# not update GRUB during our system updates; we would have no way to revert to +# an earlier copy of the bootloader if it failed to boot. +# +# We may eventually want to have an active/passive scheme for EFI partitions, +# to allow for potential GRUB and shim updates on EFI platforms in cases where +# we need to deliver security fixes. For now, add a placeholder partition type +# and reserve space for an alternate bank. 
+EFI_BACKUP_TYPECODE="B39CE39C-0A00-B4AB-2D11-F18F8237A21C" truncate -s 2G "${DISK_IMAGE}" -# boot: 40M + root: 920M + hash: 10M + reserved: 30M = 1000M +# efi: 5M + boot: 40M + root: 920M + hash: 10M + reserved: 25M = 1000M # boot partition attributes (-A): 48 = gptprio priority bit; 56 = gptprio successful bit # partitions are backwards so that we don't make things inconsistent when specifying a wrong end sector :) sgdisk --clear \ -n 0:2005M:2047M -c 0:"BOTTLEROCKET-PRIVATE" -t 0:"${BOTTLEROCKET_PRIVATE_TYPECODE}" \ - -n 0:1975M:0 -c 0:"BOTTLEROCKET-RESERVED-B" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ - -n 0:1965M:0 -c 0:"BOTTLEROCKET-HASH-B" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ - -n 0:1045M:0 -c 0:"BOTTLEROCKET-ROOT-B" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ - -n 0:1005M:0 -c 0:"BOTTLEROCKET-BOOT-B" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"clear":48 -A 0:"clear":56 \ - -n 0:975M:0 -c 0:"BOTTLEROCKET-RESERVED-A" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ - -n 0:965M:0 -c 0:"BOTTLEROCKET-HASH-A" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ - -n 0:45M:0 -c 0:"BOTTLEROCKET-ROOT-A" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ - -n 0:5M:0 -c 0:"BOTTLEROCKET-BOOT-A" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"set":48 -A 0:"set":56 \ - -n 0:1M:0 -c 0:"${FIRM_NAME}" -t 0:"${BOTTLEROCKET_FIRM_TYPECODE}" \ + -n 0:1980M:0 -c 0:"BOTTLEROCKET-RESERVED-B" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ + -n 0:1970M:0 -c 0:"BOTTLEROCKET-HASH-B" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ + -n 0:1050M:0 -c 0:"BOTTLEROCKET-ROOT-B" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ + -n 0:1010M:0 -c 0:"BOTTLEROCKET-BOOT-B" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"clear":48 -A 0:"clear":56 \ + -n 0:1005M:0 -c 0:"EFI-BACKUP" -t 0:"${EFI_BACKUP_TYPECODE}" \ + -n 0:980M:0 -c 0:"BOTTLEROCKET-RESERVED-A" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ + -n 0:970M:0 -c 0:"BOTTLEROCKET-HASH-A" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ + -n 0:50M:0 -c 0:"BOTTLEROCKET-ROOT-A" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ + -n 0:10M:0 -c 0:"BOTTLEROCKET-BOOT-A" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"set":48 -A 0:"set":56 \ + -n 0:5M:0 -c 0:"EFI-SYSTEM" -t 0:"${EFI_SYSTEM_TYPECODE}" \ + -n 0:1M:0 -c 0:"BIOS-BOOT" -t 0:"${BIOS_BOOT_TYPECODE}" \ --sort --print "${DISK_IMAGE}" rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm @@ -101,7 +118,6 @@ mksquashfs \ rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* if [[ "${ARCH}" == "x86_64" ]]; then - SYS_ROOT="x86_64-bottlerocket-linux-gnu/sys-root" # MBR and BIOS-BOOT echo "(hd0) ${DISK_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" "${ROOT_MOUNT}/sbin/grub-bios-setup" \ @@ -112,30 +128,30 @@ if [[ "${ARCH}" == "x86_64" ]]; then "${DISK_IMAGE}" rm -vf "${ROOT_MOUNT}"/boot/grub/* "${ROOT_MOUNT}"/sbin/grub* -else - SYS_ROOT="aarch64-bottlerocket-linux-gnu/sys-root" - # For aarch64 we need an EFI partition instead, formatted - # FAT32 with the .efi binary at the correct path, eg /efi/boot. - # grub-mkimage has put bootaa64.efi at /boot/efi/EFI/BOOT - mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" - - # The 'recommended' size for the EFI partition is 100MB but our aarch64.efi - # only takes up around 700KB, so this will suffice for now. 
-  dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count=4
-  mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((4*2048))
-  mmd -i "${EFI_IMAGE}" ::/EFI
-  mmd -i "${EFI_IMAGE}" ::/EFI/BOOT
-  mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT/bootaa64.efi" ::/EFI/BOOT
-  dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=1
-
-  # Create the grub directory which grub-bios-setup would have otherwise done.
-  mkdir -p "${ROOT_MOUNT}/boot/grub"
 fi
 
+# We also need an EFI partition, formatted FAT32 with the
+# EFI binary at the correct path, eg /efi/boot. The grub
+# package has placed the image in /boot/efi/EFI/BOOT.
+mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}"
+
+# The 'recommended' size for the EFI partition is 100MB but our EFI
+# images are under 1MB, so this will suffice for now.
+dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count=5
+mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((5*2048))
+mmd -i "${EFI_IMAGE}" ::/EFI
+mmd -i "${EFI_IMAGE}" ::/EFI/BOOT
+mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT
+dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=5
+
+# Ensure that the grub directory exists.
+mkdir -p "${ROOT_MOUNT}/boot/grub"
+
 # Now that we're done messing with /, move /boot out of it
 mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}"
 
 # Set the Bottlerocket variant, version, and build-id
+SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root"
 echo "PRETTY_NAME=\"${PRETTY_NAME} ${VERSION_ID}\"" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release"
 echo "VARIANT_ID=${VARIANT}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release"
 echo "VERSION_ID=${VERSION_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release"
@@ -149,7 +165,7 @@ ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \
 mkfs.ext4 -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" 920M
 echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}"
 resize2fs -M "${ROOT_IMAGE}"
-dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=45
+dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=50
 
 # BOTTLEROCKET-VERITY-A
 truncate -s 8M "${VERITY_IMAGE}"
@@ -169,7 +185,7 @@ VERITY_DATA_512B_BLOCKS="$(($VERITY_DATA_4K_BLOCKS * 8))"
 VERITY_ROOT_HASH="$(grep '^Root hash:' <<<$veritysetup_output | awk '{ print $NF }')"
 VERITY_SALT="$(grep '^Salt:' <<<$veritysetup_output | awk '{ print $NF }')"
 veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}"
-dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=965
+dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=970
 
 # write GRUB config
 cat <<EOF > "${BOOT_MOUNT}/grub/grub.cfg"
@@ -196,7 +212,7 @@ BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \
 mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" 40M
 echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}"
 resize2fs -M "${BOOT_IMAGE}"
-dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=5
+dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=10
 
 # BOTTLEROCKET-PRIVATE
 

From ab350051479ac84506cd56a8905b4f7306b7d79b Mon Sep 17 00:00:00 2001
From: aashnasheth
Date: Tue, 20 Jul 2021 00:57:15 -0700
Subject: [PATCH 0524/1356] infrasys: create TUF infra in AWS using `cargo make create-infra`

Adds binary (and supporting files) to automatically spin up TUF infra
(S3 Bucket+Policy, KMS Keys, and populated root.json) across multiple
regions in a single account.

Edits pubsys-config (and supporting files) to accommodate new infrasys
fields in Infra.toml.
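For orientation, the new Infra.toml fields consumed by infrasys take
roughly the following shape. This is a minimal sketch only: the field
names come from the RepoConfig/S3Config/KMSKeyConfig structs referenced
in the code below, but the exact TOML table layout and the example
values are assumptions, not a tested configuration.

    [repo.default]
    # Names the [aws.s3.NAME] table below that hosts this repo's metadata.
    file_hosting_config_name = "TUF-Repo-S3"
    root_key_threshold = 1
    pub_key_threshold = 1
    # signing_keys / root_keys select a key backend (file, kms, or ssm);
    # for kms, KMSKeyConfig carries key_alias, regions, and available_keys.

    [aws.s3.TUF-Repo-S3]
    region = "us-west-2"
    s3_prefix = "/my-bottlerocket-repo"
    vpc_endpoint_id = "vpce-0123456789abcdef0"

root_role_url, targets_url, metadata_base_url, bucket_name, and stack_arn
are filled in by the tool and written back out to Infra.lock.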
--- tools/Cargo.lock | 98 +++++ tools/Cargo.toml | 1 + tools/infrasys/Cargo.toml | 30 ++ .../kms_key_setup.yml | 30 ++ .../cloudformation-templates/s3_setup.yml | 25 ++ tools/infrasys/src/error.rs | 165 ++++++++ tools/infrasys/src/keys.rs | 147 +++++++ tools/infrasys/src/main.rs | 335 ++++++++++++++++ tools/infrasys/src/root.rs | 193 +++++++++ tools/infrasys/src/s3.rs | 366 ++++++++++++++++++ tools/infrasys/src/shared.rs | 91 +++++ .../test_tomls/toml_yaml_conversion.toml | 12 + tools/pubsys-config/Cargo.toml | 1 + tools/pubsys-config/src/lib.rs | 79 +++- tools/pubsys-config/src/vmware.rs | 6 +- tools/pubsys-setup/src/main.rs | 3 +- tools/pubsys/src/repo.rs | 18 +- tools/pubsys/src/repo/refresh_repo/mod.rs | 2 +- 18 files changed, 1575 insertions(+), 27 deletions(-) create mode 100644 tools/infrasys/Cargo.toml create mode 100644 tools/infrasys/cloudformation-templates/kms_key_setup.yml create mode 100644 tools/infrasys/cloudformation-templates/s3_setup.yml create mode 100644 tools/infrasys/src/error.rs create mode 100644 tools/infrasys/src/keys.rs create mode 100644 tools/infrasys/src/main.rs create mode 100644 tools/infrasys/src/root.rs create mode 100644 tools/infrasys/src/s3.rs create mode 100644 tools/infrasys/src/shared.rs create mode 100644 tools/infrasys/test_tomls/toml_yaml_conversion.toml diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 28c5cf53..327519ff 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "addr2line" version = "0.16.0" @@ -62,6 +64,16 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" +[[package]] +name = "assert-json-diff" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-trait" version = "0.1.51" @@ -391,6 +403,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "dtoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" + [[package]] name = "duct" version = "0.13.5" @@ -756,6 +774,31 @@ dependencies = [ "regex", ] +[[package]] +name = "infrasys" +version = "0.1.0" +dependencies = [ + "assert-json-diff", + "async-trait", + "clap", + "hex", + "log", + "pubsys-config", + "rusoto_cloudformation", + "rusoto_core", + "rusoto_s3", + "serde_json", + "serde_yaml", + "sha2", + "shell-words", + "simplelog", + "snafu", + "structopt", + "tokio", + "toml", + "url", +] + [[package]] name = "instant" version = "0.1.10" @@ -798,6 +841,12 @@ version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + [[package]] name = "lock_api" version = "0.4.4" @@ -1151,6 +1200,7 @@ dependencies = [ "log", "parse-datetime", "serde", + "serde_yaml", "snafu", "toml", "url", @@ -1343,6 +1393,20 @@ 
dependencies = [ "winapi", ] +[[package]] +name = "rusoto_cloudformation" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e00db4cfcfc14725c720d881443f2c17607bd80aa20fecd1382a5936cc2db05d" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core", + "serde_urlencoded", + "xml-rs", +] + [[package]] name = "rusoto_core" version = "0.47.0" @@ -1429,6 +1493,19 @@ dependencies = [ "serde_json", ] +[[package]] +name = "rusoto_s3" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "048c2fe811a823ad5a9acc976e8bf4f1d910df719dcf44b15c3e96c5b7a51027" +dependencies = [ + "async-trait", + "bytes", + "futures", + "rusoto_core", + "xml-rs", +] + [[package]] name = "rusoto_signature" version = "0.47.0" @@ -1649,6 +1726,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6375dbd828ed6964c3748e4ef6d18e7a175d408ffe184bca01698d0c73f915a9" +dependencies = [ + "dtoa", + "indexmap", + "serde", + "yaml-rust", +] + [[package]] name = "sha2" version = "0.9.5" @@ -2301,6 +2390,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.4.1" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index c4502c3c..58b7f54b 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "infrasys", "buildsys", "pubsys", "pubsys-config", diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml new file mode 100644 index 00000000..e34fc6f6 --- /dev/null +++ b/tools/infrasys/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "infrasys" +version = "0.1.0" +license = "Apache-2.0 OR MIT" +authors = ["Aashna Sheth "] +edition = "2018" +publish = false + +[dependencies] +async-trait = "0.1.51" +clap = "2.33" +hex = "0.4.0" +log = "0.4.14" +pubsys-config = { path = "../pubsys-config/" } +rusoto_cloudformation = { version = "0.47", default-features = false, features = ["rustls"] } +rusoto_core = { version = "0.47", default-features = false, features = ["rustls"] } +rusoto_s3 = { version = "0.47", default-features = false, features = ["rustls"] } +serde_json = "1.0.66" +serde_yaml = "0.8.17" +sha2 = "0.9" +shell-words = "1.0.0" +simplelog = "0.10.0" +snafu = "0.6" +structopt = { version = "0.3", default-features = false } +tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS +toml = "0.5" +url = "2.2.2" + +[dev-dependencies] +assert-json-diff = "2.0.1" diff --git a/tools/infrasys/cloudformation-templates/kms_key_setup.yml b/tools/infrasys/cloudformation-templates/kms_key_setup.yml new file mode 100644 index 00000000..09e3e113 --- /dev/null +++ b/tools/infrasys/cloudformation-templates/kms_key_setup.yml @@ -0,0 +1,30 @@ +Parameters: + Alias: + Description: "Required. 
Alias for KMS key to be created"
+    Type: String
+
+Resources:
+  KMSKey:
+    Type: AWS::KMS::Key
+    Properties:
+      KeySpec: RSA_3072
+      KeyUsage: SIGN_VERIFY
+      KeyPolicy:
+        Statement:
+          - Effect: Allow
+            Principal:
+              AWS: !Sub "arn:aws:iam::${AWS::AccountId}:root"
+            Action: "kms:*"
+            Resource: "*"
+
+  KMSKeyAlias:
+    Type: AWS::KMS::Alias
+    DependsOn:
+      - KMSKey
+    Properties:
+      AliasName: !Sub "alias/${Alias}"
+      TargetKeyId: !Ref KMSKey
+
+Outputs:
+  KeyId:
+    Value: !GetAtt KMSKey.Arn
diff --git a/tools/infrasys/cloudformation-templates/s3_setup.yml b/tools/infrasys/cloudformation-templates/s3_setup.yml
new file mode 100644
index 00000000..61cf5c66
--- /dev/null
+++ b/tools/infrasys/cloudformation-templates/s3_setup.yml
@@ -0,0 +1,25 @@
+Resources:
+  TUFRepoBucket:
+    Type: AWS::S3::Bucket
+    DeletionPolicy: Retain
+    Properties:
+      VersioningConfiguration:
+        Status: Enabled
+      AccessControl: LogDeliveryWrite
+      MetricsConfigurations:
+        - Id: BucketMetrics
+      BucketEncryption:
+        ServerSideEncryptionConfiguration:
+          - ServerSideEncryptionByDefault:
+              SSEAlgorithm: AES256
+      PublicAccessBlockConfiguration:
+        BlockPublicAcls: True
+        BlockPublicPolicy: True
+        IgnorePublicAcls: True
+        RestrictPublicBuckets: True
+
+Outputs:
+  BucketName:
+    Value: !Ref TUFRepoBucket
+  RDN:
+    Value: !GetAtt TUFRepoBucket.RegionalDomainName
diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs
new file mode 100644
index 00000000..8e474ef3
--- /dev/null
+++ b/tools/infrasys/src/error.rs
@@ -0,0 +1,165 @@
+use snafu::Snafu;
+use std::io;
+use std::path::PathBuf;
+
+#[derive(Debug, Snafu)]
+#[snafu(visibility = "pub(super)")]
+pub enum Error {
+    #[snafu(display(
+        "Failed to create CFN stack '{}' in '{}': {}",
+        stack_name,
+        region,
+        source
+    ))]
+    CreateStack {
+        stack_name: String,
+        region: String,
+        source: rusoto_core::RusotoError,
+    },
+
+    #[snafu(display(
+        "Received CREATE_FAILED status for CFN stack '{}' in '{}'",
+        stack_name,
+        region
+    ))]
+    CreateStackFailure { stack_name: String, region: String },
+
+    #[snafu(display("Error splitting shell command '{}': {}", command, source))]
+    CommandSplit {
+        command: String,
+        source: shell_words::ParseError,
+    },
+
+    #[snafu(display("Error reading Infra.toml: {}", source))]
+    Config { source: pubsys_config::Error },
+
+    #[snafu(display(
+        "Stuck in indefinite CREATE_IN_PROGRESS loop for CFN stack '{}' in '{}'",
+        stack_name,
+        region
+    ))]
+    CreateStackTimeout { stack_name: String, region: String },
+
+    #[snafu(display(
+        "Failed to fetch stack details for CFN stack '{}' in '{}': {}",
+        stack_name,
+        region,
+        source
+    ))]
+    DescribeStack {
+        stack_name: String,
+        region: String,
+        source: rusoto_core::RusotoError,
+    },
+
+    #[snafu(display("Missing environment variable '{}'", var))]
+    Environment {
+        var: String,
+        source: std::env::VarError,
+    },
+
+    #[snafu(display("File already exists at '{}'", path.display()))]
+    FileExists { path: PathBuf },
+
+    #[snafu(display("Failed to open file at '{}': {}", path.display(), source))]
+    FileOpen { path: PathBuf, source: io::Error },
+
+    #[snafu(display("Failed to read file at '{}': {}", path.display(), source))]
+    FileRead { path: PathBuf, source: io::Error },
+
+    #[snafu(display("Failed to write file at '{}': {}", path.display(), source))]
+    FileWrite { path: PathBuf, source: io::Error },
+
+    #[snafu(display("Failed to get bucket policy statement for bucket '{}'", bucket_name))]
+    GetPolicyStatement { bucket_name: String },
+
+    #[snafu(display("Failed to convert '{}' to yaml: {}", what, source))]
+    InvalidJson {
what: String, + source: serde_json::Error, + }, + + #[snafu(display("Invalid path '{}' for '{}'", path.display(), thing))] + InvalidPath { path: PathBuf, thing: String }, + + #[snafu(display("Publication/Root key threshold must be <= {}, currently {}", num_keys.to_string(), threshold))] + InvalidThreshold { threshold: String, num_keys: usize }, + + #[snafu(display("Failed to convert updated Infra.toml information to yaml: {}", source))] + InvalidYaml { source: serde_yaml::Error }, + + #[snafu(display( + "Failed to create keys due to invalid key config. Missing '{}'.", + missing + ))] + KeyConfig { missing: String }, + + #[snafu(display( + "Failed to create new keys or access pre-existing keys in available_keys list." + ))] + KeyCreation, + + #[snafu(display("Logger setup error: {}", source))] + Logger { source: log::SetLoggerError }, + + #[snafu(display("Infra.toml is missing '{}'", missing))] + MissingConfig { missing: String }, + + #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] + Mkdir { path: PathBuf, source: io::Error }, + + #[snafu(display("Failed to get parent of path '{}'", path.display()))] + Parent { path: PathBuf }, + + #[snafu(display("Failed to parse '{}' to int: {}", what, source))] + ParseInt { + what: String, + source: std::num::ParseIntError, + }, + + #[snafu(display("Failed to parse '{}' to a valid rusoto region: {}", what, source))] + ParseRegion { + what: String, + source: rusoto_core::region::ParseRegionError, + }, + + #[snafu(display( + "Failed to find field '{}' after attempting to create resource '{}'", + what, + resource_name + ))] + ParseResponse { what: String, resource_name: String }, + + #[snafu(display("Failed to convert '{}' to URL: {}", input, source))] + ParseUrl { + input: String, + source: url::ParseError, + }, + + #[snafu(display("Failed to push object to bucket '{}': {}", bucket_name, source))] + PutObject { + bucket_name: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display( + "Failed to update bucket policy for bucket '{}': {}", + bucket_name, + source + ))] + PutPolicy { + bucket_name: String, + source: rusoto_core::RusotoError, + }, + + #[snafu(display("Failed to create async runtime: {}", source))] + Runtime { source: std::io::Error }, + + #[snafu(display("'tuftool {}' returned {}", command, code))] + TuftoolResult { command: String, code: String }, + + #[snafu(display("Failed to start tuftool: {}", source))] + TuftoolSpawn { source: io::Error }, +} + +pub type Result = std::result::Result; diff --git a/tools/infrasys/src/keys.rs b/tools/infrasys/src/keys.rs new file mode 100644 index 00000000..7bd8c0b8 --- /dev/null +++ b/tools/infrasys/src/keys.rs @@ -0,0 +1,147 @@ +use async_trait::async_trait; +use pubsys_config::{KMSKeyConfig, SigningKeyConfig}; +use rusoto_cloudformation::{CloudFormation, CloudFormationClient, CreateStackInput}; +use rusoto_core::Region; +use snafu::{OptionExt, ResultExt}; +use std::fs; +use std::str::FromStr; + +use super::{error, shared, Result}; + +/// Creates keys using data stored in SigningKeyConfig enum +/// Output: Edits KMSConfig fields in place after creating new keys +pub async fn create_keys(signing_key_config: &mut SigningKeyConfig) -> Result<()> { + // An extra check even through these parameters are checked earlier in main.rs + check_signing_key_config(signing_key_config)?; + match signing_key_config { + SigningKeyConfig::file { .. } => (), + SigningKeyConfig::kms { config, .. 
} => { + config + .as_mut() + .context(error::MissingConfig { + missing: "config field for a kms key", + })? + .create_kms_keys() + .await?; + } + SigningKeyConfig::ssm { .. } => (), + } + Ok(()) +} + +pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result<()> { + match signing_key_config { + SigningKeyConfig::file { .. } => (), + SigningKeyConfig::kms { config, .. } => { + let config = config.as_ref().context(error::MissingConfig { + missing: "config field for a kms key", + })?; + + match ( + config.available_keys.is_empty(), + config.regions.is_empty(), + config.key_alias.as_ref(), + ) { + // everything is unspecified (no way to allocate a key_id) + (true, true, None) => error::KeyConfig { + missing: "an available_key or region/key_alias", + } + .fail()?, + // regions is populated, but no key alias + // (it doesn't matter if available keys are listed or not) + (_, false, None) => error::KeyConfig { + missing: "key_alias", + } + .fail()?, + // key alias is populated, but no key regions to create keys in + // (it doesn't matter if available keys are listed or not) + (_, true, Some(..)) => error::KeyConfig { missing: "region" }.fail()?, + _ => (), + }; + } + SigningKeyConfig::ssm { .. } => (), + } + Ok(()) +} + +/// Must create a trait because can't directly implement a method for an struct in an +/// external crate like KMSKeyConfig (which lives in pubsys-config/lib.rs) +#[async_trait] +trait KMSKeyConfigExt { + async fn create_kms_keys(&mut self) -> Result<()>; +} + +/// Creates new KMS keys using cloudformation in regions specified +/// Input Conditions: Alias+Region or AvailableKeys must be specified +/// Output: Populates KMSKeyConfig with information about resources created +/// 'available-keys' starts as a map of pre-existing keyids:regions and will end as a +/// map of pre-existing and generated keyids:regions, +/// 'key-stack-arns' starts empty and will end as a +/// map of keyids:stackarn if new keys are created +#[async_trait] +impl KMSKeyConfigExt for KMSKeyConfig { + async fn create_kms_keys(&mut self) -> Result<()> { + // Generating new keys (if regions is non-empty) + for region in self.regions.iter() { + let stack_name = format!( + "TUF-KMS-{}", + self.key_alias.as_ref().context(error::KeyConfig { + missing: "key_alias", + })? + ); + let cfn_client = CloudFormationClient::new( + Region::from_str(region).context(error::ParseRegion { what: region })?, + ); + let cfn_filepath = format!( + "{}/infrasys/cloudformation-templates/kms_key_setup.yml", + shared::getenv("BUILDSYS_TOOLS_DIR")? + ); + let cfn_template = fs::read_to_string(&cfn_filepath) + .context(error::FileRead { path: cfn_filepath })?; + + let stack_result = cfn_client + .create_stack(CreateStackInput { + parameters: Some(vec![shared::create_parameter( + "Alias".to_string(), + self.key_alias + .as_ref() + .context(error::KeyConfig { + missing: "key_alias", + })? 
+ .to_string(), + )]), + stack_name: stack_name.clone(), + template_body: Some(cfn_template.clone()), + ..Default::default() + }) + .await + .context(error::CreateStack { + stack_name: &stack_name, + region, + })?; + + let stack_arn = stack_result + .clone() + .stack_id + .context(error::ParseResponse { + what: "stack_id", + resource_name: &stack_name, + })?; + + let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; + let key_id = output_array[0] + .output_value + .as_ref() + .context(error::ParseResponse { + what: "outputs[0].output_value (key id)", + resource_name: stack_name, + })?; + self.available_keys + .insert(key_id.to_string(), region.to_string()); + self.key_stack_arns + .insert(key_id.to_string(), stack_arn.to_string()); + } + + Ok(()) + } +} diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs new file mode 100644 index 00000000..995bc24c --- /dev/null +++ b/tools/infrasys/src/main.rs @@ -0,0 +1,335 @@ +mod error; +mod keys; +mod root; +mod s3; +mod shared; + +use error::Result; +use log::{error, info}; +use pubsys_config::{InfraConfig, RepoConfig, S3Config, SigningKeyConfig}; +use sha2::{Digest, Sha512}; +use shared::KeyRole; +use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::HashMap; +use std::num::NonZeroUsize; +use std::path::{Path, PathBuf}; +use std::{fs, process}; +use structopt::StructOpt; +use tokio::runtime::Runtime; +use url::Url; + +// =^..^= =^..^= =^..^= SUB-COMMAND STRUCTS =^..^= =^..^= =^..^= + +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +struct Args { + #[structopt(global = true, long, default_value = "INFO")] + log_level: LevelFilter, + + // Path to Infra.toml (NOTE: must be specified before subcommand) + #[structopt(long, parse(from_os_str))] + infra_config_path: PathBuf, + + #[structopt(subcommand)] + subcommand: SubCommand, +} + +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +struct CreateInfraArgs { + /// Path to the root.json file. + #[structopt(long)] + root_role_path: PathBuf, +} + +#[derive(Debug, StructOpt)] +enum SubCommand { + /// Creates infrastructure specified in the Infra.toml file. + CreateInfra(CreateInfraArgs), +} + +// =^..^= =^..^= =^..^= MAIN METHODS =^..^= =^..^= =^..^= + +fn main() { + if let Err(e) = run() { + eprintln!("{}", e); + process::exit(1); + } +} + +fn run() -> Result<()> { + // Parse and store the args passed to the program + let args = Args::from_args(); + + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; + + match args.subcommand { + SubCommand::CreateInfra(ref run_task_args) => { + let rt = Runtime::new().context(error::Runtime)?; + rt.block_on(async { + create_infra(&args.infra_config_path, &run_task_args.root_role_path).await + }) + } + } +} + +fn check_infra_lock(toml_path: &Path) -> Result<()> { + let lock_path = toml_path + .parent() + .context(error::Parent { path: toml_path })? + .join("Infra.lock"); + + ensure!(!lock_path.is_file(), { + error!( + "It looks like you've already created some resources for your custom TUF repository because a lock file exists at '{}'. 
+ \nPlease clean up your TUF resources in AWS, delete Infra.lock, and run again.", + lock_path.display() + ); + error::FileExists { path: lock_path } + }); + Ok(()) +} + +/// Automates setting up infrastructure for a custom TUF repo +async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { + check_infra_lock(toml_path)?; + info!("Parsing Infra.toml..."); + let mut infra_config = InfraConfig::from_path(toml_path).context(error::Config)?; + let repos = infra_config + .repo + .as_mut() + .context(error::MissingConfig { missing: "repo" })?; + let s3_info_map = infra_config + .aws + .as_mut() + .context(error::MissingConfig { missing: "aws" })? + .s3 + .as_mut() + .context(error::MissingConfig { missing: "aws.s3" })?; + + for (repo_name, repo_config) in repos.iter_mut() { + // Validate repo_config and unwrap required optional data + let mut repo_info = ValidRepoInfo::new(repo_config, repo_name, s3_info_map)?; + + // Validate the key configurations and root file + keys::check_signing_key_config(repo_info.signing_keys)?; + keys::check_signing_key_config(repo_info.root_keys)?; + root::check_root(root_role_path)?; + + // Create the repo + let (s3_stack_arn, bucket_name, bucket_rdn) = + create_repo_infrastructure(&mut repo_info).await?; + *repo_info.stack_arn = Some(s3_stack_arn); + *repo_info.bucket_name = Some(bucket_name.clone()); + update_root_and_sign_root(&mut repo_info, root_role_path).await?; + + // Upload root.json. + info!("Uploading root.json to S3 bucket..."); + s3::upload_file( + &repo_info.s3_region, + &bucket_name, + &repo_info.prefix, + root_role_path, + ) + .await?; + + // Update infra_config with output parameters if not already set + if repo_info.metadata_base_url.is_none() { + *repo_info.metadata_base_url = Some( + Url::parse(format!("https://{}{}/", &bucket_rdn, &repo_info.prefix).as_str()) + .context(error::ParseUrl { input: &bucket_rdn })?, + ); + } + if repo_info.targets_url.is_none() { + *repo_info.targets_url = Some( + Url::parse( + format!("https://{}{}/targets/", &bucket_rdn, &repo_info.prefix).as_str(), + ) + .context(error::ParseUrl { input: &bucket_rdn })?, + ); + } + if repo_info.root_role_url.is_none() { + *repo_info.root_role_url = Some( + Url::parse( + format!("https://{}{}/root.json", &bucket_rdn, &repo_info.prefix).as_str(), + ) + .context(error::ParseUrl { input: &bucket_rdn })?, + ); + } + let root_role_data = fs::read_to_string(&root_role_path).context(error::FileRead { + path: root_role_path, + })?; + let mut d = Sha512::new(); + d.update(&root_role_data); + let digest = hex::encode(d.finalize()); + repo_config.root_role_sha512 = Some(digest); + } + + // Generate Infra.lock + info!("Writing Infra.lock..."); + let yaml_string = serde_yaml::to_string(&infra_config).context(error::InvalidYaml)?; + fs::write( + toml_path + .parent() + .context(error::Parent { path: toml_path })? 
+ .join("Infra.lock"), + yaml_string, + ) + .context(error::FileWrite { path: toml_path })?; + + info!("Complete!"); + Ok(()) +} + +struct ValidRepoInfo<'a> { + bucket_name: &'a mut Option, + metadata_base_url: &'a mut Option, + prefix: String, + pub_key_threshold: &'a NonZeroUsize, + root_key_threshold: &'a NonZeroUsize, + root_keys: &'a mut SigningKeyConfig, + root_role_url: &'a mut Option, + s3_region: &'a String, + s3_stack_name: String, + signing_keys: &'a mut SigningKeyConfig, + stack_arn: &'a mut Option, + targets_url: &'a mut Option, + vpce_id: &'a String, +} + +impl<'a> ValidRepoInfo<'a> { + fn new( + repo_config: &'a mut RepoConfig, + repo_name: &str, + s3_info_map: &'a mut HashMap, + ) -> Result { + let s3_stack_name = + repo_config + .file_hosting_config_name + .as_ref() + .context(error::MissingConfig { + missing: "file_hosting_config_name", + })?; + let s3_info = s3_info_map + .get_mut(s3_stack_name) + .context(error::MissingConfig { + missing: format!("aws.s3 config with name {}", s3_stack_name), + })?; + Ok(ValidRepoInfo { + s3_stack_name: s3_stack_name.to_string(), + s3_region: s3_info.region.as_ref().context(error::MissingConfig { + missing: format!("region for '{}' s3 config", s3_stack_name), + })?, + bucket_name: &mut s3_info.bucket_name, + stack_arn: &mut s3_info.stack_arn, + vpce_id: s3_info + .vpc_endpoint_id + .as_ref() + .context(error::MissingConfig { + missing: format!("vpc_endpoint_id for '{}' s3 config", s3_stack_name), + })?, + prefix: s3::format_prefix(&s3_info.s3_prefix), + signing_keys: repo_config + .signing_keys + .as_mut() + .context(error::MissingConfig { + missing: format!("signing_keys for '{}' repo config", repo_name), + })?, + root_keys: repo_config + .root_keys + .as_mut() + .context(error::MissingConfig { + missing: format!("root_keys for '{}' repo config", repo_name), + })?, + root_key_threshold: repo_config.root_key_threshold.as_mut().context( + error::MissingConfig { + missing: format!("root_key_threshold for '{}' repo config", repo_name), + }, + )?, + pub_key_threshold: repo_config.pub_key_threshold.as_ref().context( + error::MissingConfig { + missing: format!("pub_key_threshold for '{}' repo config", repo_name), + }, + )?, + root_role_url: &mut repo_config.root_role_url, + targets_url: &mut repo_config.targets_url, + metadata_base_url: &mut repo_config.metadata_base_url, + }) + } +} + +async fn create_repo_infrastructure( + repo_info: &'_ mut ValidRepoInfo<'_>, +) -> Result<(String, String, String)> { + // Create S3 bucket + info!("Creating S3 bucket..."); + let (s3_stack_arn, bucket_name, bucket_rdn) = + s3::create_s3_bucket(&repo_info.s3_region, &repo_info.s3_stack_name).await?; + + // Add Bucket Policy to newly created bucket + s3::add_bucket_policy( + &repo_info.s3_region, + &bucket_name, + &repo_info.prefix, + &repo_info.vpce_id, + ) + .await?; + + // Create root + publication keys + info!("Creating KMS Keys..."); + keys::create_keys(repo_info.signing_keys).await?; + keys::create_keys(repo_info.root_keys).await?; + Ok((s3_stack_arn, bucket_name, bucket_rdn)) +} + +async fn update_root_and_sign_root( + repo_info: &'_ mut ValidRepoInfo<'_>, + root_role_path: &Path, +) -> Result<()> { + // Create and populate (add/sign) root.json + info!("Creating and signing root.json..."); + root::create_root(root_role_path)?; + // Add keys (for both roles) + root::add_keys( + repo_info.signing_keys, + &KeyRole::Publication, + repo_info.pub_key_threshold, + &root_role_path.display().to_string(), + )?; + root::add_keys( + repo_info.root_keys, + 
&KeyRole::Root, + repo_info.root_key_threshold, + &root_role_path.display().to_string(), + )?; + // Sign root with all root keys + root::sign_root(repo_info.root_keys, &root_role_path.display().to_string())?; + Ok(()) +} + +// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^= + +#[cfg(test)] +mod tests { + use super::{fs, shared, InfraConfig}; + + #[test] + fn toml_yaml_conversion() { + let test_toml_path = format!( + "{}/test_tomls/toml_yaml_conversion.toml", + shared::getenv("CARGO_MANIFEST_DIR").unwrap() + ); + let toml_struct = InfraConfig::from_path(&test_toml_path).unwrap(); + let yaml_string = serde_yaml::to_string(&toml_struct).expect("Could not write to file!"); + + let test_yaml_path = format!( + "{}/test_tomls/toml_yaml_conversion.yml", + shared::getenv("CARGO_MANIFEST_DIR").unwrap() + ); + fs::write(&test_yaml_path, &yaml_string).expect("Could not write to file!"); + let decoded_yaml = InfraConfig::from_lock_path(&test_yaml_path).unwrap(); + + assert_eq!(toml_struct, decoded_yaml); + } +} diff --git a/tools/infrasys/src/root.rs b/tools/infrasys/src/root.rs new file mode 100644 index 00000000..819d92d8 --- /dev/null +++ b/tools/infrasys/src/root.rs @@ -0,0 +1,193 @@ +use super::{error, KeyRole, Result}; +use log::{trace, warn}; +use pubsys_config::SigningKeyConfig; +use rusoto_core::Region; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::HashMap; +use std::fs; +use std::num::NonZeroUsize; +use std::path::Path; +use std::process::Command; + +/// The tuftool macro wraps Command to simplify calls to tuftool, adding region functionality. +macro_rules! tuftool { + ($region:expr, $format_str:expr, $($format_arg:expr),*) => { + let arg_str = format!($format_str, $($format_arg),*); + trace!("tuftool arg string: {}", arg_str); + let args = shell_words::split(&arg_str).context(error::CommandSplit { command: &arg_str })?; + trace!("tuftool split args: {:#?}", args); + + let status = Command::new("tuftool") + .args(args) + .env("AWS_REGION", $region) + .status() + .context(error::TuftoolSpawn)?; + + ensure!(status.success(), error::TuftoolResult { + command: arg_str, + code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) + }); + } +} + +pub fn check_root(root_role_path: &Path) -> Result<()> { + ensure!(!root_role_path.is_file(), { + warn!("Cowardly refusing to overwrite the existing root.json at {}. Please manually delete it and run again.", root_role_path.display()); + error::FileExists { + path: root_role_path, + } + }); + Ok(()) +} + +/// Creates the directory where root.json will live and creates root.json itself according to details specified in root-role-path +pub fn create_root(root_role_path: &Path) -> Result<()> { + // Make /roles and /keys directories, if they don't exist, so we can write generated files. + let role_dir = root_role_path.parent().context(error::InvalidPath { + path: root_role_path, + thing: "root role", + })?; + fs::create_dir_all(role_dir).context(error::Mkdir { path: role_dir })?; + // Initialize root + tuftool!( + Region::default().name(), + "root init '{}'", + root_role_path.display() + ); + tuftool!( + Region::default().name(), + // TODO: expose expiration date as a configurable parameter + "root expire '{}' 'in 52 weeks'", + root_role_path.display() + ); + Ok(()) +} + +/// Adds keys to root.json according to key type +pub fn add_keys( + signing_key_config: &mut SigningKeyConfig, + role: &KeyRole, + threshold: &NonZeroUsize, + filepath: &str, +) -> Result<()> { + match signing_key_config { + SigningKeyConfig::file { .. 
} => (), + SigningKeyConfig::kms { key_id, config, .. } => add_keys_kms( + &config + .as_ref() + .context(error::MissingConfig { + missing: "config field for a kms key", + })? + .available_keys, + role, + threshold, + filepath, + key_id, + )?, + SigningKeyConfig::ssm { .. } => (), + } + Ok(()) +} + +/// Adds KMSKeys to root.json given root or publication type +/// Input: available-keys (keys to sign with), role (root or publication), threshold for role, filepath for root.JSON, +/// mutable key_id +/// Output: in-place edit of root.json and key_id with a valid publication key +/// (If key-id is populated, it will not change. Otherwise, it will be populated with a key-id of an available key) +fn add_keys_kms( + available_keys: &HashMap, + role: &KeyRole, + threshold: &NonZeroUsize, + filepath: &str, + key_id: &mut Option, +) -> Result<()> { + ensure!( + (*available_keys).len() >= (*threshold).get(), + error::InvalidThreshold { + threshold: threshold.to_string(), + num_keys: (*available_keys).len(), + } + ); + + match role { + KeyRole::Root => { + tuftool!( + Region::default().name(), + "root set-threshold '{}' root '{}' ", + filepath, + threshold.to_string() + ); + for (keyid, region) in available_keys.iter() { + tuftool!( + region, + "root add-key '{}' aws-kms:///'{}' --role root", + filepath, + keyid + ); + } + } + KeyRole::Publication => { + tuftool!( + Region::default().name(), + "root set-threshold '{}' snapshot '{}' ", + filepath, + threshold.to_string() + ); + tuftool!( + Region::default().name(), + "root set-threshold '{}' targets '{}' ", + filepath, + threshold.to_string() + ); + tuftool!( + Region::default().name(), + "root set-threshold '{}' timestamp '{}' ", + filepath, + threshold.to_string() + ); + for (keyid, region) in available_keys.iter() { + tuftool!( + region, + "root add-key '{}' aws-kms:///'{}' --role snapshot --role targets --role timestamp", + filepath, + keyid + ); + } + + // Set key_id using a publication key (if one is not already provided) + if key_id.is_none() { + *key_id = Some( + available_keys + .iter() + .next() + .context(error::KeyCreation)? + .0 + .to_string(), + ); + } + } + } + + Ok(()) +} + +/// Signs root with available_keys under root_keys (will have a different tuftool command depending on key type) +pub fn sign_root(signing_key_config: &SigningKeyConfig, filepath: &str) -> Result<()> { + match signing_key_config { + SigningKeyConfig::file { .. } => (), + SigningKeyConfig::kms { config, .. } => { + for (keyid, region) in config + .as_ref() + .context(error::MissingConfig { + missing: "KMS key details", + })? + .available_keys + .iter() + { + tuftool!(region, "root sign '{}' -k aws-kms:///'{}'", filepath, keyid); + } + } + SigningKeyConfig::ssm { .. 
} => (), + } + Ok(()) +} diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs new file mode 100644 index 00000000..1496d69e --- /dev/null +++ b/tools/infrasys/src/s3.rs @@ -0,0 +1,366 @@ +use rusoto_cloudformation::{CloudFormation, CloudFormationClient, CreateStackInput}; +use rusoto_core::Region; +use rusoto_s3::{ + GetBucketPolicyRequest, PutBucketPolicyRequest, PutObjectRequest, S3Client, StreamingBody, S3, +}; +use snafu::{OptionExt, ResultExt}; +use std::fs; +use std::fs::File; +use std::io::prelude::*; +use std::path::{Path, PathBuf}; +use std::str::FromStr; + +use super::{error, shared, Result}; + +pub fn format_prefix(prefix: &str) -> String { + if prefix.is_empty() { + return prefix.to_string(); + } + let formatted = { + if prefix.starts_with('/') { + prefix.to_string() + } else { + format!("/{}", prefix) + } + }; + if formatted.ends_with('/') { + formatted[..formatted.len() - 1].to_string() + } else if formatted.ends_with("/*") { + formatted[..formatted.len() - 2].to_string() + } else { + formatted + } +} + +/// Creates a *private* S3 Bucket using a CloudFormation template +/// Input: The region in which the bucket will be created and the name of the bucket +/// Output: The stack_arn of the stack w/ the S3 bucket, the CFN allocated bucket name, +/// and the bucket url (for the url fields in Infra.lock) +pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, String, String)> { + // TODO: Add support for accomodating pre-existing buckets (skip this creation process) + let cfn_client = CloudFormationClient::new( + Region::from_str(region).context(error::ParseRegion { what: region })?, + ); + let cfn_filepath: PathBuf = format!( + "{}/infrasys/cloudformation-templates/s3_setup.yml", + shared::getenv("BUILDSYS_TOOLS_DIR")? + ) + .into(); + let cfn_template = + fs::read_to_string(&cfn_filepath).context(error::FileRead { path: cfn_filepath })?; + let stack_result = cfn_client + .create_stack(CreateStackInput { + stack_name: stack_name.to_string(), + template_body: Some(cfn_template.clone()), + ..Default::default() + }) + .await + .context(error::CreateStack { stack_name, region })?; + // We don't have to wait for successful stack creation to grab the stack ARN + let stack_arn = stack_result + .clone() + .stack_id + .context(error::ParseResponse { + what: "stack_id", + resource_name: stack_name, + })?; + + // Grab the StackOutputs to get the Bucketname and BucketURL + let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; + let bucket_name = output_array[0] + .output_value + .as_ref() + .context(error::ParseResponse { + what: "outputs[0].output_value (bucket name)", + resource_name: stack_name, + })? + .to_string(); + let bucket_rdn = output_array[1] + .output_value + .as_ref() + .context(error::ParseResponse { + what: "outputs[1].output_value (bucket url)", + resource_name: stack_name, + })? 
+ .to_string(); + + Ok((stack_arn, bucket_name, bucket_rdn)) +} + +/// Adds a BucketPolicy allowing GetObject access to a specified VPC +/// Input: Region, Name of bucket, which prefix root.json should be put under, and vpcid +/// Note that the prefix parameter must have the format "//*" and the bucket name "" +/// Output: Doesn't need to save any metadata from this action +pub async fn add_bucket_policy( + region: &str, + bucket_name: &str, + prefix: &str, + vpcid: &str, +) -> Result<()> { + // Get old policy + let s3_client = + S3Client::new(Region::from_str(region).context(error::ParseRegion { what: region })?); + let mut policy: serde_json::Value = match s3_client + .get_bucket_policy(GetBucketPolicyRequest { + bucket: bucket_name.to_string(), + expected_bucket_owner: None, + }) + .await + { + Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponse { + what: "policy", + resource_name: bucket_name, + })?) + .context(error::InvalidJson { + what: format!("retrieved bucket policy for {}", &bucket_name), + })?, + + Err(..) => serde_json::from_str( + r#"{"Version": "2008-10-17", + "Statement": []}"#, + ) + .context(error::InvalidJson { + what: format!("new bucket policy for {}", &bucket_name), + })?, + }; + + // Create a new policy + let new_bucket_policy = serde_json::from_str(&format!( + r#"{{ + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::{}{}/*", + "Condition": {{ + "StringEquals": {{ + "aws:sourceVpce": "{}" + }} + }} + }}"#, + bucket_name, prefix, vpcid + )) + .context(error::InvalidJson { + what: format!("new bucket policy for {}", &bucket_name), + })?; + + // Append new policy onto old one + policy + .get_mut("Statement") + .context(error::GetPolicyStatement { bucket_name })? + .as_array_mut() + .context(error::GetPolicyStatement { bucket_name })? 
+ .push(new_bucket_policy); + + // Push the new policy as a string + s3_client + .put_bucket_policy(PutBucketPolicyRequest { + bucket: bucket_name.to_string(), + policy: serde_json::to_string(&policy).context(error::InvalidJson { + what: format!("new bucket policy for {}", &bucket_name), + })?, + ..Default::default() + }) + .await + .context(error::PutPolicy { bucket_name })?; + + Ok(()) +} + +/// Uploads root.json to S3 Bucket (automatically creates the folder that the bucket policy was scoped to or will simply add to it) +/// Input: Region, Name of bucket, which prefix root.json should be put under, and path to the S3 bucket CFN template +/// Note that the prefix parameter must have the format "/" and the bucket name "" +/// Output: Doesn't need to save any metadata from this action +pub async fn upload_file( + region: &str, + bucket_name: &str, + prefix: &str, + file_path: &Path, +) -> Result<()> { + let s3_client = + S3Client::new(Region::from_str(region).context(error::ParseRegion { what: region })?); + + // File --> Bytes + let mut file = File::open(file_path).context(error::FileOpen { path: file_path })?; + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer) + .context(error::FileRead { path: file_path })?; + + s3_client + .put_object(PutObjectRequest { + bucket: format!("{}{}", bucket_name, prefix), + key: "root.json".to_string(), + body: Some(StreamingBody::from(buffer)), + ..Default::default() + }) + .await + .context(error::PutObject { bucket_name })?; + + Ok(()) +} + +// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^= + +#[cfg(test)] +mod tests { + use super::format_prefix; + use assert_json_diff::assert_json_include; + + #[test] + fn format_prefix_test() { + let valid = "/prefix"; + let missing_slash = "prefix"; + let excess_ending_1 = "/prefix/"; + let excess_ending_2 = "/prefix/*"; + let slash_and_excess_ending = "prefix/*"; + let empty = ""; + let single_slash = "/"; + + assert_eq!("/prefix", format_prefix(&valid.to_string())); + assert_eq!("/prefix", format_prefix(&missing_slash.to_string())); + assert_eq!("/prefix", format_prefix(&excess_ending_1.to_string())); + assert_eq!("/prefix", format_prefix(&excess_ending_2.to_string())); + assert_eq!( + "/prefix", + format_prefix(&slash_and_excess_ending.to_string()) + ); + assert_eq!("", format_prefix(&empty.to_string())); + assert_eq!("", format_prefix(&single_slash.to_string())); + } + + #[test] + fn empty_bucket_policy() { + let mut policy: serde_json::Value = serde_json::from_str( + r#"{"Version": "2008-10-17", + "Statement": []}"#, + ) + .unwrap(); + + let new_bucket_policy = serde_json::from_str(&format!( + r#"{{ + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::{}{}/*", + "Condition": {{ + "StringEquals": {{ + "aws:sourceVpce": "{}" + }} + }} + }}"#, + "test-bucket-name".to_string(), + "/test-prefix".to_string(), + "testvpc123".to_string() + )) + .unwrap(); + + policy + .get_mut("Statement") + .unwrap() + .as_array_mut() + .unwrap() + .push(new_bucket_policy); + + let expected_policy: serde_json::Value = serde_json::from_str( + r#"{ + "Version": "2008-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", + "Condition": { + "StringEquals": { + "aws:sourceVpce": "testvpc123" + } + } + } + ] + }"#, + ) + .unwrap(); + + assert_json_include!(expected: expected_policy, actual: &policy); + } + + #[test] + fn populated_bucket_policy() { + let mut policy: serde_json::Value 
= serde_json::from_str( + r#"{ + "Version": "2008-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", + "Condition": { + "StringEquals": { + "aws:sourceVpce": "testvpc123" + } + } + } + ] + }"#, + ) + .unwrap(); + + let new_bucket_policy = serde_json::from_str(&format!( + r#"{{ + "Effect": "Deny", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::{}{}/*", + "Condition": {{ + "StringEquals": {{ + "aws:sourceVpce": "{}" + }} + }} + }}"#, + "test-bucket-name".to_string(), + "/test-prefix".to_string(), + "testvpc123".to_string() + )) + .unwrap(); + + policy + .get_mut("Statement") + .unwrap() + .as_array_mut() + .unwrap() + .push(new_bucket_policy); + + let expected_policy: serde_json::Value = serde_json::from_str( + r#"{ + "Version": "2008-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", + "Condition": { + "StringEquals": { + "aws:sourceVpce": "testvpc123" + } + } + }, + { + "Effect": "Deny", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", + "Condition": { + "StringEquals": { + "aws:sourceVpce": "testvpc123" + } + } + } + ] + }"#, + ) + .unwrap(); + + assert_json_include!(expected: expected_policy, actual: &policy); + } +} diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs new file mode 100644 index 00000000..addde7e3 --- /dev/null +++ b/tools/infrasys/src/shared.rs @@ -0,0 +1,91 @@ +use log::info; +use rusoto_cloudformation::{CloudFormation, CloudFormationClient, DescribeStacksInput, Parameter}; +use snafu::{ensure, OptionExt, ResultExt}; +use std::{env, thread, time}; +use structopt::StructOpt; + +use super::{error, Result}; + +#[derive(Debug, StructOpt)] +pub enum KeyRole { + Root, + Publication, +} + +/// Retrieve a BUILDSYS_* variable that we expect to be set in the environment +pub fn getenv(var: &str) -> Result { + env::var(var).context(error::Environment { var }) +} + +/// Generates a parameter type object used to specify parameters in CloudFormation templates +pub fn create_parameter(key: String, val: String) -> Parameter { + Parameter { + parameter_key: Some(key), + parameter_value: Some(val), + ..Default::default() + } +} + +/// Polls cfn_client for stack_name in region until it's ready +/// Once stack is created, we can grab the outputs (before this point, outputs are empty) +pub async fn get_stack_outputs( + cfn_client: &CloudFormationClient, + stack_name: &str, + region: &str, +) -> Result> { + let mut stack_outputs = cfn_client + .describe_stacks(DescribeStacksInput { + stack_name: Some(stack_name.to_string()), + ..Default::default() + }) + .await + .context(error::DescribeStack { stack_name, region })? 
+ .stacks + .context(error::ParseResponse { + what: "stacks", + resource_name: stack_name, + })?[0] + .clone(); + + // Checking that keys have been created so we can return updated outputs + let mut status = stack_outputs.stack_status; + // Max wait is 30 mins (90 attempts * 20s = 1800s = 30mins) + let mut max_attempts: u32 = 90; + while status != "CREATE_COMPLETE" { + ensure!( + max_attempts > 0, + error::CreateStackTimeout { stack_name, region } + ); + ensure!( + status != "CREATE_FAILED", + error::CreateStackFailure { stack_name, region } + ); + info!( + "Waiting for stack resources to be ready, current status is '{}'...", + status + ); + thread::sleep(time::Duration::from_secs(20)); + stack_outputs = cfn_client + .describe_stacks(DescribeStacksInput { + stack_name: Some(stack_name.to_string()), + ..Default::default() + }) + .await + .context(error::DescribeStack { stack_name, region })? + .stacks + .context(error::ParseResponse { + what: "stacks", + resource_name: stack_name, + })?[0] + .clone(); + status = stack_outputs.stack_status; + max_attempts -= 1; + } + + let output_array = stack_outputs.outputs.context(error::ParseResponse { + what: "outputs", + resource_name: stack_name, + })?; + + Ok(output_array) +} diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.toml b/tools/infrasys/test_tomls/toml_yaml_conversion.toml new file mode 100644 index 00000000..57508f58 --- /dev/null +++ b/tools/infrasys/test_tomls/toml_yaml_conversion.toml @@ -0,0 +1,12 @@ +[repo.default] + file_hosting_config_name = "TUF-Repo-S3-Buck" + signing_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } + root_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } + root_key_threshold = 1 + pub_key_threshold = 1 + +[aws] + [aws.s3.TUF-Repo-S3-Buck] + region = "us-west-2" + vpc_endpoint_id = "vpc-12345" + s3_prefix = "/my-bottlerocket-remix" diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index 88587682..1461cfca 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -16,3 +16,4 @@ serde = { version = "1.0", features = ["derive"] } snafu = "0.6" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } +serde_yaml = "0.8.17" diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 0e15e3ae..48477128 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -4,16 +4,17 @@ pub mod vmware; use crate::vmware::VmwareConfig; use chrono::Duration; use parse_datetime::parse_offset; -use serde::{Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize}; use snafu::ResultExt; use std::collections::{HashMap, VecDeque}; use std::convert::TryFrom; use std::fs; +use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use url::Url; /// Configuration needed to load and create repos -#[derive(Debug, Default, Deserialize)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct InfraConfig { // Repo subcommand config @@ -49,10 +50,31 @@ impl InfraConfig { Ok(Self::default()) } } + + /// Deserializes an InfraConfig from a Infra.lock file at a given path + pub fn from_lock_path
<P>
(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let infra_config_str = fs::read_to_string(path).context(error::File { path })?; + serde_yaml::from_str(&infra_config_str).context(error::InvalidLock { path }) + } +} + +/// S3-specific TUF infrastructure configuration +#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +pub struct S3Config { + pub region: Option, + #[serde(default)] + pub s3_prefix: String, + pub vpc_endpoint_id: Option, + pub stack_arn: Option, + pub bucket_name: Option, } /// AWS-specific infrastructure configuration -#[derive(Debug, Default, Deserialize)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct AwsConfig { #[serde(default)] @@ -62,10 +84,11 @@ pub struct AwsConfig { #[serde(default)] pub region: HashMap, pub ssm_prefix: Option, + pub s3: Option>, } /// AWS region-specific configuration -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct AwsRegionConfig { pub role: Option, @@ -76,12 +99,33 @@ pub struct AwsRegionConfig { // These variant names are lowercase because they have to match the text in Infra.toml, and it's // more common for TOML config to be lowercase. #[allow(non_camel_case_types)] -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub enum SigningKeyConfig { - file { path: PathBuf }, - kms { key_id: String }, - ssm { parameter: String }, + file { + path: PathBuf, + }, + kms { + key_id: Option, + #[serde(flatten)] + config: Option, + }, + ssm { + parameter: String, + }, +} + +/// AWS region-specific configuration +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +//#[serde(deny_unknown_fields)] +pub struct KMSKeyConfig { + #[serde(default)] + pub available_keys: HashMap, + pub key_alias: Option, + #[serde(default)] + pub regions: VecDeque, + #[serde(default)] + pub key_stack_arns: HashMap, } impl TryFrom for Url { @@ -91,8 +135,9 @@ impl TryFrom for Url { SigningKeyConfig::file { path } => Url::from_file_path(path), // We don't support passing profiles to tough in the name of the key/parameter, so for // KMS and SSM we prepend a slash if there isn't one present. - SigningKeyConfig::kms { key_id } => { - let key_id = if key_id.starts_with('/') { + SigningKeyConfig::kms { key_id, .. 
} => { + let mut key_id = key_id.unwrap_or_else(Default::default); + key_id = if key_id.starts_with('/') { key_id.to_string() } else { format!("/{}", key_id) @@ -112,16 +157,18 @@ impl TryFrom for Url { } /// Represents a Bottlerocket repo's location and the metadata needed to update the repo -#[derive(Debug, Default, Deserialize)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct RepoConfig { pub root_role_url: Option, pub root_role_sha512: Option, - pub signing_keys: Option, - + pub root_keys: Option, pub metadata_base_url: Option, pub targets_url: Option, + pub file_hosting_config_name: Option, + pub root_key_threshold: Option, + pub pub_key_threshold: Option, } /// How long it takes for each metadata type to expire @@ -174,6 +221,12 @@ mod error { source: toml::de::Error, }, + #[snafu(display("Invalid lock file at '{}': {}", path.display(), source))] + InvalidLock { + path: PathBuf, + source: serde_yaml::Error, + }, + #[snafu(display("Missing config: {}", what))] MissingConfig { what: String }, } diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs index fa3b521b..a97e8e10 100644 --- a/tools/pubsys-config/src/vmware.rs +++ b/tools/pubsys-config/src/vmware.rs @@ -1,7 +1,7 @@ //! The vmware module owns the definition and loading process for our VMware configuration sources. use lazy_static::lazy_static; use log::debug; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; use std::collections::HashMap; use std::path::{Path, PathBuf}; @@ -27,7 +27,7 @@ const GOVC_RESOURCE_POOL: &str = "GOVC_RESOURCE_POOL"; const GOVC_FOLDER: &str = "GOVC_FOLDER"; /// VMware-specific infrastructure configuration -#[derive(Debug, Default, Deserialize)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct VmwareConfig { #[serde(default)] @@ -42,7 +42,7 @@ pub struct VmwareConfig { /// Fields are optional here because this struct is used to gather environment variables, common /// config, and datacenter-specific configuration, each of which may not have the complete set of /// fields. It is used to build a complete datacenter configuration (hence the "Builder" name). -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Serialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct DatacenterBuilder { pub vsphere_url: Option, diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index 50f75423..77277601 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -76,8 +76,7 @@ fn run() -> Result<()> { let args = Args::from_args(); // SimpleLogger will send errors to stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()) - .context(error::Logger)?; + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; // Make /roles and /keys directories, if they don't exist, so we can write generated files. 
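With the reworked `SigningKeyConfig::kms` variant and the flattened, optional `KMSKeyConfig` shown above, an Infra.toml snippet like the test fixture's `signing_keys = { kms = { available_keys = { "<key-id>" = "<region>" } } }` can omit `key_id` entirely; infrasys later fills it in from `available_keys`. A minimal sketch of that deserialization, using simplified stand-in types rather than the real pubsys-config structs and assuming the `serde` (with derive) and `toml` crates:

// Sketch only: stand-ins for SigningKeyConfig / KMSKeyConfig showing how the
// flattened, optional KMS details deserialize from an Infra.toml-style snippet.
use serde::Deserialize;
use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct KmsDetails {
    #[serde(default)]
    available_keys: HashMap<String, String>, // KMS key id -> region
}

#[allow(non_camel_case_types)]
#[derive(Debug, Deserialize)]
enum SigningKeys {
    kms {
        key_id: Option<String>,
        #[serde(flatten)]
        config: Option<KmsDetails>,
    },
}

#[derive(Debug, Deserialize)]
struct RepoSection {
    signing_keys: SigningKeys,
}

fn main() {
    let snippet = r#"
signing_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } }
"#;
    let repo: RepoSection = toml::from_str(snippet).expect("could not parse snippet");
    // key_id is None here; add_keys_kms picks one of the available keys later.
    println!("{:#?}", repo.signing_keys);
}

This is also why `get_signing_key_source` in repo.rs has to treat `key_id` as optional and fail with `MissingConfig` when no publication key has been recorded yet.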
let role_dir = args.root_role_path.parent().context(error::Path { diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 6c9630a1..be355711 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -388,20 +388,22 @@ fn is_file_not_found_error(e: &tough::error::Error) -> bool { } /// Gets the corresponding `KeySource` according to the signing key config from Infra.toml -fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Box { +fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result> { match signing_key_config { - SigningKeyConfig::file { path } => Box::new(LocalKeySource { path: path.clone() }), - SigningKeyConfig::kms { key_id } => Box::new(KmsKeySource { + SigningKeyConfig::file { path } => Ok(Box::new(LocalKeySource { path: path.clone() })), + SigningKeyConfig::kms { key_id, .. } => Ok(Box::new(KmsKeySource { profile: None, - key_id: key_id.clone(), + key_id: key_id + .clone() + .context(error::MissingConfig { missing: "key_id" })?, client: None, signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, - }), - SigningKeyConfig::ssm { parameter } => Box::new(SsmKeySource { + })), + SigningKeyConfig::ssm { parameter } => Ok(Box::new(SsmKeySource { profile: None, parameter_name: parameter.clone(), key_id: None, - }), + })), } } @@ -506,7 +508,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let signing_key_config = repo_config.signing_keys.as_ref(); let key_source = if let Some(signing_key_config) = signing_key_config { - get_signing_key_source(signing_key_config) + get_signing_key_source(signing_key_config)? } else { ensure!( repo_args.default_key_path.exists(), diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 26683869..5f081625 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -152,7 +152,7 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() let signing_key_config = repo_config.signing_keys.as_ref(); let key_source = if let Some(signing_key_config) = signing_key_config { - get_signing_key_source(signing_key_config) + get_signing_key_source(signing_key_config)? 
} else { ensure!( refresh_repo_args.default_key_path.exists(), From 6294ced1cbd6c0e95999dd466c0d347064e45bdd Mon Sep 17 00:00:00 2001 From: aashnasheth Date: Fri, 27 Aug 2021 15:57:24 -0700 Subject: [PATCH 0525/1356] pubsys: add lock file integration --- tools/Cargo.lock | 1 + tools/infrasys/src/main.rs | 5 +- tools/pubsys-config/Cargo.toml | 2 +- tools/pubsys-config/src/lib.rs | 63 +++++++++++++++++-- tools/pubsys-setup/src/main.rs | 11 +--- tools/pubsys/Cargo.toml | 1 + tools/pubsys/src/aws/ami/mod.rs | 7 +-- tools/pubsys/src/aws/promote_ssm/mod.rs | 9 ++- tools/pubsys/src/aws/publish_ami/mod.rs | 7 +-- tools/pubsys/src/aws/ssm/mod.rs | 8 +-- tools/pubsys/src/repo.rs | 42 ++++++++++--- .../pubsys/src/repo/check_expirations/mod.rs | 9 +-- tools/pubsys/src/repo/refresh_repo/mod.rs | 9 +-- tools/pubsys/src/repo/validate_repo/mod.rs | 9 +-- tools/pubsys/src/vmware/upload_ova/mod.rs | 10 +-- 15 files changed, 121 insertions(+), 72 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 327519ff..e5583fea 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1169,6 +1169,7 @@ dependencies = [ "rusoto_credential", "rusoto_ebs", "rusoto_ec2", + "rusoto_kms", "rusoto_signature", "rusoto_ssm", "rusoto_sts", diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 995bc24c..6464189f 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -75,10 +75,7 @@ fn run() -> Result<()> { } fn check_infra_lock(toml_path: &Path) -> Result<()> { - let lock_path = toml_path - .parent() - .context(error::Parent { path: toml_path })? - .join("Infra.lock"); + let lock_path = InfraConfig::compute_lock_path(toml_path).context(error::Config)?; ensure!(!lock_path.is_file(), { error!( diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index 1461cfca..816408af 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -13,7 +13,7 @@ lazy_static = "1.4" log = "0.4" parse-datetime = { path = "../../sources/parse-datetime" } serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.8.17" snafu = "0.6" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } -serde_yaml = "0.8.17" diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 48477128..76b93121 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -3,9 +3,10 @@ pub mod vmware; use crate::vmware::VmwareConfig; use chrono::Duration; +use log::info; use parse_datetime::parse_offset; use serde::{Deserialize, Deserializer, Serialize}; -use snafu::ResultExt; +use snafu::{OptionExt, ResultExt}; use std::collections::{HashMap, VecDeque}; use std::convert::TryFrom; use std::fs; @@ -38,6 +39,16 @@ impl InfraConfig { toml::from_str(&infra_config_str).context(error::InvalidToml { path }) } + /// Deserializes an InfraConfig from a Infra.lock file at a given path + pub fn from_lock_path
<P>
(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let infra_config_str = fs::read_to_string(path).context(error::File { path })?; + serde_yaml::from_str(&infra_config_str).context(error::InvalidLock { path }) + } + /// Deserializes an InfraConfig from a given path, if it exists, otherwise builds a default /// config pub fn from_path_or_default
<P>
(path: P) -> Result @@ -51,14 +62,51 @@ impl InfraConfig { } } - /// Deserializes an InfraConfig from a Infra.lock file at a given path - pub fn from_lock_path
<P>
(path: P) -> Result + /// Deserializes an InfraConfig from Infra.lock, if it exists, otherwise uses Infra.toml + /// If the default flag is true, will create a default config if Infra.toml doesn't exist + pub fn from_path_or_lock(path: &Path, default: bool) -> Result { + let lock_path = Self::compute_lock_path(&path)?; + if lock_path.exists() { + info!( + "Found infra config at path: {}", + lock_path.display() + ); + Self::from_lock_path(lock_path) + } else if default { + Self::from_path_or_default(&path) + } else { + info!( + "Found infra config at path: {}", + path.display() + ); + Self::from_path(&path) + } + } + + /// Looks for a file named `Infra.lock` in the same directory as the file named by + /// `infra_config_path`. Returns true if the `Infra.lock` file exists, or if `infra_config_path` + /// exists. Returns an error if the directory of `infra_config_path` cannot be found. + pub fn lock_or_infra_config_exists
<P>
(infra_config_path: P) -> Result where P: AsRef, { - let path = path.as_ref(); - let infra_config_str = fs::read_to_string(path).context(error::File { path })?; - serde_yaml::from_str(&infra_config_str).context(error::InvalidLock { path }) + let lock_path = Self::compute_lock_path(&infra_config_path)?; + Ok(lock_path.exists() || infra_config_path.as_ref().exists()) + } + + /// Returns the file path to a file named `Infra.lock` in the same directory as the file named + /// by `infra_config_path`. + pub fn compute_lock_path
<P>
(infra_config_path: P) -> Result + where + P: AsRef, + { + Ok(infra_config_path + .as_ref() + .parent() + .context(error::Parent { + path: infra_config_path.as_ref(), + })? + .join("Infra.lock")) } } @@ -229,6 +277,9 @@ mod error { #[snafu(display("Missing config: {}", what))] MissingConfig { what: String }, + + #[snafu(display("Failed to get parent of path: {}", path.display()))] + Parent { path: PathBuf }, } } pub use error::Error; diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index 77277601..838b48b0 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -177,14 +177,9 @@ fn run() -> Result<()> { fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option)> { let (mut root_role_path, mut key_url) = (None, None); - if args.infra_config_path.exists() { - info!( - "Found infra config at path: {}", - args.infra_config_path.display() - ); - - let infra_config = - InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + if InfraConfig::lock_or_infra_config_exists(&args.infra_config_path).context(error::Config)? { + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(error::Config)?; trace!("Parsed infra config: {:?}", infra_config); // Check whether the user has the relevant repo defined in their Infra.toml. diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 01f69d76..5cd08577 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -26,6 +26,7 @@ rusoto_core = { version = "0.47.0", default-features = false, features = ["rustl rusoto_credential = "0.47.0" rusoto_ebs = { version = "0.47.0", default-features = false, features = ["rustls"] } rusoto_ec2 = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_kms = { version = "0.47.0", default-features = false, features = ["rustls"] } rusoto_signature = "0.47.0" rusoto_ssm = { version = "0.47.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.47.0", default-features = false, features = ["rustls"] } diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 47aec810..9f3d9acd 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -91,12 +91,9 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { let mut amis = HashMap::new(); - info!( - "Checking for infra config at path: {}", - args.infra_config_path.display() - ); + // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = - InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(|| Default::default()); diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 4c96335b..e27f3507 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -52,11 +52,10 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - info!( - "Using infra config from path: {}", - args.infra_config_path.display() - ); - let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + // If a lock file exists, use that, 
otherwise use Infra.toml + let infra_config = + InfraConfig::from_path_or_lock(&args.infra_config_path, false).context(error::Config)?; + trace!("Parsed infra config: {:#?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index c3ff5719..0e692df9 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -85,12 +85,9 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { } ); - info!( - "Checking for infra config at path: {}", - args.infra_config_path.display() - ); + // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = - InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index e9430be6..d2245921 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -56,11 +56,9 @@ pub(crate) struct SsmArgs { pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - info!( - "Using infra config from path: {}", - args.infra_config_path.display() - ); - let infra_config = InfraConfig::from_path(&args.infra_config_path).context(error::Config)?; + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = + InfraConfig::from_path_or_lock(&args.infra_config_path, false).context(error::Config)?; trace!("Parsed infra config: {:#?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index be355711..493fb4f9 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -9,13 +9,18 @@ use chrono::{DateTime, Utc}; use lazy_static::lazy_static; use log::{debug, info, trace, warn}; use parse_datetime::parse_datetime; -use pubsys_config::{InfraConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig}; +use pubsys_config::{ + InfraConfig, KMSKeyConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig, +}; +use rusoto_core::Region; +use rusoto_kms::KmsClient; use semver::Version; use snafu::{ensure, OptionExt, ResultExt}; use std::convert::TryInto; use std::fs::{self, File}; use std::num::NonZeroU64; use std::path::{Path, PathBuf}; +use std::str::FromStr; use structopt::StructOpt; use tempfile::NamedTempFile; use tough::{ @@ -391,12 +396,19 @@ fn is_file_not_found_error(e: &tough::error::Error) -> bool { fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result> { match signing_key_config { SigningKeyConfig::file { path } => Ok(Box::new(LocalKeySource { path: path.clone() })), - SigningKeyConfig::kms { key_id, .. } => Ok(Box::new(KmsKeySource { + SigningKeyConfig::kms { key_id, config, .. 
} => Ok(Box::new(KmsKeySource { profile: None, key_id: key_id .clone() .context(error::MissingConfig { missing: "key_id" })?, - client: None, + client: { + let key_id_val = key_id + .clone() + .context(error::MissingConfig { missing: "key_id" })?; + config + .as_ref() + .map_or(Ok(None), |config_val| get_client(&config_val, &key_id_val))? + }, signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, })), SigningKeyConfig::ssm { parameter } => Ok(Box::new(SsmKeySource { @@ -407,6 +419,17 @@ fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result Result> { + if let Some(region) = config.available_keys.get(key_id) { + Ok(Some(KmsClient::new( + Region::from_str(region).context(error::ParseRegion { what: region })?, + ))) + } else { + Ok(None) + } +} + /// Common entrypoint from main() pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let metadata_out_dir = repo_args @@ -426,12 +449,9 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Build repo =^..^= =^..^= =^..^= =^..^= - info!( - "Checking for infra config at path: {}", - args.infra_config_path.display() - ); + // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = - InfraConfig::from_path_or_default(&args.infra_config_path).context(error::Config)?; + InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; trace!("Using infra config: {:?}", infra_config); // If the user has the requested (or "default") repo defined in their Infra.toml, use it, @@ -670,6 +690,12 @@ mod error { #[snafu(display("Non-UTF8 path '{}' not supported", path.display()))] NonUtf8Path { path: PathBuf }, + #[snafu(display("Failed to parse {} to a valid rusoto region: {}", what, source))] + ParseRegion { + what: String, + source: rusoto_core::region::ParseRegionError, + }, + #[snafu(display("Invalid URL '{}': {}", input, source))] ParseUrl { input: String, diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index dd726054..11ad4aaf 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -130,12 +130,9 @@ fn check_expirations( /// Common entrypoint from main() pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> Result<()> { - info!( - "Using infra config from path: {}", - args.infra_config_path.display() - ); - let infra_config = - InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(repo_error::Config)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config .repo diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 5f081625..5bdea815 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -128,12 +128,9 @@ fn refresh_repo( /// Common entrypoint from main() pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<(), Error> { - info!( - "Using infra config from path: {}", - args.infra_config_path.display() - ); - let infra_config = - InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + 
.context(repo_error::Config)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 46aff5a7..327eeb8b 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -122,12 +122,9 @@ fn validate_repo( /// Common entrypoint from main() pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { - info!( - "Using infra config from path: {}", - args.infra_config_path.display() - ); - let infra_config = - InfraConfig::from_path(&args.infra_config_path).context(repo_error::Config)?; + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(repo_error::Config)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config .repo diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs index 79bfe1ee..3634327e 100644 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -45,13 +45,9 @@ pub(crate) struct UploadArgs { /// Common entrypoint from main() pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { - // Get infra config - info!( - "Checking for infra config at path: {}", - args.infra_config_path.display() - ); - let infra_config = - InfraConfig::from_path_or_default(&args.infra_config_path).context(error::InfraConfig)?; + // If a lock file exists, use that, otherwise use Infra.toml or default + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) + .context(error::InfraConfig)?; trace!("Using infra config: {:?}", infra_config); let vmware = infra_config From 73a17fd0004ee90d5cf1ae446fcb47042e634194 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 14 Sep 2021 13:22:58 -0700 Subject: [PATCH 0526/1356] buildsys: Add build-package.releases-url key for packager notes --- tools/buildsys/src/manifest.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 49d4c341..77e11ae5 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -51,6 +51,13 @@ reads BUILDSYS_VARIANT.) variant-sensitive = true ``` +`releases-url` is ignored by buildsys, but can be used by packager maintainers +to indicate a good URL for checking whether the software has had a new release. +``` +[package.metadata.build-package] +releases-url = "https://www.example.com/releases" +``` + ## Metadata for variants `included-packages` is a list of packages that should be included in a variant. @@ -187,6 +194,7 @@ struct Metadata { pub(crate) struct BuildPackage { pub(crate) external_files: Option>, pub(crate) package_name: Option, + pub(crate) releases_url: Option, pub(crate) source_groups: Option>, pub(crate) variant_sensitive: Option, } From 5b1cfce1a27e1aad78a1328e718574f2c3360164 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 14 Sep 2021 23:32:33 +0000 Subject: [PATCH 0527/1356] Add HOME_URL, SUPPORT_URL, and BUG_REPORT_URL to /etc/os-release --- tools/rpm2img | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 9d837b86..b74f261f 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -152,10 +152,15 @@ mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" # Set the Bottlerocket variant, version, and build-id SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" -echo "PRETTY_NAME=\"${PRETTY_NAME} ${VERSION_ID}\"" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -echo "VARIANT_ID=${VARIANT}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -echo "VERSION_ID=${VERSION_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -echo "BUILD_ID=${BUILD_ID}" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +cat <> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" +PRETTY_NAME="${PRETTY_NAME} ${VERSION_ID}" +VARIANT_ID=${VARIANT} +VERSION_ID=${VERSION_ID} +BUILD_ID=${BUILD_ID} +HOME_URL="https://github.com/bottlerocket-os/bottlerocket" +SUPPORT_URL="https://github.com/bottlerocket-os/bottlerocket/discussions" +BUG_REPORT_URL="https://github.com/bottlerocket-os/bottlerocket/issues" +EOF # BOTTLEROCKET-ROOT-A mkdir -p "${ROOT_MOUNT}/lost+found" From b71ac7d307ea1a42e52a84b1b3a0cda9e380e040 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Tue, 14 Sep 2021 23:33:06 +0000 Subject: [PATCH 0528/1356] Add variant to PRETTY_NAME in /etc/os-release This commit creates a VERSION field which is comprised of the VERSION_ID and the variant in parenthesis. The VERSION field replaces VERSION_ID in PRETTY_NAME. --- tools/rpm2img | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index b74f261f..75637bb8 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -152,8 +152,10 @@ mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" # Set the Bottlerocket variant, version, and build-id SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" +VERSION="${VERSION_ID} (${VARIANT})" cat <> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -PRETTY_NAME="${PRETTY_NAME} ${VERSION_ID}" +VERSION="${VERSION}" +PRETTY_NAME="${PRETTY_NAME} ${VERSION}" VARIANT_ID=${VARIANT} VERSION_ID=${VERSION_ID} BUILD_ID=${BUILD_ID} From 78f997ac0b747a7d9fc3299268c29879f35be6e6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 13 Sep 2021 13:30:10 -0700 Subject: [PATCH 0529/1356] tools: cargo update --- tools/Cargo.lock | 243 +++++++++++++++++++++++------------------------ 1 file changed, 120 insertions(+), 123 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index e5583fea..7a15eaa3 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -37,9 +37,9 @@ dependencies = [ [[package]] name = "argh" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91792f088f87cdc7a2cfb1d617fa5ea18d7f1dc22ef0e1b5f82f3157cdc522be" +checksum = "f023c76cd7975f9969f8e29f0e461decbdc7f51048ce43427107a3d192f1c9bf" dependencies = [ "argh_derive", "argh_shared", @@ -47,9 +47,9 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4eb0c0c120ad477412dc95a4ce31e38f2113e46bd13511253f79196ca68b067" +checksum = "48ad219abc0c06ca788aface2e3a1970587e3413ab70acd20e54b6ec524c1f8f" dependencies = [ "argh_shared", "heck", @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "argh_shared" -version = "0.1.4" +version = "0.1.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "781f336cc9826dbaddb9754cb5db61e64cab4f69668bd19dcc4a0394a86f4cb1" +checksum = "38de00daab4eac7d753e97697066238d67ce9d7e2d823ab4f72fe14af29f3f33" [[package]] name = "assert-json-diff" @@ -125,9 +125,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "block-buffer" @@ -175,9 +175,9 @@ checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cargo-readme" @@ -196,9 +196,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" +checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" [[package]] name = "cfg-if" @@ -288,9 +288,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] name = "cpufeatures" -version = "0.1.5" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" dependencies = [ "libc", ] @@ -466,9 +466,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adc00f486adfc9ce99f77d717836f0c5aa84965eb0b4f051f4e83f7cab53f8b" +checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" dependencies = [ "futures-channel", "futures-core", @@ -481,9 +481,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74ed2411805f6e4e3d9bc904c95d5d423b89b3b25dc0250aa74729de20629ff9" +checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" dependencies = [ "futures-core", "futures-sink", @@ -491,15 +491,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" +checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" [[package]] name = "futures-executor" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0d535a57b87e1ae31437b892713aee90cd2d7b0ee48727cd11fc72ef54761c" +checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" dependencies = [ "futures-core", "futures-task", @@ -508,15 +508,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b0e06c393068f3a6ef246c75cdca793d6a46347e75286933e5e75fd2fd11582" +checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" [[package]] name = "futures-macro" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54913bae956fb8df7f4dc6fc90362aa72e69148e3f39041fbe8742d21e0ac57" +checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" dependencies = [ "autocfg", "proc-macro-hack", @@ -527,21 +527,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0f30aaa67363d119812743aa5f33c201a7a66329f97d1a887022971feea4b53" +checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" [[package]] name = "futures-task" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-util" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ "autocfg", "futures-channel", @@ -600,9 +600,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" +checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" dependencies = [ "bytes", "fnv", @@ -679,9 +679,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" dependencies = [ "bytes", "http", @@ -690,9 +690,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" @@ -702,9 +702,9 @@ checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.11" +version = "0.14.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" +checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" dependencies = [ "bytes", "futures-channel", @@ -816,15 +816,15 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "js-sys" -version = "0.3.51" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254" dependencies = [ "wasm-bindgen", ] @@ -837,9 +837,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.98" +version = "0.2.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" +checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" [[package]] name = "linked-hash-map" @@ -849,9 +849,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ "scopeguard", ] @@ -867,9 +867,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "md-5" @@ -884,9 +884,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" @@ -987,9 +987,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.26.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" +checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" dependencies = [ "memchr", ] @@ -1035,9 +1035,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", @@ -1046,9 +1046,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ "cfg-if", "instant", @@ -1140,9 +1140,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" +checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" dependencies = [ "unicode-xid", ] @@ -1301,9 +1301,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] @@ -1564,9 +1564,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc_version" @@ -1645,9 +1645,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" dependencies = [ "bitflags", "core-foundation", @@ -1658,9 +1658,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.3.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" +checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" dependencies = [ "core-foundation-sys", "libc", @@ -1677,18 +1677,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -1697,9 +1697,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" +checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" dependencies = [ "itoa", "ryu", @@ -1729,9 +1729,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.19" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6375dbd828ed6964c3748e4ef6d18e7a175d408ffe184bca01698d0c73f915a9" +checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" dependencies = [ "dtoa", "indexmap", @@ -1741,9 +1741,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.5" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" +checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer", "cfg-if", @@ -1770,9 +1770,9 @@ checksum = "b6fa3938c99da4914afedd13bf3d79bcb6c277d1b2c398d23257a304d9e1b074" [[package]] name = "shlex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = 
"signal-hook-registry" @@ -1796,9 +1796,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" [[package]] name = "smallvec" @@ -1852,9 +1852,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b041cdcb67226aca307e6e7be44c8806423d83e018bd662360a93dabce4d71" +checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" dependencies = [ "clap", "lazy_static", @@ -1863,9 +1863,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7813934aecf5f51a54775e00068c237de98489463968231a51746bbbc03f9c10" +checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" dependencies = [ "heck", "proc-macro-error", @@ -1882,9 +1882,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.74" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" +checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84" dependencies = [ "proc-macro2", "quote", @@ -1955,9 +1955,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "5241dd6f21443a3606b432718b166d3cedc962fd4b8bea54a8bc7f514ebda986" dependencies = [ "tinyvec_macros", ] @@ -2023,9 +2023,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" dependencies = [ "bytes", "futures-core", @@ -2046,9 +2046,9 @@ dependencies = [ [[package]] name = "tough" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e4c7428cbe9c2989bd5b7c1fef62e574106020d8e3a8168455fb19b43c81e4" +checksum = "0d69a5b5cc9cb23cf307914e176d28c346a9655bc7a7aac395e1d69924d134f1" dependencies = [ "chrono", "dyn-clone", @@ -2071,9 +2071,9 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de009112dcd35d79f594074e719d8c89a533ba321ce523bdc88cf4c456bd45" +checksum = "a08cf9e6e2a058fad7b7c7bea7bc9a19bd01d29cd454d7fcadd39ee65121665a" dependencies = [ "pem", "ring", @@ -2087,9 +2087,9 @@ dependencies = [ [[package]] name = "tough-ssm" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a09388d82c02fe77ac4aac43d8d5d2766b43736c7f8d0b1b7fe8c7aa713a093" +checksum = "7b88457e1d0774d2764595664bc46e12b1e6b1996338c5c2623dda242bbd3db7" dependencies = [ "rusoto_core", "rusoto_credential", @@ -2109,9 +2109,9 @@ checksum = 
"360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "c2ba9ab62b7d6497a8638dfda5e5c4fb3b2d5a7fca4118f2b96151c8ef1a437e" dependencies = [ "cfg-if", "pin-project-lite", @@ -2120,9 +2120,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "46125608c26121c81b0c6d693eab5a420e416da7e43c426d2e8f7df8da8a3acf" dependencies = [ "lazy_static", ] @@ -2135,18 +2135,15 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] +checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" [[package]] name = "unicode-normalization" @@ -2250,9 +2247,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" dependencies = [ "cfg-if", "serde", @@ -2262,9 +2259,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" dependencies = [ "bumpalo", "lazy_static", @@ -2277,9 +2274,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +checksum = "a87d738d4abc4cf22f6eb142f5b9a81301331ee3c767f2fef2fda4e325492060" dependencies = [ "cfg-if", "js-sys", @@ -2289,9 +2286,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2299,9 +2296,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92" dependencies = [ "proc-macro2", "quote", @@ -2312,15 +2309,15 
@@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a" dependencies = [ "js-sys", "wasm-bindgen", From b3a851188fb288e6e62a740980a7ee2c18d1c591 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 13 Sep 2021 13:42:06 -0700 Subject: [PATCH 0530/1356] tools: update nonzero_ext to 0.3 --- tools/Cargo.lock | 4 ++-- tools/buildsys/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 7a15eaa3..611a6943 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -937,9 +937,9 @@ dependencies = [ [[package]] name = "nonzero_ext" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "ntapi" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 5c23cb25..f9651957 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -22,4 +22,4 @@ snafu = "0.6" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } walkdir = "2" -nonzero_ext = "0.2.0" +nonzero_ext = "0.3" From 423001c52f3beeb20b058a3a9d008bd723d65612 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Mon, 13 Sep 2021 13:47:56 -0700 Subject: [PATCH 0531/1356] Update serde-plain to 1.0 The update_metadata crate is shared between the sources/ and tools/ workspaces. This update covers both so the tools/ workspace doesn't need to be updated again after sources/. 
--- tools/Cargo.lock | 15 ++++++++++++--- tools/buildsys/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 611a6943..d940132e 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -159,7 +159,7 @@ dependencies = [ "regex", "reqwest", "serde", - "serde_plain", + "serde_plain 1.0.0", "sha2", "snafu", "toml", @@ -1715,6 +1715,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_plain" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95455e7e29fada2052e72170af226fbe368a4ca33dee847875325d9fdb133858" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.0" @@ -2061,7 +2070,7 @@ dependencies = [ "ring", "serde", "serde_json", - "serde_plain", + "serde_plain 0.3.0", "snafu", "tempfile", "untrusted", @@ -2188,7 +2197,7 @@ dependencies = [ "semver", "serde", "serde_json", - "serde_plain", + "serde_plain 1.0.0", "snafu", "toml", ] diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index f9651957..bd7ae3d1 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -16,7 +16,7 @@ rand = { version = "0.8", default-features = false, features = ["std", "std_rng" regex = "1" reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } -serde_plain = "0.3.0" +serde_plain = "1.0" sha2 = "0.9" snafu = "0.6" toml = "0.5" From 4467ca56a72a193c28c85b2ba9ff82680f527281 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Date: Mon, 20 Sep 2021 18:14:59 +0000 Subject: [PATCH 0532/1356] Update hash for v3 root.json --- BUILDING.md | 2 +- tools/pubsys/Infra.toml.example | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 4c4aeabf..c8ef2277 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -138,7 +138,7 @@ Next, you need the Bottlerocket root role, which is used by tuftool to verify th This will download and verify the root role itself: ```bash curl -O "https://cache.bottlerocket.aws/root.json" -sha512sum -c <<<"90393204232a1ad6b0a45528b1f7df1a3e37493b1e05b1c149f081849a292c8dafb4ea5f7ee17bcc664e35f66e37e4cfa4aae9de7a2a28aa31ae6ac3d9bea4d5 root.json" +sha512sum -c <<<"e9b1ea5f9b4f95c9b55edada4238bf00b12845aa98bdd2d3edb63ff82a03ada19444546337ec6d6806cbf329027cf49f7fde31f54d551c5e02acbed7efe75785 root.json" ``` Next, set your desired parameters, and download the kmod kit: diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index c52a64e2..46a81709 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -14,7 +14,7 @@ root_role_sha512 = "0123456789abcdef" # For reference, this is the Bottlerocket root role: #root_role_url = "https://cache.bottlerocket.aws/root.json" -#root_role_sha512 = "90393204232a1ad6b0a45528b1f7df1a3e37493b1e05b1c149f081849a292c8dafb4ea5f7ee17bcc664e35f66e37e4cfa4aae9de7a2a28aa31ae6ac3d9bea4d5" +#root_role_sha512 = "e9b1ea5f9b4f95c9b55edada4238bf00b12845aa98bdd2d3edb63ff82a03ada19444546337ec6d6806cbf329027cf49f7fde31f54d551c5e02acbed7efe75785" # pubsys assumes a single publication key that signs the snapshot, targets, # and timestamp roles. 
Here you specify where that key lives so we can sign From 0600fe337a45fc320ea43fcda5f4a15f62daeffa Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 17 Sep 2021 13:49:30 -0700 Subject: [PATCH 0533/1356] Update kernel-5.4 to 5.4.141 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 75384507..3366f20a 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/d10a345f3b99842f109529ef5520232b1eba2349b667a7a0a18b1f86cb3eebbd/kernel-5.4.129-63.229.amzn2.src.rpm" -sha512 = "852a1ece96a9f7cf65f81848291a00a43f7e2ae426e65b38b72125a627970f9d7e1a5ab60cbd570d17d63911fa4644aff9a1aa84f8f3096b9ca596a90fa99fc1" +url = "https://cdn.amazonlinux.com/blobstore/9e8b76ee271c50b0190e45d6b3fb69263afc7c8be8c1c3aafc4e663f997a0232/kernel-5.4.141-67.229.amzn2.src.rpm" +sha512 = "2f980b006579d3297481e0e8f8b636501648b05ae50e48a90ac1b576a47745dd9ab45fa92c7c094f7bff6931c502da2af1a3588f96e362a624d33767601d03b6" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 94e5fea6..5799c35b 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.129 +Version: 5.4.141 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/d10a345f3b99842f109529ef5520232b1eba2349b667a7a0a18b1f86cb3eebbd/kernel-5.4.129-63.229.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9e8b76ee271c50b0190e45d6b3fb69263afc7c8be8c1c3aafc4e663f997a0232/kernel-5.4.141-67.229.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 4a7bb215fe46cf712cb27b60dc2b8d728ebb05fe Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 17 Sep 2021 13:49:30 -0700 Subject: [PATCH 0534/1356] Update kernel-5.10 to 5.10.59 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 8a57c08d..953875e8 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,5 +13,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/ffdc72c6cf8a4fcebfe8a3175a3f618f42f6ff2b00a36c0da6e04cf00d258daf/kernel-5.10.50-44.132.amzn2.src.rpm" -sha512 = "ff548cfb49be98f1180c30f0c4f13846a690fb162a09be17a910267ac301b9efafacacbc5d873d699e250d8d1962bb48d7095509b6de3ce36ebf1b930efa92d8" +url = "https://cdn.amazonlinux.com/blobstore/67866b408c9bc8889fd8b86dab6fe79e697ac5ef31d321f173b028bf26dcf266/kernel-5.10.59-52.142.amzn2.src.rpm" +sha512 = "bbd90bd9793218f9410ca5d953a718fc00ed212953ea7de889698f66388314ecff4f145b2beb90ebff0bac6e961c6b3b71ca18948dc83194dd2e70feba7cf1db" diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 8d4e05a5..7ab9b69b 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.50 +Version: 5.10.59 Release: 2%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/ffdc72c6cf8a4fcebfe8a3175a3f618f42f6ff2b00a36c0da6e04cf00d258daf/kernel-5.10.50-44.132.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/67866b408c9bc8889fd8b86dab6fe79e697ac5ef31d321f173b028bf26dcf266/kernel-5.10.59-52.142.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 91de62c3f51fc8f16b41ce38786ddd439dcc030e Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 22 Sep 2021 18:13:53 -0700 Subject: [PATCH 0535/1356] README: fix broken link for TLS bootstraping --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f0b0b387..54e5d75c 100644 --- a/README.md +++ b/README.md @@ -305,7 +305,7 @@ For Kubernetes variants in AWS, you must also specify: For Kubernetes variants in VMware, you must specify: * `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. -* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-refe rence/kubelet-tls-bootstrapping/). +* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. From b009448de7717f36ee032a0a2c78fca286ca2b80 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 9 Aug 2021 12:27:23 -0700 Subject: [PATCH 0536/1356] pluto: support IPv6 This adds support for retrieving IPv6 node IP address for IPv6 EKS clusters --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 54e5d75c..aa6a8077 100644 --- a/README.md +++ b/README.md @@ -368,7 +368,7 @@ Static pods can be particularly useful when running in standalone mode. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. 
In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.21/defaults.d/). -* `settings.kubernetes.node-ip`: The IPv4 address of this node. +* `settings.kubernetes.node-ip`: The IP address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. * Bottlerocket provides default values for the resources by [schnauzer](sources/api/): @@ -378,7 +378,7 @@ In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on For Kubernetes variants in AWS, the following settings are set for you automatically by [pluto](sources/api/). * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) -* `settings.kubernetes.cluster-dns-ip`: Derived from the EKS IPV4 Service CIDR or the CIDR block of the primary network interface. +* `settings.kubernetes.cluster-dns-ip`: Derived from the EKS Service IP CIDR or the CIDR block of the primary network interface. #### Amazon ECS settings From 22813f14e6580f57bf9fc5c1c9f1e360f8742b77 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 1 Oct 2021 17:30:43 +0000 Subject: [PATCH 0537/1356] Add *.src.rpm to packages/.gitignore --- packages/.gitignore | 1 + packages/kernel-5.10/.gitignore | 1 - packages/kernel-5.4/.gitignore | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 packages/kernel-5.10/.gitignore delete mode 100644 packages/kernel-5.4/.gitignore diff --git a/packages/.gitignore b/packages/.gitignore index 7102c73b..a8efb5f2 100644 --- a/packages/.gitignore +++ b/packages/.gitignore @@ -1 +1,2 @@ *.patch.bz2 +*.src.rpm diff --git a/packages/kernel-5.10/.gitignore b/packages/kernel-5.10/.gitignore deleted file mode 100644 index f0af3ba1..00000000 --- a/packages/kernel-5.10/.gitignore +++ /dev/null @@ -1 +0,0 @@ -kernel-*.src.rpm diff --git a/packages/kernel-5.4/.gitignore b/packages/kernel-5.4/.gitignore deleted file mode 100644 index f0af3ba1..00000000 --- a/packages/kernel-5.4/.gitignore +++ /dev/null @@ -1 +0,0 @@ -kernel-*.src.rpm From 82bfce35515d573970ac70a241e6f9cfa1d22822 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Wed, 29 Sep 2021 16:06:55 -0500 Subject: [PATCH 0538/1356] docs: fix link to issue labels --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a377b56b..374a5ce1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,7 +42,7 @@ GitHub provides additional documentation on [forking a repository](https://help. ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. -As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/bottlerocket-os/bottlerocket/labels/help%20wanted) issues is a great place to start. +As this repository uses GitHub issue [labels](https://github.com/bottlerocket-os/bottlerocket/labels), looking at any ['status/helpwelcome'](https://github.com/bottlerocket-os/bottlerocket/labels/status%2Fhelpwelcome) issues is a great place to start. ## Code of Conduct From 77f7d8f29ab6dc8879057d066deb498d3efb81bd Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 5 Oct 2021 22:24:13 +0000 Subject: [PATCH 0539/1356] Add *.zip to packages/.gitignore --- packages/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/.gitignore b/packages/.gitignore index a8efb5f2..ab0abbf9 100644 --- a/packages/.gitignore +++ b/packages/.gitignore @@ -1,2 +1,3 @@ *.patch.bz2 *.src.rpm +*.zip From 8dddd1310aa0de14860697600a7305b1dfa22322 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 18 Oct 2021 14:00:56 -0700 Subject: [PATCH 0540/1356] sources,tools: cargo fmt Runs cargo-fmt over all first-party source code. --- tools/pubsys-config/src/lib.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 76b93121..59b2f0ce 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -67,18 +67,12 @@ impl InfraConfig { pub fn from_path_or_lock(path: &Path, default: bool) -> Result { let lock_path = Self::compute_lock_path(&path)?; if lock_path.exists() { - info!( - "Found infra config at path: {}", - lock_path.display() - ); + info!("Found infra config at path: {}", lock_path.display()); Self::from_lock_path(lock_path) } else if default { Self::from_path_or_default(&path) } else { - info!( - "Found infra config at path: {}", - path.display() - ); + info!("Found infra config at path: {}", path.display()); Self::from_path(&path) } } From d0f539c289660e78780c9ca0efe6c7d759b3911b Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 18 Oct 2021 14:46:46 -0700 Subject: [PATCH 0541/1356] actions-workflow: add check for formatting Adds a new cargo-make task,'check-fmt', that checks first-party source code formatting. Adds 'check-fmt' to github actions build workflow. --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 62d1f6f7..5bbe1cdb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,4 +32,5 @@ jobs: - run: rustup toolchain install 1.53.0 && rustup default 1.53.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests + - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From 6ce4ada2964c45eb18f1c2e7152710e105e333f8 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 14 Oct 2021 17:15:55 +0000 Subject: [PATCH 0542/1356] build: update SDK to 0.23.0 Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5bbe1cdb..3eedacef 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,7 +29,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.53.0 && rustup default 1.53.0 + - run: rustup toolchain install 1.56.0 && rustup default 1.56.0 - run: cargo install --version 0.30.0 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt From ed55e0cac99940b461befd84a26aeafcb4d87df3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 15 Oct 2021 19:59:25 +0000 Subject: [PATCH 0543/1356] build: run new cargo deny checks Path dependencies without a specified version will be treated as a wildcard dependency and cause an error, so 
add the version to each. Block multiple versions of crates, but skip the existing cases. The goal is to prevent new instances from coming in unnoticed. Signed-off-by: Ben Cressey --- tools/deny.toml | 15 +++++++++++++++ tools/infrasys/Cargo.toml | 2 +- tools/pubsys-config/Cargo.toml | 2 +- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 6 +++--- 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/tools/deny.toml b/tools/deny.toml index d9ea74c8..16f12883 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -40,3 +40,18 @@ expression = "ISC" license-files = [ { path = "LICENSE", hash = 0x001c7e6c }, ] + +[bans] +# Deny multiple versions or wildcard dependencies. +multiple-versions = "deny" +wildcards = "deny" + +skip = [ + # older version required by tough 0.11.2 + { name = "serde_plain", version = "0.3.0" }, +] + +[sources] +# Deny crates from unknown registries or git repositories. +unknown-registry = "deny" +unknown-git = "deny" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index e34fc6f6..9fa21dca 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -11,7 +11,7 @@ async-trait = "0.1.51" clap = "2.33" hex = "0.4.0" log = "0.4.14" -pubsys-config = { path = "../pubsys-config/" } +pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } rusoto_cloudformation = { version = "0.47", default-features = false, features = ["rustls"] } rusoto_core = { version = "0.47", default-features = false, features = ["rustls"] } rusoto_s3 = { version = "0.47", default-features = false, features = ["rustls"] } diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index 816408af..5010acd2 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -11,7 +11,7 @@ chrono = "0.4" home = "0.5" lazy_static = "1.4" log = "0.4" -parse-datetime = { path = "../../sources/parse-datetime" } +parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.8.17" snafu = "0.6" diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 093f9207..bac72852 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -9,7 +9,7 @@ publish = false [dependencies] hex = "0.4.0" log = "0.4" -pubsys-config = { path = "../pubsys-config/" } +pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } sha2 = "0.9" shell-words = "1.0" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 5cd08577..f8b3b31f 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -12,13 +12,13 @@ chrono = "0.4" clap = "2.33" coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} duct = "0.13.0" -pubsys-config = { path = "../pubsys-config/" } +pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } futures = "0.3.5" indicatif = "0.16.0" lazy_static = "1.4" log = "0.4" num_cpus = "1" -parse-datetime = { path = "../../sources/parse-datetime" } +parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. 
reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } @@ -43,6 +43,6 @@ toml = "0.5" tough = { version = "0.11", features = ["http"] } tough-kms = "0.3" tough-ssm = "0.6" -update_metadata = { path = "../../sources/updater/update_metadata/" } +update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1.0" } url = { version = "2.1.0", features = ["serde"] } tempfile = "3.1" From 68664e6702e8d64ee3c5e17f4dd1a1d77177d660 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 1 Nov 2021 23:19:07 +0000 Subject: [PATCH 0544/1356] docs: Update cargo-make commands so that tasks come after arguments Due to a change in `cargo-make` v0.35.3, arguments like `-e PUBLISH_REGION` must be passed before the task name (instead of after). --- BUILDING.md | 6 ++++-- tools/pubsys/Infra.toml.example | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index c8ef2277..bc34e19d 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -98,15 +98,17 @@ To use the image in Amazon EC2, we need to register the image as an AMI. For a simple start, pick an [EC2 region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions), then run: ``` -cargo make ami -e PUBLISH_REGIONS=your-region-here +cargo make -e PUBLISH_REGIONS=your-region-here ami ``` +Note that the task ("ami") must come **after** the arguments to `cargo make` that are specified with `-e`. + Your new AMI ID will be printed after it's registered. If you built your image for a different architecture or variant, just use the same arguments here: ``` -cargo make ami -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-here +cargo make -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-here ami ``` (There's a lot more detail on building and managing AMIs in the [PUBLISHING](PUBLISHING.md) guide.) diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 46a81709..889b317a 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -3,7 +3,7 @@ # at the root of the repo, then edit the settings below to match your use case. # You can have any number of repos defined and build a specific one by running like this: -# cargo make repo -e PUBLISH_REPO=myrepo +# cargo make -e PUBLISH_REPO=myrepo repo [repo.default] # URL to your root role JSON file; can be a file:// URL for local files. If # you don't specify one here, a file will be generated for you under /roles. @@ -65,7 +65,7 @@ datacenters = ["north", "south"] # *** # Optional common configuration -# This configuration allow values to be set in a single place if they are common in +# This configuration allow values to be set in a single place if they are common in # multiple datacenters. They can be overridden in the datacenter's block below. 
[vmware.common] network = "a_network" From ff517d95c5224e9f1087fa1e2778d78fe8074987 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 5 Nov 2021 15:33:24 -0700 Subject: [PATCH 0545/1356] Remove aws-k8s-1.17 variant --- .github/workflows/build.yml | 2 +- README.md | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3eedacef..a442ac27 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.17, aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] + variant: [aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] include: diff --git a/README.md b/README.md index aa6a8077..7f1c8946 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,6 @@ For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an ima The following variants support EKS, as described above: -- `aws-k8s-1.17` - `aws-k8s-1.18` - `aws-k8s-1.19` - `aws-k8s-1.20` @@ -65,8 +64,8 @@ We also have variants in preview status that are designed to be Kubernetes worke - `vmware-k8s-1.20` - `vmware-k8s-1.21` -The `aws-k8s-1.15` and `aws-k8s-1.16` variants are no longer supported. -We recommend users replace `aws-k8s-1.15` and `aws-k8s-1.16` nodes with the [latest variant compatible with their cluster](variants/). +The `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17` variants are no longer supported. +We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). ## Architectures From ff335948152198aa1840c8a4200e465677859772 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Tue, 2 Nov 2021 14:09:54 -0700 Subject: [PATCH 0546/1356] Add high-level documentation for 'apiclient exec' --- README.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7f1c8946..002e8f77 100644 --- a/README.md +++ b/README.md @@ -132,6 +132,11 @@ aws ssm start-session --target INSTANCE_ID With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make [API calls](#api) to configure and manage your Bottlerocket host. To do even more, read the next section about the [admin container](#admin-container). +If you've enabled the admin container, you can access it from the control container like this: + +``` +apiclient exec admin bash +``` ### Admin container @@ -159,6 +164,12 @@ If you're using a custom control container, or want to make the API calls direct apiclient set host-containers.admin.enabled=true ``` +Once you've enabled the admin container, you can either access it through SSH or from the control container like this: + +``` +apiclient exec admin bash +``` + Once you're in the admin container, you can run `sheltie` to get a full root shell in the Bottlerocket host. Be careful; while you can inspect and change even more as root, Bottlerocket's filesystem and dm-verity setup will prevent most changes from persisting over a restart - see [Security](#security). @@ -556,6 +567,12 @@ superpowered = false If the `enabled` flag is `true`, it will be started automatically. All host containers will have the `apiclient` binary available at `/usr/local/bin/apiclient` so they're able to [interact with the API](#using-the-api-client). +You can also use `apiclient` to run programs in other host containers. 
+For example, to access the admin container: + +``` +apiclient exec admin bash +``` In addition, all host containers come with persistent storage that survives reboots and container start/stop cycles. It's available at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current`. @@ -634,7 +651,7 @@ AWS-specific settings are automatically set based on calls to the Instance MetaD ### Logs You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. -SSH to the Bottlerocket host, then run: +SSH to the Bottlerocket host or `apiclient exec admin bash` to access the admin container, then run: ```bash sudo sheltie @@ -651,6 +668,8 @@ ssh -i YOUR_KEY_FILE \ "cat /.bottlerocket/rootfs/var/log/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz ``` +(If your instance isn't accessible through SSH, you can use [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html).) + For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). ### Kdump Support @@ -732,6 +751,8 @@ This way, you can configure your Bottlerocket instance without having to make AP See [Settings](#settings) above for examples and to understand what you can configure. +You can also access host containers through the API using [apiclient exec](sources/api/apiclient/README.md#exec-mode). + The server and client are the user-facing components of the API system, but there are a number of other components that work together to make sure your settings are applied, and that they survive upgrades of Bottlerocket. For more details, see the [API system documentation](sources/api/). From f76670a53c02ba940f2fa2c3620e1801713670a9 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 27 Oct 2021 19:02:01 -0700 Subject: [PATCH 0547/1356] model: change `container-registry.mirrors` type Changes the model for `container-registry.mirrors` from a `HashMap` to a vector of structs. Adds a custom deserializer to deserialize from either a TOML table or a TOML sequence. --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 002e8f77..30a8437e 100644 --- a/README.md +++ b/README.md @@ -421,13 +421,18 @@ These settings can be changed at any time. #### Container image registry settings The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. -* `settings.container-registry.mirrors`: A mapping of container image registry to a list of image registry URL endpoints. When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. +* `settings.container-registry.mirrors`: An array of container image registry mirror settings. Each element specifies the registry and the endpoints for said registry. +When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. (Docker and containerd will still try the default registry URL if the mirrors fail.) 
* Example user data for setting up image registry mirrors: ``` - [settings.container-registry.mirrors] - "docker.io" = ["https://"] - "gcr.io" = ["https://","http://"] + [[settings.container-registry.mirrors]] + registry = "*" + endpoint = ["https://","https://"] + + [[settings.container-registry.mirrors]] + registry = "docker.io" + endpoint = [ "https://", "https://"] ``` If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. From 314bdee0c5b15bae21faa0c268ce478f4ecc3a72 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Mon, 8 Nov 2021 18:43:46 +0000 Subject: [PATCH 0548/1356] tools: update tough to 0.12 --- tools/Cargo.lock | 66 ++++++++++++++++++++-- tools/deny.toml | 5 +- tools/pubsys/Cargo.toml | 2 +- tools/pubsys/src/repo.rs | 20 ++++++- tools/pubsys/src/repo/validate_repo/mod.rs | 8 +-- 5 files changed, 84 insertions(+), 17 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index d940132e..a4641540 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1067,6 +1067,24 @@ dependencies = [ "snafu", ] +[[package]] +name = "path-absolutize" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b288298a7a3a7b42539e3181ba590d32f2d91237b0691ed5f103875c754b3bf5" +dependencies = [ + "path-dedot", +] + +[[package]] +name = "path-dedot" +version = "3.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bfa72956f6be8524f7f7e2b07972dda393cb0008a6df4451f658b7e1bd1af80" +dependencies = [ + "once_cell", +] + [[package]] name = "pem" version = "0.8.3" @@ -1078,6 +1096,17 @@ dependencies = [ "regex", ] +[[package]] +name = "pem" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06673860db84d02a63942fa69cd9543f2624a5df3aea7f33173048fa7ad5cf1a" +dependencies = [ + "base64", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "2.1.0" @@ -1184,7 +1213,7 @@ dependencies = [ "tokio", "tokio-stream", "toml", - "tough", + "tough 0.12.0", "tough-kms", "tough-ssm", "update_metadata", @@ -2065,7 +2094,7 @@ dependencies = [ "hex", "log", "olpc-cjson", - "pem", + "pem 0.8.3", "reqwest", "ring", "serde", @@ -2078,20 +2107,47 @@ dependencies = [ "walkdir", ] +[[package]] +name = "tough" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99488309ba53ee931b6ccda1cde07feaab95f214d328e3a7244c0f7563b5909f" +dependencies = [ + "chrono", + "dyn-clone", + "globset", + "hex", + "log", + "olpc-cjson", + "path-absolutize", + "pem 1.0.1", + "percent-encoding", + "reqwest", + "ring", + "serde", + "serde_json", + "serde_plain 1.0.0", + "snafu", + "tempfile", + "untrusted", + "url", + "walkdir", +] + [[package]] name = "tough-kms" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08cf9e6e2a058fad7b7c7bea7bc9a19bd01d29cd454d7fcadd39ee65121665a" dependencies = [ - "pem", + "pem 0.8.3", "ring", "rusoto_core", "rusoto_credential", "rusoto_kms", "snafu", "tokio", - "tough", + "tough 0.11.2", ] [[package]] @@ -2107,7 +2163,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough", + "tough 0.11.2", ] [[package]] diff --git a/tools/deny.toml b/tools/deny.toml index 16f12883..3beb2794 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -46,10 +46,7 @@ license-files = [ 
multiple-versions = "deny" wildcards = "deny" -skip = [ - # older version required by tough 0.11.2 - { name = "serde_plain", version = "0.3.0" }, -] +skip = [] [sources] # Deny crates from unknown registries or git repositories. diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index f8b3b31f..c319faa2 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -40,7 +40,7 @@ tinytemplate = "1.1" tokio = { version = "~1.8", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" -tough = { version = "0.11", features = ["http"] } +tough = { version = "0.12", features = ["http"] } tough-kms = "0.3" tough-ssm = "0.6" update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1.0" } diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 493fb4f9..b08b1a3b 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -352,10 +352,14 @@ where match repo_load_result { // If we load it successfully, build an editor and manifest from it. Ok(repo) => { + let target = "manifest.json"; + let target = target + .try_into() + .context(error::ParseTargetName { target })?; let reader = repo - .read_target("manifest.json") + .read_target(&target) .context(error::ReadTarget { - target: "manifest.json", + target: target.raw(), })? .with_context(|| error::NoManifest { metadata_url: metadata_url.clone(), @@ -553,13 +557,17 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Copy manifest with proper name instead of tempfile name debug!("Copying manifest.json into {}", targets_out_dir.display()); + let target = "manifest.json"; + let target = target + .try_into() + .context(error::ParseTargetName { target })?; signed_repo .copy_target( &manifest_path, &targets_out_dir, // We should never have matching manifests from different repos PathExists::Fail, - Some("manifest.json"), + Some(&target), ) .context(error::CopyTarget { target: &manifest_path, @@ -708,6 +716,12 @@ mod error { source: tough::error::Error, }, + #[snafu(display("Failed to parse target name from string '{}': {}", target, source))] + ParseTargetName { + target: String, + source: tough::error::Error, + }, + #[snafu(display("Repo exists at '{}' - remove it and try again", path.display()))] RepoExists { path: PathBuf }, diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 327eeb8b..9a3ddff4 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -61,17 +61,17 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let mut reader = repo .read_target(&target) .with_context(|| repo_error::ReadTarget { - target: target.to_string(), + target: target.raw(), })? .with_context(|| error::TargetMissing { - target: target.to_string(), + target: target.raw(), })?; - info!("Downloading target: {}", target); + info!("Downloading target: {}", target.raw()); thread_pool.spawn(move || { tx.send({ // tough's `Read` implementation validates the target as it's being downloaded io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { - target: target.to_string(), + target: target.raw(), }) }) // inability to send on this channel is unrecoverable From cf9f3ea7a2a356ea6c91eee416b6e46babf38363 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Mon, 8 Nov 2021 18:48:14 +0000 Subject: [PATCH 0549/1356] tools: cargo update --- tools/Cargo.lock | 262 +++++++++++++++++++---------------------------- 1 file changed, 108 insertions(+), 154 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a4641540..d6f4f0b3 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] @@ -104,9 +104,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ "addr2line", "cc", @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "memchr", ] @@ -159,7 +159,7 @@ dependencies = [ "regex", "reqwest", "serde", - "serde_plain 1.0.0", + "serde_plain", "sha2", "snafu", "toml", @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "bytes" @@ -196,9 +196,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" [[package]] name = "cfg-if" @@ -259,22 +259,22 @@ dependencies = [ [[package]] name = "console" -version = "0.14.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" +checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" dependencies = [ "encode_unicode", - "lazy_static", "libc", + "once_cell", "terminal_size", "winapi", ] [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -282,9 +282,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" @@ -441,9 +441,9 @@ checksum = 
"a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" dependencies = [ "cfg-if", ] @@ -581,9 +581,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.25.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "globset" @@ -600,9 +600,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" dependencies = [ "bytes", "fnv", @@ -668,9 +668,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes", "fnv", @@ -679,9 +679,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", @@ -702,9 +702,9 @@ checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" dependencies = [ "bytes", "futures-channel", @@ -801,9 +801,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] @@ -822,9 +822,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "js-sys" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -837,9 +837,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.101" +version = "0.2.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" [[package]] name = "linked-hash-map" @@ -915,9 +915,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" 
+version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", "log", @@ -987,9 +987,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.26.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "memchr", ] @@ -1085,17 +1085,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "pem" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" -dependencies = [ - "base64", - "once_cell", - "regex", -] - [[package]] name = "pem" version = "1.0.1" @@ -1127,9 +1116,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] name = "proc-macro-error" @@ -1169,9 +1158,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] @@ -1213,7 +1202,7 @@ dependencies = [ "tokio", "tokio-stream", "toml", - "tough 0.12.0", + "tough", "tough-kms", "tough-ssm", "update_metadata", @@ -1256,9 +1245,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] @@ -1375,9 +1364,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.4" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +checksum = "66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" dependencies = [ "base64", "bytes", @@ -1397,6 +1386,7 @@ dependencies = [ "pin-project-lite", "rustls", "serde", + "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", @@ -1726,24 +1716,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" +checksum = "e466864e431129c7e0d3476b92f20458e5879919a0596c6472738d9fa2d342f8" dependencies = [ "itoa", "ryu", "serde", ] -[[package]] -name = "serde_plain" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625fb0da2b006092b426a94acc1611bec52f2ec27bb27b266a9f93c29ee38eda" -dependencies = [ - "serde", -] - [[package]] name = 
"serde_plain" version = "1.0.0" @@ -1823,9 +1804,9 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d0fe306a0ced1c88a58042dc22fc2ddd000982c26d75f6aa09a394547c41e0" +checksum = "85d04ae642154220ef00ee82c36fb07853c10a4f2a0ca6719f9991211d2eb959" dependencies = [ "chrono", "log", @@ -1834,15 +1815,15 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "snafu" @@ -1868,9 +1849,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ "libc", "winapi", @@ -1890,9 +1871,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" +checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" dependencies = [ "clap", "lazy_static", @@ -1901,9 +1882,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.16" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck", "proc-macro-error", @@ -1920,9 +1901,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.76" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ "proc-macro2", "quote", @@ -1993,9 +1974,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5241dd6f21443a3606b432718b166d3cedc962fd4b8bea54a8bc7f514ebda986" +checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" dependencies = [ "tinyvec_macros", ] @@ -2028,9 +2009,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.3.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" dependencies = [ "proc-macro2", "quote", @@ -2050,9 +2031,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2061,9 +2042,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2082,31 +2063,6 @@ dependencies = [ "serde", ] -[[package]] -name = "tough" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d69a5b5cc9cb23cf307914e176d28c346a9655bc7a7aac395e1d69924d134f1" -dependencies = [ - "chrono", - "dyn-clone", - "globset", - "hex", - "log", - "olpc-cjson", - "pem 0.8.3", - "reqwest", - "ring", - "serde", - "serde_json", - "serde_plain 0.3.0", - "snafu", - "tempfile", - "untrusted", - "url", - "walkdir", -] - [[package]] name = "tough" version = "0.12.0" @@ -2120,13 +2076,13 @@ dependencies = [ "log", "olpc-cjson", "path-absolutize", - "pem 1.0.1", + "pem", "percent-encoding", "reqwest", "ring", "serde", "serde_json", - "serde_plain 1.0.0", + "serde_plain", "snafu", "tempfile", "untrusted", @@ -2136,25 +2092,25 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08cf9e6e2a058fad7b7c7bea7bc9a19bd01d29cd454d7fcadd39ee65121665a" +checksum = "6e1ece7cb6917b7d503e85d9285e1a7616d2e5ae96c1362087771401559f47d2" dependencies = [ - "pem 0.8.3", + "pem", "ring", "rusoto_core", "rusoto_credential", "rusoto_kms", "snafu", "tokio", - "tough 0.11.2", + "tough", ] [[package]] name = "tough-ssm" -version = "0.6.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b88457e1d0774d2764595664bc46e12b1e6b1996338c5c2623dda242bbd3db7" +checksum = "303c67d70bcf2352668c42984715a71ccbf1558fcbf64064987caadba07fc771" dependencies = [ "rusoto_core", "rusoto_credential", @@ -2163,7 +2119,7 @@ dependencies = [ "serde_json", "snafu", "tokio", - "tough 0.11.2", + "tough", ] [[package]] @@ -2174,9 +2130,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.27" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ba9ab62b7d6497a8638dfda5e5c4fb3b2d5a7fca4118f2b96151c8ef1a437e" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if", "pin-project-lite", @@ -2185,9 +2141,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46125608c26121c81b0c6d693eab5a420e416da7e43c426d2e8f7df8da8a3acf" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -2206,9 +2162,9 @@ checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = 
"1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -2227,9 +2183,9 @@ checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" @@ -2253,7 +2209,7 @@ dependencies = [ "semver", "serde", "serde_json", - "serde_plain 1.0.0", + "serde_plain", "snafu", "toml", ] @@ -2312,21 +2268,19 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if", - "serde", - "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -2339,9 +2293,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a87d738d4abc4cf22f6eb142f5b9a81301331ee3c767f2fef2fda4e325492060" +checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ "cfg-if", "js-sys", @@ -2351,9 +2305,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2361,9 +2315,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -2374,15 +2328,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.77" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -2464,6 +2418,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" From f5eca31417f8f6da521ba2add608d9c069c02a6e Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Tue, 9 Nov 2021 16:21:48 +0000 Subject: [PATCH 0550/1356] Update kernel-5.4 to 5.4.149 --- ...86-purgatory-Add-fno-stack-protector.patch | 45 ---- ...64-kexec_file-add-crash-dump-support.patch | 221 ------------------ .../0004-libfdt-include-fdt_addresses.c.patch | 44 ---- packages/kernel-5.4/Cargo.toml | 4 +- packages/kernel-5.4/kernel-5.4.spec | 9 +- 5 files changed, 4 insertions(+), 319 deletions(-) delete mode 100644 packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch delete mode 100644 packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch delete mode 100644 packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch diff --git a/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch b/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch deleted file mode 100644 index a6f19353..00000000 --- a/packages/kernel-5.4/0002-x86-purgatory-Add-fno-stack-protector.patch +++ /dev/null @@ -1,45 +0,0 @@ -From ff58155ca4fa7e931f34d948fa09fe14c6a66116 Mon Sep 17 00:00:00 2001 -From: Arvind Sankar -Date: Tue, 16 Jun 2020 18:25:47 -0400 -Subject: [PATCH] x86/purgatory: Add -fno-stack-protector - -The purgatory Makefile removes -fstack-protector options if they were -configured in, but does not currently add -fno-stack-protector. - -If gcc was configured with the --enable-default-ssp configure option, -this results in the stack protector still being enabled for the -purgatory (absent distro-specific specs files that might disable it -again for freestanding compilations), if the main kernel is being -compiled with stack protection enabled (if it's disabled for the main -kernel, the top-level Makefile will add -fno-stack-protector). - -This will break the build since commit - e4160b2e4b02 ("x86/purgatory: Fail the build if purgatory.ro has missing symbols") -and prior to that would have caused runtime failure when trying to use -kexec. - -Explicitly add -fno-stack-protector to avoid this, as done in other -Makefiles that need to disable the stack protector. - -Reported-by: Gabriel C -Signed-off-by: Arvind Sankar -Signed-off-by: Linus Torvalds ---- - arch/x86/purgatory/Makefile | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile -index b04e6e72a592..088bd764e0b7 100644 ---- a/arch/x86/purgatory/Makefile -+++ b/arch/x86/purgatory/Makefile -@@ -34,6 +34,7 @@ KCOV_INSTRUMENT := n - PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel - PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss - PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING -+PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector) - - # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. 
That - # in turn leaves some undefined symbols like __fentry__ in purgatory and not --- -2.30.2 - diff --git a/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch b/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch deleted file mode 100644 index e7e40e41..00000000 --- a/packages/kernel-5.4/0003-arm64-kexec_file-add-crash-dump-support.patch +++ /dev/null @@ -1,221 +0,0 @@ -From 3751e728cef2908c15974a5ae44627fd41ef3362 Mon Sep 17 00:00:00 2001 -From: AKASHI Takahiro -Date: Mon, 16 Dec 2019 11:12:47 +0900 -Subject: [PATCH] arm64: kexec_file: add crash dump support - -Enabling crash dump (kdump) includes -* prepare contents of ELF header of a core dump file, /proc/vmcore, - using crash_prepare_elf64_headers(), and -* add two device tree properties, "linux,usable-memory-range" and - "linux,elfcorehdr", which represent respectively a memory range - to be used by crash dump kernel and the header's location - -Signed-off-by: AKASHI Takahiro -Cc: Catalin Marinas -Cc: Will Deacon -Reviewed-by: James Morse -Tested-and-reviewed-by: Bhupesh Sharma -Signed-off-by: Will Deacon ---- - arch/arm64/include/asm/kexec.h | 4 + - arch/arm64/kernel/kexec_image.c | 4 - - arch/arm64/kernel/machine_kexec_file.c | 106 ++++++++++++++++++++++++- - 3 files changed, 106 insertions(+), 8 deletions(-) - -diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h -index 12a561a54128..d24b527e8c00 100644 ---- a/arch/arm64/include/asm/kexec.h -+++ b/arch/arm64/include/asm/kexec.h -@@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {} - struct kimage_arch { - void *dtb; - unsigned long dtb_mem; -+ /* Core ELF header buffer */ -+ void *elf_headers; -+ unsigned long elf_headers_mem; -+ unsigned long elf_headers_sz; - }; - - extern const struct kexec_file_ops kexec_image_ops; -diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c -index 29a9428486a5..af9987c154ca 100644 ---- a/arch/arm64/kernel/kexec_image.c -+++ b/arch/arm64/kernel/kexec_image.c -@@ -47,10 +47,6 @@ static void *image_load(struct kimage *image, - struct kexec_segment *kernel_segment; - int ret; - -- /* We don't support crash kernels yet. */ -- if (image->type == KEXEC_TYPE_CRASH) -- return ERR_PTR(-EOPNOTSUPP); -- - /* - * We require a kernel with an unambiguous Image header. 
Per - * Documentation/arm64/booting.rst, this is the case when image_size -diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c -index 7b08bf9499b6..dd3ae8081b38 100644 ---- a/arch/arm64/kernel/machine_kexec_file.c -+++ b/arch/arm64/kernel/machine_kexec_file.c -@@ -17,12 +17,15 @@ - #include - #include - #include -+#include - #include - #include - #include - #include - - /* relevant device tree properties */ -+#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr" -+#define FDT_PROP_MEM_RANGE "linux,usable-memory-range" - #define FDT_PROP_INITRD_START "linux,initrd-start" - #define FDT_PROP_INITRD_END "linux,initrd-end" - #define FDT_PROP_BOOTARGS "bootargs" -@@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) - vfree(image->arch.dtb); - image->arch.dtb = NULL; - -+ vfree(image->arch.elf_headers); -+ image->arch.elf_headers = NULL; -+ image->arch.elf_headers_sz = 0; -+ - return kexec_image_post_load_cleanup_default(image); - } - -@@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image, - - off = ret; - -+ ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR); -+ if (ret && ret != -FDT_ERR_NOTFOUND) -+ goto out; -+ ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE); -+ if (ret && ret != -FDT_ERR_NOTFOUND) -+ goto out; -+ -+ if (image->type == KEXEC_TYPE_CRASH) { -+ /* add linux,elfcorehdr */ -+ ret = fdt_appendprop_addrrange(dtb, 0, off, -+ FDT_PROP_KEXEC_ELFHDR, -+ image->arch.elf_headers_mem, -+ image->arch.elf_headers_sz); -+ if (ret) -+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); -+ -+ /* add linux,usable-memory-range */ -+ ret = fdt_appendprop_addrrange(dtb, 0, off, -+ FDT_PROP_MEM_RANGE, -+ crashk_res.start, -+ crashk_res.end - crashk_res.start + 1); -+ if (ret) -+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); -+ } -+ - /* add bootargs */ - if (cmdline) { - ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline); -@@ -125,8 +157,8 @@ static int setup_dtb(struct kimage *image, - } - - /* -- * More space needed so that we can add initrd, bootargs, kaslr-seed, and -- * rng-seed. -+ * More space needed so that we can add initrd, bootargs, kaslr-seed, -+ * rng-seed, userable-memory-range and elfcorehdr. 
- */ - #define DTB_EXTRA_SPACE 0x1000 - -@@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image, - } - } - -+static int prepare_elf_headers(void **addr, unsigned long *sz) -+{ -+ struct crash_mem *cmem; -+ unsigned int nr_ranges; -+ int ret; -+ u64 i; -+ phys_addr_t start, end; -+ -+ nr_ranges = 1; /* for exclusion of crashkernel region */ -+ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, -+ MEMBLOCK_NONE, &start, &end, NULL) -+ nr_ranges++; -+ -+ cmem = kmalloc(sizeof(struct crash_mem) + -+ sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL); -+ if (!cmem) -+ return -ENOMEM; -+ -+ cmem->max_nr_ranges = nr_ranges; -+ cmem->nr_ranges = 0; -+ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, -+ MEMBLOCK_NONE, &start, &end, NULL) { -+ cmem->ranges[cmem->nr_ranges].start = start; -+ cmem->ranges[cmem->nr_ranges].end = end - 1; -+ cmem->nr_ranges++; -+ } -+ -+ /* Exclude crashkernel region */ -+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); -+ -+ if (!ret) -+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz); -+ -+ kfree(cmem); -+ return ret; -+} -+ - int load_other_segments(struct kimage *image, - unsigned long kernel_load_addr, - unsigned long kernel_size, -@@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image, - char *cmdline) - { - struct kexec_buf kbuf; -- void *dtb = NULL; -- unsigned long initrd_load_addr = 0, dtb_len; -+ void *headers, *dtb = NULL; -+ unsigned long headers_sz, initrd_load_addr = 0, dtb_len; - int ret = 0; - - kbuf.image = image; - /* not allocate anything below the kernel */ - kbuf.buf_min = kernel_load_addr + kernel_size; - -+ /* load elf core header */ -+ if (image->type == KEXEC_TYPE_CRASH) { -+ ret = prepare_elf_headers(&headers, &headers_sz); -+ if (ret) { -+ pr_err("Preparing elf core header failed\n"); -+ goto out_err; -+ } -+ -+ kbuf.buffer = headers; -+ kbuf.bufsz = headers_sz; -+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; -+ kbuf.memsz = headers_sz; -+ kbuf.buf_align = SZ_64K; /* largest supported page size */ -+ kbuf.buf_max = ULONG_MAX; -+ kbuf.top_down = true; -+ -+ ret = kexec_add_buffer(&kbuf); -+ if (ret) { -+ vfree(headers); -+ goto out_err; -+ } -+ image->arch.elf_headers = headers; -+ image->arch.elf_headers_mem = kbuf.mem; -+ image->arch.elf_headers_sz = headers_sz; -+ -+ pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", -+ image->arch.elf_headers_mem, headers_sz, headers_sz); -+ } -+ - /* load initrd */ - if (initrd) { - kbuf.buffer = initrd; --- -2.30.2 - diff --git a/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch b/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch deleted file mode 100644 index f2fa541b..00000000 --- a/packages/kernel-5.4/0004-libfdt-include-fdt_addresses.c.patch +++ /dev/null @@ -1,44 +0,0 @@ -From c273a2bd8aa81b72e48736c3aa51f7ffeae39925 Mon Sep 17 00:00:00 2001 -From: AKASHI Takahiro -Date: Mon, 9 Dec 2019 12:03:44 +0900 -Subject: [PATCH] libfdt: include fdt_addresses.c - -In the implementation of kexec_file_loaded-based kdump for arm64, -fdt_appendprop_addrrange() will be needed. - -So include fdt_addresses.c in making libfdt. 
- -Signed-off-by: AKASHI Takahiro -Cc: Rob Herring -Cc: Frank Rowand -Signed-off-by: Will Deacon ---- - lib/Makefile | 2 +- - lib/fdt_addresses.c | 2 ++ - 2 files changed, 3 insertions(+), 1 deletion(-) - create mode 100644 lib/fdt_addresses.c - -diff --git a/lib/Makefile b/lib/Makefile -index 93217d44237f..c20b1debe9b4 100644 ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n - KCOV_INSTRUMENT_stackdepot.o := n - - libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ -- fdt_empty_tree.o -+ fdt_empty_tree.o fdt_addresses.o - $(foreach file, $(libfdt_files), \ - $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt)) - lib-$(CONFIG_LIBFDT) += $(libfdt_files) -diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c -new file mode 100644 -index 000000000000..23610bcf390b ---- /dev/null -+++ b/lib/fdt_addresses.c -@@ -0,0 +1,2 @@ -+#include -+#include "../scripts/dtc/libfdt/fdt_addresses.c" --- -2.30.2 - diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 3366f20a..fd7ad68e 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/9e8b76ee271c50b0190e45d6b3fb69263afc7c8be8c1c3aafc4e663f997a0232/kernel-5.4.141-67.229.amzn2.src.rpm" -sha512 = "2f980b006579d3297481e0e8f8b636501648b05ae50e48a90ac1b576a47745dd9ab45fa92c7c094f7bff6931c502da2af1a3588f96e362a624d33767601d03b6" +url = "https://cdn.amazonlinux.com/blobstore/a068a12de784cc571656e680fbd3213773032b6b4d3c940b37b9db664fb7be52/kernel-5.4.149-73.259.amzn2.src.rpm" +sha512 = "d7b86a37257fe02e8fda360397371662215dd916f4f6e82a9c9174bec385dd7347197baa17ba9666dd31f7b41472cde6fc293f431098a53526de1b86a71bb386" # RPM BuildRequires [build-dependencies] diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 5799c35b..e55de79d 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,23 +1,18 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.141 +Version: 5.4.149 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/9e8b76ee271c50b0190e45d6b3fb69263afc7c8be8c1c3aafc4e663f997a0232/kernel-5.4.141-67.229.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/a068a12de784cc571656e680fbd3213773032b6b4d3c940b37b9db664fb7be52/kernel-5.4.149-73.259.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch -# Required patches for kdump support -Patch0002: 0002-x86-purgatory-Add-fno-stack-protector.patch -Patch0003: 0003-arm64-kexec_file-add-crash-dump-support.patch -Patch0004: 0004-libfdt-include-fdt_addresses.c.patch - # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch From e67b81039c2c81497bad24d39db6a807ef531d17 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Tue, 9 Nov 2021 18:37:40 +0000 Subject: [PATCH 0551/1356] Update kernel-5.10 to 5.10.68 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 953875e8..12260419 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,5 +13,5 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/67866b408c9bc8889fd8b86dab6fe79e697ac5ef31d321f173b028bf26dcf266/kernel-5.10.59-52.142.amzn2.src.rpm" -sha512 = "bbd90bd9793218f9410ca5d953a718fc00ed212953ea7de889698f66388314ecff4f145b2beb90ebff0bac6e961c6b3b71ca18948dc83194dd2e70feba7cf1db" +url = "https://cdn.amazonlinux.com/blobstore/2463ceff87cbe05e736813f33f5a8b70f9c98effe9eb5167fa613fae1fb9a943/kernel-5.10.68-62.173.amzn2.src.rpm" +sha512 = "42bca6a73a9d6ddae9553f1d71d4f28d436d813b1068f270fe2ae80701201b88946dc3c094829c90f62fc4894910867d7afeccdfbe2abf3a19848fc4c28d51b9" diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 7ab9b69b..424de38f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.59 -Release: 2%{?dist} +Version: 5.10.68 +Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/67866b408c9bc8889fd8b86dab6fe79e697ac5ef31d321f173b028bf26dcf266/kernel-5.10.59-52.142.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/2463ceff87cbe05e736813f33f5a8b70f9c98effe9eb5167fa613fae1fb9a943/kernel-5.10.68-62.173.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 3ed47e6ec70b6e8b8bd27b7bfe7c2726c2519d74 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 31 Oct 2021 21:28:41 +0000 Subject: [PATCH 0552/1356] kernel: load i8042, keyboard, and mouse as modules If they're built in, they can delay mounting the root filesystem. Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 6 ++++++ packages/kernel-5.4/config-bottlerocket | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index e6eb3312..b5f021eb 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -73,3 +73,9 @@ CONFIG_KERNEL_ZSTD=y CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y + +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. +CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index e6eb3312..b5f021eb 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -73,3 +73,9 @@ CONFIG_KERNEL_ZSTD=y CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y + +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. 
+CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m From a47a031430867d04cb875f2f3837971ee7e4818c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 31 Oct 2021 21:48:55 +0000 Subject: [PATCH 0553/1356] build: disable RAID auto detect Any use of RAID is left up to containers to handle. Signed-off-by: Ben Cressey --- tools/rpm2img | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/rpm2img b/tools/rpm2img index 75637bb8..6285011c 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -203,6 +203,7 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { linux (\$root)/vmlinuz root=/dev/dm-0 \\ ${KERNEL_PARAMETERS} \\ rootwait ro \\ + raid=noautodetect \\ random.trust_cpu=on selinux=1 enforcing=1 \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ From 29865487e7f721c578d318ae1239977cab78aff6 Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 12 Nov 2021 11:35:23 -0800 Subject: [PATCH 0554/1356] GH Actions: add file types to ignore list --- .github/workflows/build.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a442ac27..199c733d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -2,8 +2,22 @@ name: Build on: pull_request: branches: [develop] + # Here we list file types that don't affect the build and don't need to use + # up our Actions runners. paths-ignore: + # draw.io (diagrams.net) files, the source of png images for docs + - '**.drawio' + # Example configuration files + - '**.example' + # Markdown documentation - '**.md' + # Images for documentation + - '**.png' + # Templates for README files + - '**.tpl' + # Sample config files and OpenAPI docs + - '**.yaml' + jobs: build: runs-on: [self-hosted, linux, x64] From 87d9c2f8c9bfd7481c14c0465a810185e65396e6 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 21 Oct 2021 04:48:58 +0000 Subject: [PATCH 0555/1356] add package for AMD and Intel CPU microcode Signed-off-by: Ben Cressey --- packages/microcode/Cargo.toml | 19 ++++ packages/microcode/build.rs | 9 ++ packages/microcode/latest-srpm-urls.sh | 2 + packages/microcode/microcode.spec | 120 +++++++++++++++++++++++++ packages/microcode/pkg.rs | 1 + 5 files changed, 151 insertions(+) create mode 100644 packages/microcode/Cargo.toml create mode 100644 packages/microcode/build.rs create mode 100755 packages/microcode/latest-srpm-urls.sh create mode 100644 packages/microcode/microcode.spec create mode 100644 packages/microcode/pkg.rs diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml new file mode 100644 index 00000000..b3dc0893 --- /dev/null +++ b/packages/microcode/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "microcode" +version = "0.1.0" +edition = "2018" +publish = false +build = "build.rs" + +[lib] +path = "pkg.rs" + +# Use latest-srpm-urls.sh to get these. 
+ +[[package.metadata.build-package.external-files]] +url = "https://cdn.amazonlinux.com/blobstore/6d7f707779f6aff41c89bad00f7abe69dc70919cee29a8d3e5060f8070efe71d/linux-firmware-20200421-79.git78c0348.amzn2.src.rpm" +sha512 = "d5a62eca6ddd7ff322574f17359681d03a733acc51c334127f291af5d5e39fcdf821c073ddcd977b2ca088cd95d35dc31db2001ca4c312a62dcbd4ea935434fd" + +[[package.metadata.build-package.external-files]] +url = "https://cdn.amazonlinux.com/blobstore/76e8f9f15ec2b27c70aff3ca15a28df51790b25c73fc8dc1bf1f28a9069b15e8/microcode_ctl-2.1-47.amzn2.0.9.src.rpm" +sha512 = "e1347139d1edbd52d2619d970ba0f03500ba7367d071bb30ab3d209e44b3ff63000fcaa681f7352c79f7d5d2f0753130161b42b0eab7aab97b5b4fc4bfaa1b3b" diff --git a/packages/microcode/build.rs b/packages/microcode/build.rs new file mode 100644 index 00000000..cad8999a --- /dev/null +++ b/packages/microcode/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-package").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/packages/microcode/latest-srpm-urls.sh b/packages/microcode/latest-srpm-urls.sh new file mode 100755 index 00000000..a3d4c510 --- /dev/null +++ b/packages/microcode/latest-srpm-urls.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'yum install -q -y yum-utils && yumdownloader -q --source --urls linux-firmware microcode_ctl | grep ^http' diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec new file mode 100644 index 00000000..1d1439eb --- /dev/null +++ b/packages/microcode/microcode.spec @@ -0,0 +1,120 @@ +# This is a wrapper package for binary-only microcode from Intel and AMD. +%global debug_package %{nil} + +# These are specific to the upstream source RPM, and will likely need to be +# updated for each new version. +%global amd_ucode_archive linux-firmware-20200421.tar.gz +%global intel_ucode_archive microcode-20210608-1-amzn.tgz + +Name: %{_cross_os}microcode +Version: 0.0 +Release: 1%{?dist} +Summary: Microcode for AMD and Intel processors +License: LicenseRef-scancode-amd-linux-firmware-export AND LicenseRef-scancode-intel-mcu-2018 + +# Packaging AMD and Intel microcode together is specific to Bottlerocket, and +# RPM only allows one URL field per package, so this is about as accurate as we +# can be. The real upstream URLs for AMD and Intel microcode are given below in +# the subpackage definitions. +URL: https://github.com/bottlerocket-os/bottlerocket/tree/develop/packages/microcode + +# We use Amazon Linux 2 as our upstream for microcode updates. +Source0: https://cdn.amazonlinux.com/blobstore/6d7f707779f6aff41c89bad00f7abe69dc70919cee29a8d3e5060f8070efe71d/linux-firmware-20200421-79.git78c0348.amzn2.src.rpm +Source1: https://cdn.amazonlinux.com/blobstore/76e8f9f15ec2b27c70aff3ca15a28df51790b25c73fc8dc1bf1f28a9069b15e8/microcode_ctl-2.1-47.amzn2.0.9.src.rpm + +# Lets us install "microcode" to pull in the AMD and Intel updates. +Requires: %{_cross_os}microcode-amd +Requires: %{_cross_os}microcode-intel + +%description +%{summary}. + +%package amd +Summary: Microcode for AMD processors +License: LicenseRef-scancode-amd-linux-firmware-export +URL: https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/amd-ucode +Requires: %{_cross_os}microcode-amd-license + +%description amd +%{summary}. 
+ +%package amd-license +Summary: License files for microcode for AMD processors +License: LicenseRef-scancode-amd-linux-firmware-export +URL: https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/LICENSE.amd-ucode + +%description amd-license +%{summary}. + +%package intel +Summary: Microcode for Intel processors +License: LicenseRef-scancode-intel-mcu-2018 +URL: https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files +Requires: %{_cross_os}microcode-intel-license + +%description intel +%{summary}. + +%package intel-license +Summary: License files for microcode for Intel processors +License: LicenseRef-scancode-intel-mcu-2018 +URL: https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/blob/main/license + +%description intel-license +%{summary}. + +# Lets us install "microcode-licenses" for just the license files. +%package licenses +Summary: License files for microcode for AMD and Intel processors +License: LicenseRef-scancode-amd-linux-firmware-export AND LicenseRef-scancode-intel-mcu-2018 +URL: https://github.com/bottlerocket-os/bottlerocket/tree/develop/packages/microcode +Requires: %{_cross_os}microcode-amd-license +Requires: %{_cross_os}microcode-intel-license + +%description licenses +%{summary}. + +%prep +rpm2cpio %{SOURCE0} | cpio -iu %{amd_ucode_archive} +rpm2cpio %{SOURCE1} | cpio -iu %{intel_ucode_archive} +mkdir amd intel +tar -C amd -xof %{amd_ucode_archive} +tar -C intel -xof %{intel_ucode_archive} +cp {amd/,}LICENSE.amd-ucode +cp intel/intel-ucode-with-caveats/* intel/intel-ucode +cp intel/license LICENSE.intel-ucode + +# Create links to the SPDX identifiers we're using, so they're easier to match +# up with the license text. +ln -s LICENSE.intel-ucode LicenseRef-scancode-intel-mcu-2018 +ln -s LICENSE.amd-ucode LicenseRef-scancode-amd-linux-firmware-export + +%build + +%install +install -d %{buildroot}%{_cross_libdir}/firmware/{amd,intel}-ucode +install -p -m 0644 amd/amd-ucode/*.bin %{buildroot}%{_cross_libdir}/firmware/amd-ucode +install -p -m 0644 intel/intel-ucode/* %{buildroot}%{_cross_libdir}/firmware/intel-ucode + +%files + +%files amd +%dir %{_cross_libdir}/firmware +%dir %{_cross_libdir}/firmware/amd-ucode +%{_cross_libdir}/firmware/amd-ucode/microcode_amd*.bin + +%files amd-license +%license LICENSE.amd-ucode LicenseRef-scancode-amd-linux-firmware-export + +%files intel +%dir %{_cross_libdir}/firmware +%dir %{_cross_libdir}/firmware/intel-ucode +%{_cross_libdir}/firmware/intel-ucode/??-??-?? 
+ +%files intel-license +%license LICENSE.intel-ucode LicenseRef-scancode-intel-mcu-2018 + +%files licenses +%{_cross_attribution_file} + +%changelog diff --git a/packages/microcode/pkg.rs b/packages/microcode/pkg.rs new file mode 100644 index 00000000..d799fb2d --- /dev/null +++ b/packages/microcode/pkg.rs @@ -0,0 +1 @@ +// not used From 0e4cc987768649bd78db0fa02cfc376a6812d13e Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 21 Oct 2021 04:50:06 +0000 Subject: [PATCH 0556/1356] kernel: include microcode in bundled firmware Signed-off-by: Ben Cressey --- packages/kernel-5.10/Cargo.toml | 3 +++ packages/kernel-5.10/kernel-5.10.spec | 28 ++++++++++++++++++++++++--- packages/kernel-5.4/Cargo.toml | 3 +-- packages/kernel-5.4/kernel-5.4.spec | 28 ++++++++++++++++++++++++--- 4 files changed, 54 insertions(+), 8 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 12260419..9b8bb1d5 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -15,3 +15,6 @@ path = "pkg.rs" # Use latest-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/2463ceff87cbe05e736813f33f5a8b70f9c98effe9eb5167fa613fae1fb9a943/kernel-5.10.68-62.173.amzn2.src.rpm" sha512 = "42bca6a73a9d6ddae9553f1d71d4f28d436d813b1068f270fe2ae80701201b88946dc3c094829c90f62fc4894910867d7afeccdfbe2abf3a19848fc4c28d51b9" + +[build-dependencies] +microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 424de38f..7493a4b1 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -22,6 +22,14 @@ BuildRequires: hostname BuildRequires: kmod BuildRequires: openssl-devel +# CPU microcode updates are included as "extra firmware" so the files don't +# need to be installed on the root filesystem. However, we want the license and +# attribution files to be available in the usual place. +%if "%{_cross_arch}" == "x86_64" +BuildRequires: %{_cross_os}microcode +Requires: %{_cross_os}microcode-licenses +%endif + # Pull in expected modules and development files. Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} @@ -66,10 +74,24 @@ for patch in ../*.patch; do done # Patches listed in this spec (Patch0001...) 
%autopatch -p1 + +%if "%{_cross_arch}" == "x86_64" +microcode="$(find %{_cross_libdir}/firmware -type f -path '*/*-ucode/*' -printf '%%P ')" +cat < ../config-microcode +CONFIG_EXTRA_FIRMWARE="${microcode}" +CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" +EOF +%endif + KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ - ARCH="%{_cross_karch}" \ - scripts/kconfig/merge_config.sh ../config-%{_cross_arch} %{SOURCE100} -rm -f ../config-%{_cross_arch} ../*.patch +ARCH="%{_cross_karch}" \ +scripts/kconfig/merge_config.sh \ + ../config-%{_cross_arch} \ +%if "%{_cross_arch}" == "x86_64" + ../config-microcode \ +%endif + %{SOURCE100} +rm -f ../config-* ../*.patch %global kmake \ make -s\\\ diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index fd7ad68e..33974d00 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -16,6 +16,5 @@ path = "pkg.rs" url = "https://cdn.amazonlinux.com/blobstore/a068a12de784cc571656e680fbd3213773032b6b4d3c940b37b9db664fb7be52/kernel-5.4.149-73.259.amzn2.src.rpm" sha512 = "d7b86a37257fe02e8fda360397371662215dd916f4f6e82a9c9174bec385dd7347197baa17ba9666dd31f7b41472cde6fc293f431098a53526de1b86a71bb386" -# RPM BuildRequires [build-dependencies] -# Provided by Bottlerocket SDK +microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index e55de79d..650337eb 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -33,6 +33,14 @@ BuildRequires: hostname BuildRequires: kmod BuildRequires: openssl-devel +# CPU microcode updates are included as "extra firmware" so the files don't +# need to be installed on the root filesystem. However, we want the license and +# attribution files to be available in the usual place. +%if "%{_cross_arch}" == "x86_64" +BuildRequires: %{_cross_os}microcode +Requires: %{_cross_os}microcode-licenses +%endif + # Pull in expected modules and development files. Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} @@ -77,10 +85,24 @@ for patch in ../*.patch; do done # Patches listed in this spec (Patch0001...) %autopatch -p1 + +%if "%{_cross_arch}" == "x86_64" +microcode="$(find %{_cross_libdir}/firmware -type f -path '*/*-ucode/*' -printf '%%P ')" +cat < ../config-microcode +CONFIG_EXTRA_FIRMWARE="${microcode}" +CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" +EOF +%endif + KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ - ARCH="%{_cross_karch}" \ - scripts/kconfig/merge_config.sh ../config-%{_cross_arch} %{SOURCE100} -rm -f ../config-%{_cross_arch} ../*.patch +ARCH="%{_cross_karch}" \ +scripts/kconfig/merge_config.sh \ + ../config-%{_cross_arch} \ +%if "%{_cross_arch}" == "x86_64" + ../config-microcode \ +%endif + %{SOURCE100} +rm -f ../config-* ../*.patch %global kmake \ make -s\\\ From c99441a56e4884f4c3680223f9fcb388a146fbf3 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 16 Nov 2021 23:08:20 +0000 Subject: [PATCH 0557/1356] build: update SDK to 0.23.1 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 199c733d..83373605 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,7 +43,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.56.0 && rustup default 1.56.0 + - run: rustup toolchain install 1.56.1 && rustup default 1.56.1 - run: cargo install --version 0.30.0 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt From c9f1ac09176e10139c5acc4b3b999a112d72b7da Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 12 Nov 2021 00:56:40 +0000 Subject: [PATCH 0558/1356] build: refactor partition layout code Move all decisions about partition sizes, types, etc into a library so that it's easier to support layouts for different disk sizes. The layout is still essentially static, but can be scaled up as needed to support larger root filesystems. For the 2 GiB OS image layout, the end result is almost identical, except for fixing a longstanding bug where the last two partitions on each disk - PRIVATE and DATA - were (1 MiB + 1 sector) in size, and not 1 MiB as intended. Signed-off-by: Ben Cressey --- tools/partyplanner | 211 +++++++++++++++++++++++++++++++++++++++++++++ tools/rpm2img | 151 ++++++++++++++++---------------- 2 files changed, 286 insertions(+), 76 deletions(-) create mode 100755 tools/partyplanner diff --git a/tools/partyplanner b/tools/partyplanner new file mode 100755 index 00000000..f7da0be8 --- /dev/null +++ b/tools/partyplanner @@ -0,0 +1,211 @@ +#!/bin/bash + +############################################################################### +# Section 1: partition type GUIDs + +# Define partition type GUIDs for all OS-managed partitions. This is required +# for the boot partition, where we set gptprio bits in the GUID-specific use +# field, but we might as well do it for all of them. +BOTTLEROCKET_BOOT_TYPECODE="6b636168-7420-6568-2070-6c616e657421" +BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" +BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" +BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" +BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" +BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" + +# Under BIOS, the firmware will transfer control to the MBR on the boot device, +# which will pass control to the GRUB stage 2 binary written to the BIOS boot +# partition. The BIOS does not attach any significance to this partition type, +# but GRUB knows to install itself there when we run `grub-bios-setup`. +BIOS_BOOT_TYPECODE="ef02" + +# Under EFI, the firmware will find the EFI system partition and execute the +# program at a platform-defined path like `bootx64.efi`. The partition type +# must match what the firmware expects. +EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" + +# Whichever entry point is used for booting the system, it's important to note +# that only one build of GRUB is involved - the one that's installed during the +# image build. + +# GRUB understands the GPT priorities scheme we use to find the active boot +# partition; EFI and BIOS firmware does not. 
This is why we do not update GRUB +# during our system updates; we would have no way to revert to an earlier copy +# of the bootloader if it failed to boot. +# +# We may eventually want to have an active/passive scheme for EFI partitions, +# to allow for potential GRUB and shim updates on EFI platforms in cases where +# we need to deliver security fixes. For now, add a placeholder partition type +# for an alternate bank. +EFI_BACKUP_TYPECODE="B39CE39C-0A00-B4AB-2D11-F18F8237A21C" + +############################################################################### +# Section 2: fixed size partitions and reservations + +# The GPT header and footer each take up 32 sectors, but we reserve a full MiB +# so that partitions can all be aligned on MiB boundaries. +GPT_MIB="1" # two per disk + +# The BIOS partition is only used on x86 platforms, and only needs to be large +# enough for the GRUB stage 2. Increasing its size will reduce the size of the +# "private" and "reserved" partitions. This should be relatively safe since we +# don't apply image updates to those partitions. +BIOS_MIB="4" # one per disk + +# The GPT and BIOS reservations are fixed overhead that will be deducted from +# the space nominally given to the private partition used to persist settings. +OVERHEAD_MIB="$((GPT_MIB * 2 + BIOS_MIB))" + +# The 'recommended' size for the EFI partition is 100MB but our EFI images are +# under 1MB, so this will suffice for now. It would be possible to increase the +# EFI partition size by taking space from the "reserved" area below. +EFI_MIB="5" # one per bank + +############################################################################### +# Section 3: variable sized partitions + +# These partitions scale based on image size. The scaling factors are chosen so +# that we end up with the same partition sizes for the banks on a 2 GiB image, +# which was the only image size we historically supported. +# +# !!! WARNING !!! +# +# Increasing any of these constants is very likely break systems on update, +# since the corresponding partitions are adjacent on disk and have no room to +# grow. +BOOT_SCALE_FACTOR="20" +ROOT_SCALE_FACTOR="460" +HASH_SCALE_FACTOR="5" +RESERVE_SCALE_FACTOR="15" +PRIVATE_SCALE_FACTOR="24" + +############################################################################### +# Section 4: ASCII art gallery + +# Layout for a 1 GiB OS image. Sizes marked with (*) scale with overall image +# size, based on the constant factors above. + +# +---------------------------------+ +# Prelude | GPT header 1 MiB | 5 MiB +# | BIOS boot partition 4 MiB | Fixed size. +# +---------------------------------+ +# | EFI system partition 5 MiB | +# | Boot partition A 20 MiB* | (image size - prelude - postlude) / 2 +# Bank A | Root partition A 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 +# | Hash partition A 5 MiB* | 500 MiB +# | Reserved partition A 10 MiB* | +# +---------------------------------+ +# | EFI backup partition 5 MiB | +# | Boot partition B 20 MiB* | (image size - prelude - postlude) / 2 +# Bank B | Root partition B 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 +# | Hash partition B 5 MiB* | 500 MiB +# | Reserved partition B 10 MiB* | +# +---------------------------------+ +# | Private partition 18 MiB* | (image size * 24 as MiB) - prelude +# Postlude | GPT footer 1 MiB | GPT is fixed, private partition grows. 
+# +---------------------------------+ + +############################################################################## +# Section 5: library functions + +# Populate the caller's tables with sizes and offsets for known partitions. +set_partition_sizes() { + local disk_image_gib data_image_gib + local -n pp_size pp_offset + disk_image_gib="${1:?}" + data_image_gib="${2:?}" + + # Table for partition sizes, in MiB. + pp_size="${3:?}" + + # Table for partition offsets from start of disk, in MiB. + pp_offset="${4:?}" + + # Most of the partitions on the main image scale with the overall size. + local boot_mib root_mib hash_mib reserved_mib private_mib + boot_mib="$((disk_image_gib * BOOT_SCALE_FACTOR))" + root_mib="$((disk_image_gib * ROOT_SCALE_FACTOR))" + hash_mib="$((disk_image_gib * HASH_SCALE_FACTOR))" + + # Reserved space is everything left in the bank after the other partitions + # are scaled, minus the fixed 5 MiB EFI partition in that bank. + reserved_mib=$((disk_image_gib * RESERVE_SCALE_FACTOR - EFI_MIB)) + + # Private space scales per GiB, minus the BIOS and GPT partition overhead. + private_mib=$((disk_image_gib * PRIVATE_SCALE_FACTOR - OVERHEAD_MIB)) + + # Skip the GPT label at start of disk. + local offset + ((offset = 1)) + + pp_offset["BIOS"]="${offset}" + pp_size["BIOS"]="${BIOS_MIB}" + ((offset += BIOS_MIB)) + + for bank in A B ; do + pp_offset["EFI-${bank}"]="${offset}" + pp_size["EFI-${bank}"]="${EFI_MIB}" + ((offset += EFI_MIB)) + + pp_offset["BOOT-${bank}"]="${offset}" + pp_size["BOOT-${bank}"]="${boot_mib}" + ((offset += boot_mib)) + + pp_offset["ROOT-${bank}"]="${offset}" + pp_size["ROOT-${bank}"]="${root_mib}" + ((offset += root_mib)) + + pp_offset["HASH-${bank}"]="${offset}" + pp_size["HASH-${bank}"]="${hash_mib}" + ((offset += hash_mib)) + + pp_offset["RESERVED-${bank}"]="${offset}" + pp_size["RESERVED-${bank}"]="${reserved_mib}" + ((offset += reserved_mib)) + done + + pp_offset["PRIVATE"]="${offset}" + pp_size["PRIVATE"]="${private_mib}" + ((offset += private_mib)) + + # The data image is relatively easy to plan, at least until we add support + # for unified images. The first and last MiB are reserved for the GPT labels, + # and the remainder is for the lone "data" partition. + pp_size["DATA"]="$((data_image_gib * 1024 - GPT_MIB * 2))" + pp_offset["DATA"]="1" +} + +# Populate the caller's table with labels for known partitions. +set_partition_labels() { + local -n pp_label + pp_label="${1:?}" + pp_label["BIOS"]="BIOS-BOOT" + pp_label["DATA"]="BOTTLEROCKET-DATA" + pp_label["EFI-A"]="EFI-SYSTEM" + pp_label["EFI-B"]="EFI-BACKUP" + pp_label["PRIVATE"]="BOTTLEROCKET-PRIVATE" + for part in BOOT ROOT HASH RESERVED ; do + for bank in A B ; do + pp_label["${part}-${bank}"]="BOTTLEROCKET-${part}-${bank}" + done + done +} + +# Populate the caller's table with GPT type codes for known partitions. 
+set_partition_types() { + local -n pp_type + pp_type="${1:?}" + pp_type["BIOS"]="${BIOS_BOOT_TYPECODE}" + pp_type["DATA"]="${BOTTLEROCKET_DATA_TYPECODE}" + pp_type["EFI-A"]="${EFI_SYSTEM_TYPECODE}" + pp_type["EFI-B"]="${EFI_BACKUP_TYPECODE}" + pp_type["PRIVATE"]="${BOTTLEROCKET_PRIVATE_TYPECODE}" + local typecode + for part in BOOT ROOT HASH RESERVED ; do + for bank in A B ; do + typecode="BOTTLEROCKET_${part}_TYPECODE" + typecode="${!typecode}" + pp_type["${part}-${bank}"]="${typecode}" + done + done +} diff --git a/tools/rpm2img b/tools/rpm2img index 6285011c..799bc5c5 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -3,6 +3,10 @@ set -eu -o pipefail shopt -qs failglob +# import the partition helper functions +# shellcheck source=partyplanner +. "${0%/*}/partyplanner" + OUTPUT_FMT="raw" for opt in "$@"; do @@ -33,6 +37,9 @@ BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" +DISK_IMAGE_SIZE_GIB="2" +DATA_IMAGE_SIZE_GIB="1" + DISK_IMAGE="$(mktemp)" BOOT_IMAGE="$(mktemp)" VERITY_IMAGE="$(mktemp)" @@ -56,58 +63,44 @@ VERITY_HASH_ALGORITHM=sha256 VERITY_DATA_BLOCK_SIZE=4096 VERITY_HASH_BLOCK_SIZE=4096 -# Define partition type GUIDs for all OS-managed partitions. This is required -# for the boot partition, where we set gptprio bits in the GUID-specific use -# field, but we might as well do it for all of them. -BOTTLEROCKET_BOOT_TYPECODE="6b636168-7420-6568-2070-6c616e657421" -BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" -BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" -BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" -BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" -BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" - -# Under BIOS, the firmware will transfer control to the MBR on the boot device, -# which will pass control to the GRUB stage 2 binary written to the BIOS boot -# partition. The BIOS does not attach any significance to this partition type, -# but GRUB knows to install itself there when we run `grub-bios-setup`. -BIOS_BOOT_TYPECODE="ef02" - -# Under EFI, the firmware will find the EFI system partition and execute the -# program at a platform-defined path like `bootx64.efi`. The partition type -# must match what the firmware expects. -EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" - -# Whichever entry point is used for booting the system, it's important to note -# that only one build of GRUB is involved - the one that's installed below when -# we run this script. GRUB understands the GPT priorities scheme we use to find -# the active boot partition; EFI and BIOS firmware does not. This is why we do -# not update GRUB during our system updates; we would have no way to revert to -# an earlier copy of the bootloader if it failed to boot. -# -# We may eventually want to have an active/passive scheme for EFI partitions, -# to allow for potential GRUB and shim updates on EFI platforms in cases where -# we need to deliver security fixes. For now, add a placeholder partition type -# and reserve space for an alternate bank. 
-EFI_BACKUP_TYPECODE="B39CE39C-0A00-B4AB-2D11-F18F8237A21C" - -truncate -s 2G "${DISK_IMAGE}" -# efi: 5M + boot: 40M + root: 920M + hash: 10M + reserved: 25M = 1000M -# boot partition attributes (-A): 48 = gptprio priority bit; 56 = gptprio successful bit -# partitions are backwards so that we don't make things inconsistent when specifying a wrong end sector :) -sgdisk --clear \ - -n 0:2005M:2047M -c 0:"BOTTLEROCKET-PRIVATE" -t 0:"${BOTTLEROCKET_PRIVATE_TYPECODE}" \ - -n 0:1980M:0 -c 0:"BOTTLEROCKET-RESERVED-B" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ - -n 0:1970M:0 -c 0:"BOTTLEROCKET-HASH-B" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ - -n 0:1050M:0 -c 0:"BOTTLEROCKET-ROOT-B" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ - -n 0:1010M:0 -c 0:"BOTTLEROCKET-BOOT-B" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"clear":48 -A 0:"clear":56 \ - -n 0:1005M:0 -c 0:"EFI-BACKUP" -t 0:"${EFI_BACKUP_TYPECODE}" \ - -n 0:980M:0 -c 0:"BOTTLEROCKET-RESERVED-A" -t 0:"${BOTTLEROCKET_RESERVED_TYPECODE}" \ - -n 0:970M:0 -c 0:"BOTTLEROCKET-HASH-A" -t 0:"${BOTTLEROCKET_HASH_TYPECODE}" \ - -n 0:50M:0 -c 0:"BOTTLEROCKET-ROOT-A" -t 0:"${BOTTLEROCKET_ROOT_TYPECODE}" \ - -n 0:10M:0 -c 0:"BOTTLEROCKET-BOOT-A" -t 0:"${BOTTLEROCKET_BOOT_TYPECODE}" -A 0:"set":48 -A 0:"set":56 \ - -n 0:5M:0 -c 0:"EFI-SYSTEM" -t 0:"${EFI_SYSTEM_TYPECODE}" \ - -n 0:1M:0 -c 0:"BIOS-BOOT" -t 0:"${BIOS_BOOT_TYPECODE}" \ - --sort --print "${DISK_IMAGE}" +truncate -s "${DISK_IMAGE_SIZE_GIB}"G "${DISK_IMAGE}" + +declare -A partlabel parttype partsize partoff +set_partition_sizes \ + "${DISK_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" \ + partsize partoff +set_partition_labels partlabel +set_partition_types parttype + +declare -a partargs +for part in \ + BIOS \ + EFI-A BOOT-A ROOT-A HASH-A RESERVED-A \ + EFI-B BOOT-B ROOT-B HASH-B RESERVED-B \ + PRIVATE ; +do + # Each partition is aligned to a 1 MiB boundary, and extends to the sector + # before the next partition starts. Specify the end point in sectors so we + # can subtract a sector to fix the off-by-one error that comes from adding + # start and size together. (1 MiB contains 2048 512-byte sectors.) + part_start="${partoff[${part}]}" + part_end="$((part_start + partsize[${part}]))" + part_end="$((part_end * 2048 - 1))" + + partargs+=(-n "0:${part_start}M:${part_end}") + partargs+=(-c "0:${partlabel[${part}]}") + partargs+=(-t "0:${parttype[${part}]}") + + # Boot partition attributes: + # 48 = gptprio priority bit + # 56 = gptprio successful bit + case "${part}" in + BOOT-A) partargs+=(-A 0:"set":48 -A 0:"set":56) ;; + BOOT-B) partargs+=(-A 0:"clear":48 -A 0:"clear":56) ;; + esac +done + +sgdisk --clear "${partargs[@]}" --sort --print "${DISK_IMAGE}" rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ @@ -135,14 +128,12 @@ fi # package has placed the image in /boot/efi/EFI/BOOT. mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" -# The 'recommended' size for the EFI partition is 100MB but our EFI -# images are under 1MB, so this will suffice for now. 
-dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count=5 -mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((5*2048)) +dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count="${partsize[EFI-A]}" +mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 2048)) mmd -i "${EFI_IMAGE}" ::/EFI mmd -i "${EFI_IMAGE}" ::/EFI/BOOT mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT -dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=5 +dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" # Ensure that the grub directory exists. mkdir -p "${ROOT_MOUNT}/boot/grub" @@ -169,13 +160,14 @@ mkdir -p "${ROOT_MOUNT}/lost+found" ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ "${SELINUX_FILE_CONTEXTS}" "${ROOT_MOUNT}" \ | awk -v root="${ROOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" 920M +mkfs.ext4 -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" "${partsize[ROOT-A]}M" echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}" resize2fs -M "${ROOT_IMAGE}" -dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=50 +dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[ROOT-A]}" # BOTTLEROCKET-VERITY-A -truncate -s 8M "${VERITY_IMAGE}" +veritypart_mib="${partsize[HASH-A]}" +truncate -s "${veritypart_mib}M" "${VERITY_IMAGE}" veritysetup_output="$(veritysetup format \ --format "$VERITY_VERSION" \ --hash "$VERITY_HASH_ALGORITHM" \ @@ -183,16 +175,18 @@ veritysetup_output="$(veritysetup format \ --hash-block-size "$VERITY_HASH_BLOCK_SIZE" \ "${ROOT_IMAGE}" "${VERITY_IMAGE}" \ | tee /dev/stderr)" -if ! stat -c %s "${VERITY_IMAGE}" | grep -q '^8388608$'; then - echo "verity partition is larger than expected (8M)" +verityimage_size="$(stat -c %s "${VERITY_IMAGE}")" +veritypart_bytes="$((veritypart_mib * 1024 * 1024))" +if [ "${verityimage_size}" -gt "${veritypart_bytes}" ] ; then + echo "verity content is larger than partition (${veritypart_mib}M)" exit 1 fi -VERITY_DATA_4K_BLOCKS="$(grep '^Data blocks:' <<<$veritysetup_output | awk '{ print $NF }')" -VERITY_DATA_512B_BLOCKS="$(($VERITY_DATA_4K_BLOCKS * 8))" -VERITY_ROOT_HASH="$(grep '^Root hash:' <<<$veritysetup_output | awk '{ print $NF }')" -VERITY_SALT="$(grep '^Salt:' <<<$veritysetup_output | awk '{ print $NF }')" +VERITY_DATA_4K_BLOCKS="$(grep '^Data blocks:' <<<"${veritysetup_output}" | awk '{ print $NF }')" +VERITY_DATA_512B_BLOCKS="$((VERITY_DATA_4K_BLOCKS * 8))" +VERITY_ROOT_HASH="$(grep '^Root hash:' <<<"${veritysetup_output}" | awk '{ print $NF }')" +VERITY_SALT="$(grep '^Salt:' <<<"${veritysetup_output}" | awk '{ print $NF }')" veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" -dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=970 +dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" # write GRUB config cat < "${BOOT_MOUNT}/grub/grub.cfg" @@ -217,10 +211,10 @@ mkdir -p "${BOOT_MOUNT}/lost+found" BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ "${SELINUX_FILE_CONTEXTS}" "${BOOT_MOUNT}" \ | awk -v root="${BOOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" 40M +mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" "${partsize[BOOT-A]}M" echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}" resize2fs -M "${BOOT_IMAGE}" -dd 
if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=10 +dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A]}" # BOTTLEROCKET-PRIVATE @@ -229,23 +223,28 @@ dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=10 # - adjust the inode ratio since we expect lots of small files # - retain the inode size to allow most settings to be stored inline # - retain the block size to handle worse-case alignment for hardware -mkfs.ext4 -b 4096 -i 4096 -I 256 "${PRIVATE_IMAGE}" 42M -dd if="${PRIVATE_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek=2005 +mkfs.ext4 -b 4096 -i 4096 -I 256 "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" +dd if="${PRIVATE_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" # BOTTLEROCKET-DATA -truncate -s 1G "${DATA_IMAGE}" +truncate -s "${DATA_IMAGE_SIZE_GIB}"G "${DATA_IMAGE}" +data_start="${partoff[DATA]}" +data_end=$((data_start + partsize[DATA])) +data_end=$((data_end * 2048 - 1)) sgdisk --clear \ - -n 0:1M:1023M -c 0:"BOTTLEROCKET-DATA" -t 0:"${BOTTLEROCKET_DATA_TYPECODE}" \ - --sort --print "${DATA_IMAGE}" + -n "0:${data_start}M:${data_end}" \ + -c "0:${partlabel[DATA]}" \ + -t "0:${parttype[DATA]}" \ + --sort --print "${DATA_IMAGE}" # If we build on a host with SELinux enabled, we could end up with labels that # do not match our policy. Since we allow replacing the data volume at runtime, # we can't count on these labels being correct in any case, and it's better to # remove them all. UNLABELED=$(find "${DATA_MOUNT}" \ | awk -v root="${DATA_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_rm", $1, "security.selinux"}') -mkfs.ext4 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" 1022M +mkfs.ext4 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${partsize[DATA]}M" echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" -dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek=1 +dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek="${partoff[DATA]}" sgdisk -v "${DISK_IMAGE}" sgdisk -v "${DATA_IMAGE}" From 9b1ca66ed48709564a7a3df6370952c7625d0253 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 14 Nov 2021 17:44:38 +0000 Subject: [PATCH 0559/1356] build: support custom OS and data image sizes Add a new structure to variant metadata so the OS and data disk sizes can be overridden. The previous values - 2 GiB for the OS disk, and 1 GIB for the data disk, are promoted to the default values so existing variant definitions continue working. References to "disk image" in `rpm2img` have been changed to refer to "os image" instead, to avoid confusion over which image is meant. The expected sizes for these images are now required parameters. 
Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 11 ++++++- tools/buildsys/src/main.rs | 3 +- tools/buildsys/src/manifest.rs | 58 +++++++++++++++++++++++++++++++++- tools/partyplanner | 14 ++++---- tools/rpm2img | 37 +++++++++++----------- 5 files changed, 94 insertions(+), 29 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 19afa0fa..a83661eb 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -21,7 +21,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use crate::manifest::{ImageFormat, SupportedArch}; +use crate::manifest::{ImageFormat, ImageLayout, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -116,6 +116,7 @@ impl VariantBuilder { pub(crate) fn build( packages: &[String], image_format: Option<&ImageFormat>, + image_layout: Option<&ImageLayout>, kernel_parameters: Option<&Vec>, ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); @@ -126,6 +127,12 @@ impl VariantBuilder { .context(error::UnsupportedArch { arch: &arch })? .goarch(); + let image_layout = image_layout.cloned().unwrap_or_default(); + let ImageLayout { + os_image_size_gib, + data_image_size_gib, + } = image_layout; + let mut args = Vec::new(); args.build_arg("PACKAGES", packages.join(" ")); args.build_arg("ARCH", &arch); @@ -143,6 +150,8 @@ impl VariantBuilder { Some(ImageFormat::Vmdk) => "vmdk", }, ); + args.build_arg("OS_IMAGE_SIZE_GIB", format!("{}", os_image_size_gib)); + args.build_arg("DATA_IMAGE_SIZE_GIB", format!("{}", data_image_size_gib)); args.build_arg( "KERNEL_PARAMETERS", kernel_parameters diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 69037840..b3415b77 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -192,8 +192,9 @@ fn build_variant() -> Result<()> { if let Some(packages) = manifest.included_packages() { let image_format = manifest.image_format(); + let image_layout = manifest.image_layout(); let kernel_parameters = manifest.kernel_parameters(); - VariantBuilder::build(&packages, image_format, kernel_parameters) + VariantBuilder::build(&packages, image_format, image_layout, kernel_parameters) .context(error::BuildAttempt)?; } else { println!("cargo:warning=No included packages in manifest. Skipping variant build."); diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 77e11ae5..e4c1ba48 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -66,13 +66,32 @@ releases-url = "https://www.example.com/releases" included-packages = ["release"] ``` -`image-format` is the desired format of the built image. +`image-format` is the desired format for the built images. This can be `raw` (the default), `vmdk`, or `qcow2`. ``` [package.metadata.build-variant] image-format = "vmdk" ``` +`image-layout` is the desired layout for the built images. + +`os-image-size-gib` is the desired size of the "os" disk image in GiB. +The specified size will be automatically divided into two banks, where each +bank contains the set of partitions needed for in-place upgrades. Roughly 40% +will be available for each root filesystem partition, with the rest allocated +to other essential system partitions. + +`data-image-size-gib` is the desired size of the "data" disk image in GiB. +The full size will be used for the single data partition, except for the 2 MiB +overhead for the GPT labels and partition alignment. 
The data partition will be +automatically resized to fill the disk on boot, so it is usually not necessary +to increase this value. +``` +[package.metadata.build-variant.image-layout] +os-image-size-gib = 2 +data-image-size-gib = 1 +``` + `supported-arches` is the list of architectures the variant is able to run on. The values can be `x86_64` and `aarch64`. If not specified, the variant can run on any of those architectures. @@ -101,6 +120,9 @@ use std::fmt; use std::fs; use std::path::{Path, PathBuf}; +static DEFAULT_OS_IMAGE_SIZE_GIB: u32 = 2; +static DEFAULT_DATA_IMAGE_SIZE_GIB: u32 = 1; + /// The nested structures here are somewhat complex, but they make it trivial /// to deserialize the structure we expect to find in the manifest. #[derive(Deserialize, Debug)] @@ -148,6 +170,11 @@ impl ManifestInfo { self.build_variant().and_then(|b| b.image_format.as_ref()) } + /// Convenience method to return the image layout, if specified. + pub(crate) fn image_layout(&self) -> Option<&ImageLayout> { + self.build_variant().and_then(|b| b.image_layout.as_ref()) + } + /// Convenience method to return the supported architectures for this variant. pub(crate) fn supported_arches(&self) -> Option<&HashSet> { self.build_variant() @@ -204,6 +231,7 @@ pub(crate) struct BuildPackage { pub(crate) struct BuildVariant { pub(crate) included_packages: Option>, pub(crate) image_format: Option, + pub(crate) image_layout: Option, pub(crate) supported_arches: Option>, pub(crate) kernel_parameters: Option>, } @@ -216,6 +244,34 @@ pub(crate) enum ImageFormat { Vmdk, } +#[derive(Deserialize, Debug, Copy, Clone)] +#[serde(rename_all = "kebab-case")] +pub(crate) struct ImageLayout { + #[serde(default = "ImageLayout::default_os_image_size_gib")] + pub(crate) os_image_size_gib: u32, + #[serde(default = "ImageLayout::default_data_image_size_gib")] + pub(crate) data_image_size_gib: u32, +} + +impl ImageLayout { + fn default_os_image_size_gib() -> u32 { + DEFAULT_OS_IMAGE_SIZE_GIB + } + + fn default_data_image_size_gib() -> u32 { + DEFAULT_DATA_IMAGE_SIZE_GIB + } +} + +impl Default for ImageLayout { + fn default() -> Self { + Self { + os_image_size_gib: Self::default_os_image_size_gib(), + data_image_size_gib: Self::default_data_image_size_gib(), + } + } +} + #[derive(Deserialize, Debug, PartialEq, Eq, Hash)] #[serde(rename_all = "lowercase")] pub(crate) enum SupportedArch { diff --git a/tools/partyplanner b/tools/partyplanner index f7da0be8..601e97c6 100755 --- a/tools/partyplanner +++ b/tools/partyplanner @@ -110,9 +110,9 @@ PRIVATE_SCALE_FACTOR="24" # Populate the caller's tables with sizes and offsets for known partitions. set_partition_sizes() { - local disk_image_gib data_image_gib + local os_image_gib data_image_gib local -n pp_size pp_offset - disk_image_gib="${1:?}" + os_image_gib="${1:?}" data_image_gib="${2:?}" # Table for partition sizes, in MiB. @@ -123,16 +123,16 @@ set_partition_sizes() { # Most of the partitions on the main image scale with the overall size. local boot_mib root_mib hash_mib reserved_mib private_mib - boot_mib="$((disk_image_gib * BOOT_SCALE_FACTOR))" - root_mib="$((disk_image_gib * ROOT_SCALE_FACTOR))" - hash_mib="$((disk_image_gib * HASH_SCALE_FACTOR))" + boot_mib="$((os_image_gib * BOOT_SCALE_FACTOR))" + root_mib="$((os_image_gib * ROOT_SCALE_FACTOR))" + hash_mib="$((os_image_gib * HASH_SCALE_FACTOR))" # Reserved space is everything left in the bank after the other partitions # are scaled, minus the fixed 5 MiB EFI partition in that bank. 
- reserved_mib=$((disk_image_gib * RESERVE_SCALE_FACTOR - EFI_MIB)) + reserved_mib=$((os_image_gib * RESERVE_SCALE_FACTOR - EFI_MIB)) # Private space scales per GiB, minus the BIOS and GPT partition overhead. - private_mib=$((disk_image_gib * PRIVATE_SCALE_FACTOR - OVERHEAD_MIB)) + private_mib=$((os_image_gib * PRIVATE_SCALE_FACTOR - OVERHEAD_MIB)) # Skip the GPT label at start of disk. local offset diff --git a/tools/rpm2img b/tools/rpm2img index 799bc5c5..0c00b561 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -15,6 +15,8 @@ for opt in "$@"; do --package-dir=*) PACKAGE_DIR="${optarg}" ;; --output-dir=*) OUTPUT_DIR="${optarg}" ;; --output-fmt=*) OUTPUT_FMT="${optarg}" ;; + --os-image-size-gib=*) OS_IMAGE_SIZE_GIB="${optarg}" ;; + --data-image-size-gib=*) DATA_IMAGE_SIZE_GIB="${optarg}" ;; esac done @@ -30,17 +32,14 @@ mkdir -p "${OUTPUT_DIR}" FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" -DISK_IMAGE_BASENAME="${FILENAME_PREFIX}" +OS_IMAGE_BASENAME="${FILENAME_PREFIX}" DATA_IMAGE_BASENAME="${FILENAME_PREFIX}-data" BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" -DISK_IMAGE_SIZE_GIB="2" -DATA_IMAGE_SIZE_GIB="1" - -DISK_IMAGE="$(mktemp)" +OS_IMAGE="$(mktemp)" BOOT_IMAGE="$(mktemp)" VERITY_IMAGE="$(mktemp)" ROOT_IMAGE="$(mktemp)" @@ -63,11 +62,11 @@ VERITY_HASH_ALGORITHM=sha256 VERITY_DATA_BLOCK_SIZE=4096 VERITY_HASH_BLOCK_SIZE=4096 -truncate -s "${DISK_IMAGE_SIZE_GIB}"G "${DISK_IMAGE}" +truncate -s "${OS_IMAGE_SIZE_GIB}"G "${OS_IMAGE}" declare -A partlabel parttype partsize partoff set_partition_sizes \ - "${DISK_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" \ + "${OS_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" \ partsize partoff set_partition_labels partlabel set_partition_types parttype @@ -100,7 +99,7 @@ do esac done -sgdisk --clear "${partargs[@]}" --sort --print "${DISK_IMAGE}" +sgdisk --clear "${partargs[@]}" --sort --print "${OS_IMAGE}" rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ @@ -112,13 +111,13 @@ rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* if [[ "${ARCH}" == "x86_64" ]]; then # MBR and BIOS-BOOT - echo "(hd0) ${DISK_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" + echo "(hd0) ${OS_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" "${ROOT_MOUNT}/sbin/grub-bios-setup" \ --directory="${ROOT_MOUNT}/boot/grub" \ --device-map="${ROOT_MOUNT}/boot/grub/device.map" \ --root="hd0" \ --skip-fs-probe \ - "${DISK_IMAGE}" + "${OS_IMAGE}" rm -vf "${ROOT_MOUNT}"/boot/grub/* "${ROOT_MOUNT}"/sbin/grub* fi @@ -133,7 +132,7 @@ mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 2048)) mmd -i "${EFI_IMAGE}" ::/EFI mmd -i "${EFI_IMAGE}" ::/EFI/BOOT mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT -dd if="${EFI_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" +dd if="${EFI_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" # Ensure that the grub directory exists. 
mkdir -p "${ROOT_MOUNT}/boot/grub" @@ -163,7 +162,7 @@ ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ mkfs.ext4 -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" "${partsize[ROOT-A]}M" echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}" resize2fs -M "${ROOT_IMAGE}" -dd if="${ROOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[ROOT-A]}" +dd if="${ROOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[ROOT-A]}" # BOTTLEROCKET-VERITY-A veritypart_mib="${partsize[HASH-A]}" @@ -186,7 +185,7 @@ VERITY_DATA_512B_BLOCKS="$((VERITY_DATA_4K_BLOCKS * 8))" VERITY_ROOT_HASH="$(grep '^Root hash:' <<<"${veritysetup_output}" | awk '{ print $NF }')" VERITY_SALT="$(grep '^Salt:' <<<"${veritysetup_output}" | awk '{ print $NF }')" veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" -dd if="${VERITY_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" +dd if="${VERITY_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" # write GRUB config cat < "${BOOT_MOUNT}/grub/grub.cfg" @@ -214,7 +213,7 @@ BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" "${partsize[BOOT-A]}M" echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}" resize2fs -M "${BOOT_IMAGE}" -dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A]}" +dd if="${BOOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A]}" # BOTTLEROCKET-PRIVATE @@ -224,7 +223,7 @@ dd if="${BOOT_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT # - retain the inode size to allow most settings to be stored inline # - retain the block size to handle worse-case alignment for hardware mkfs.ext4 -b 4096 -i 4096 -I 256 "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" -dd if="${PRIVATE_IMAGE}" of="${DISK_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" +dd if="${PRIVATE_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" # BOTTLEROCKET-DATA truncate -s "${DATA_IMAGE_SIZE_GIB}"G "${DATA_IMAGE}" @@ -246,18 +245,18 @@ mkfs.ext4 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${partsize[DATA]}M" echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek="${partoff[DATA]}" -sgdisk -v "${DISK_IMAGE}" +sgdisk -v "${OS_IMAGE}" sgdisk -v "${DATA_IMAGE}" if [[ ${OUTPUT_FMT} == "raw" ]]; then - lz4 -vc "${DISK_IMAGE}" >"${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.img.lz4" + lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.img.lz4" lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then - qemu-img convert -f raw -O qcow2 "${DISK_IMAGE}" "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.qcow2" + qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.qcow2" qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.qcow2" elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then # Stream optimization is required for creating an Open Virtual Appliance (OVA) - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DISK_IMAGE}" "${OUTPUT_DIR}/${DISK_IMAGE_BASENAME}.vmdk" + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.vmdk" qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" fi From 61e6ba5746eb2648629ff45ad9cf39e43dcd5297 Mon Sep 17 
00:00:00 2001 From: Tom Kirchner Date: Fri, 19 Nov 2021 13:23:08 -0800 Subject: [PATCH 0560/1356] Add 'apiclient get' for simple API retrieval --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 30a8437e..4ebffcec 100644 --- a/README.md +++ b/README.md @@ -248,7 +248,7 @@ Here we'll describe the settings you can configure on your Bottlerocket instance You can see the current settings with an API request: ``` -apiclient -u /settings +apiclient get settings ``` This will return all of the current settings in JSON format. From 4b34d5fb0d9191e67b84b38792d35b809878ecbe Mon Sep 17 00:00:00 2001 From: Pascal Bourdier Date: Tue, 30 Nov 2021 12:15:24 -0800 Subject: [PATCH 0561/1356] Docs: fix typo in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4ebffcec..d91d4055 100644 --- a/README.md +++ b/README.md @@ -505,7 +505,7 @@ Here are the metrics settings: #### Custom CA certificates settings -By defualt, Bottlerocket ships with the Mozilla CA certificate store, but you can add self-signed certificates through the API using these settings: +By default, Bottlerocket ships with the Mozilla CA certificate store, but you can add self-signed certificates through the API using these settings: * `settings.pki..data`: Base64-encoded PEM-formatted certificates bundle; it can contain more than one certificate * `settings.pki..trusted`: Whether the certificates in the bundle are trusted; defaults to `false` when not provided From fb6ecdfc0f1a61a475ad9a4c3f6634c5f8c876dd Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 1 Dec 2021 10:59:21 -0800 Subject: [PATCH 0562/1356] Update kernel-5.10 to 5.10.75 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 9b8bb1d5..5e4e7944 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/2463ceff87cbe05e736813f33f5a8b70f9c98effe9eb5167fa613fae1fb9a943/kernel-5.10.68-62.173.amzn2.src.rpm" -sha512 = "42bca6a73a9d6ddae9553f1d71d4f28d436d813b1068f270fe2ae80701201b88946dc3c094829c90f62fc4894910867d7afeccdfbe2abf3a19848fc4c28d51b9" +url = "https://cdn.amazonlinux.com/blobstore/04e0825929fcabea05c6d875de848d96ac317449eecfff360a4ab08ed1ff60ab/kernel-5.10.75-79.358.amzn2.src.rpm" +sha512 = "115c29e87a65c7f5c76245e9cb21c8ce4ed4153f393ccaa0a59e9210a22da0b3fe3eb2ad8ea73c4c107b04318f44be9deb85b1f11e0272888b1b62b97e72d519" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 7493a4b1..297f8fa0 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.68 +Version: 5.10.75 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/2463ceff87cbe05e736813f33f5a8b70f9c98effe9eb5167fa613fae1fb9a943/kernel-5.10.68-62.173.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/04e0825929fcabea05c6d875de848d96ac317449eecfff360a4ab08ed1ff60ab/kernel-5.10.75-79.358.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From dbcfb8c98bae0969c6d335a27669a472f096ff48 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Wed, 1 Dec 2021 11:05:22 -0800 Subject: [PATCH 0563/1356] Update kernel-5.4 to 5.4.156 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 33974d00..9f368094 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/a068a12de784cc571656e680fbd3213773032b6b4d3c940b37b9db664fb7be52/kernel-5.4.149-73.259.amzn2.src.rpm" -sha512 = "d7b86a37257fe02e8fda360397371662215dd916f4f6e82a9c9174bec385dd7347197baa17ba9666dd31f7b41472cde6fc293f431098a53526de1b86a71bb386" +url = "https://cdn.amazonlinux.com/blobstore/1f5404c1e1cc923f488ae6290e17b38ac000e32e35364a0d7b567dda0ecba127/kernel-5.4.156-83.273.amzn2.src.rpm" +sha512 = "c41e5913ec7786a17498b1032e973c0f139eadbafec2834527c3efdcb3371a316015a209b6ab82e863f08d62c7161da7fbc117b79c8d936afea64253a77c3df8" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 650337eb..11f01088 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.149 +Version: 5.4.156 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/a068a12de784cc571656e680fbd3213773032b6b4d3c940b37b9db664fb7be52/kernel-5.4.149-73.259.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/1f5404c1e1cc923f488ae6290e17b38ac000e32e35364a0d7b567dda0ecba127/kernel-5.4.156-83.273.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 6208abe569979d853a0802981466977755736070 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 8 Oct 2021 20:51:17 +0000 Subject: [PATCH 0564/1356] Add IPMI kernel module to 5.10 kernel Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index b5f021eb..aacbcb6f 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -79,3 +79,6 @@ CONFIG_DECOMPRESS_ZSTD=y CONFIG_SERIO_I8042=m CONFIG_KEYBOARD_ATKBD=m CONFIG_MOUSE_PS2=m + +# Add support for IPMI drivers +CONFIG_IPMI_HANDLER=m From 807e358cebc2b5ebca2c0a7b30117c2197256772 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 18 Nov 2021 00:06:32 +0000 Subject: [PATCH 0565/1356] build: add support for building unified images For some targets such as bare metal systems, the requirement for a separate block device to hold the data partition is unworkable. 
Implement a "unified" image layout, which places the data partition after the final OS partition, and is suitable for targets which may only have one disk. The old "split" layout remains the default. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 10 ++++- tools/buildsys/src/manifest.rs | 28 +++++++++++-- tools/partyplanner | 35 ++++++++++++---- tools/rpm2img | 73 ++++++++++++++++++++++++++-------- 4 files changed, 117 insertions(+), 29 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index a83661eb..293bd561 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -21,7 +21,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use crate::manifest::{ImageFormat, ImageLayout, SupportedArch}; +use crate::manifest::{ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -131,6 +131,7 @@ impl VariantBuilder { let ImageLayout { os_image_size_gib, data_image_size_gib, + partition_plan, } = image_layout; let mut args = Vec::new(); @@ -152,6 +153,13 @@ impl VariantBuilder { ); args.build_arg("OS_IMAGE_SIZE_GIB", format!("{}", os_image_size_gib)); args.build_arg("DATA_IMAGE_SIZE_GIB", format!("{}", data_image_size_gib)); + args.build_arg( + "PARTITION_PLAN", + match partition_plan { + PartitionPlan::Split => "split", + PartitionPlan::Unified => "unified", + }, + ); args.build_arg( "KERNEL_PARAMETERS", kernel_parameters diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index e4c1ba48..b9a4f332 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -86,10 +86,15 @@ The full size will be used for the single data partition, except for the 2 MiB overhead for the GPT labels and partition alignment. The data partition will be automatically resized to fill the disk on boot, so it is usually not necessary to increase this value. + +`partition-plan` is the desired strategy for image partitioning. +This can be `split` (the default) for "os" and "data" images backed by separate +volumes, or `unified` to have "os" and "data" share the same volume. ``` [package.metadata.build-variant.image-layout] os-image-size-gib = 2 data-image-size-gib = 1 +partition-plan = "split" ``` `supported-arches` is the list of architectures the variant is able to run on. @@ -120,9 +125,6 @@ use std::fmt; use std::fs; use std::path::{Path, PathBuf}; -static DEFAULT_OS_IMAGE_SIZE_GIB: u32 = 2; -static DEFAULT_DATA_IMAGE_SIZE_GIB: u32 = 1; - /// The nested structures here are somewhat complex, but they make it trivial /// to deserialize the structure we expect to find in the manifest. #[derive(Deserialize, Debug)] @@ -251,8 +253,16 @@ pub(crate) struct ImageLayout { pub(crate) os_image_size_gib: u32, #[serde(default = "ImageLayout::default_data_image_size_gib")] pub(crate) data_image_size_gib: u32, + #[serde(default = "ImageLayout::default_partition_plan")] + pub(crate) partition_plan: PartitionPlan, } +/// These are the historical defaults for all variants, before we added support +/// for customizing these properties. 
+static DEFAULT_OS_IMAGE_SIZE_GIB: u32 = 2; +static DEFAULT_DATA_IMAGE_SIZE_GIB: u32 = 1; +static DEFAULT_PARTITION_PLAN: PartitionPlan = PartitionPlan::Split; + impl ImageLayout { fn default_os_image_size_gib() -> u32 { DEFAULT_OS_IMAGE_SIZE_GIB @@ -261,6 +271,10 @@ impl ImageLayout { fn default_data_image_size_gib() -> u32 { DEFAULT_DATA_IMAGE_SIZE_GIB } + + fn default_partition_plan() -> PartitionPlan { + DEFAULT_PARTITION_PLAN + } } impl Default for ImageLayout { @@ -268,10 +282,18 @@ impl Default for ImageLayout { Self { os_image_size_gib: Self::default_os_image_size_gib(), data_image_size_gib: Self::default_data_image_size_gib(), + partition_plan: Self::default_partition_plan(), } } } +#[derive(Deserialize, Debug, Copy, Clone)] +#[serde(rename_all = "lowercase")] +pub(crate) enum PartitionPlan { + Split, + Unified, +} + #[derive(Deserialize, Debug, PartialEq, Eq, Hash)] #[serde(rename_all = "lowercase")] pub(crate) enum SupportedArch { diff --git a/tools/partyplanner b/tools/partyplanner index 601e97c6..edd852f8 100755 --- a/tools/partyplanner +++ b/tools/partyplanner @@ -110,16 +110,21 @@ PRIVATE_SCALE_FACTOR="24" # Populate the caller's tables with sizes and offsets for known partitions. set_partition_sizes() { - local os_image_gib data_image_gib + local os_image_gib data_image_gib partition_plan local -n pp_size pp_offset os_image_gib="${1:?}" data_image_gib="${2:?}" + # Whether we're building a layout for a "split" image, where OS and data + # volumes are on separate disks, or a "unified" image, where they share the + # same disk. + partition_plan="${3:?}" + # Table for partition sizes, in MiB. - pp_size="${3:?}" + pp_size="${4:?}" # Table for partition offsets from start of disk, in MiB. - pp_offset="${4:?}" + pp_offset="${5:?}" # Most of the partitions on the main image scale with the overall size. local boot_mib root_mib hash_mib reserved_mib private_mib @@ -168,11 +173,25 @@ set_partition_sizes() { pp_size["PRIVATE"]="${private_mib}" ((offset += private_mib)) - # The data image is relatively easy to plan, at least until we add support - # for unified images. The first and last MiB are reserved for the GPT labels, - # and the remainder is for the lone "data" partition. - pp_size["DATA"]="$((data_image_gib * 1024 - GPT_MIB * 2))" - pp_offset["DATA"]="1" + case "${partition_plan}" in + split) + # For a split data image, the first and last MiB are reserved for the GPT + # labels, and the rest is for the "data" partition. + pp_size["DATA"]="$((data_image_gib * 1024 - GPT_MIB * 2))" + pp_offset["DATA"]="1" + ;; + unified) + # For a unified image, we've already accounted for the GPT label space in + # the earlier calculations, so all the space is for the "data" partition. + pp_size["DATA"]="$((data_image_gib * 1024))" + pp_offset["DATA"]="${offset}" + ((offset += data_image_gib * 1024)) + ;; + *) + echo "unknown partition plan '${partition_plan}'" >&2 + exit 1 + ;; + esac } # Populate the caller's table with labels for known partitions. 
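Before moving on to `rpm2img`, here is a hedged sketch of how a caller might exercise the updated `set_partition_sizes` interface with its new `partition_plan` argument. Sourcing the helper directly and the 4 GiB / 20 GiB sizes are assumptions made only for illustration.

```bash
#!/usr/bin/env bash
# Hedged sketch: exercises the partyplanner functions shown above.
# Assumes the helpers can be sourced from tools/partyplanner; sizes are illustrative.
source tools/partyplanner

declare -A partsize partoff
# New argument order: os_image_gib, data_image_gib, partition_plan, size table, offset table.
set_partition_sizes 4 20 "unified" partsize partoff

# With the "unified" plan, DATA shares the OS disk and starts right after PRIVATE;
# with "split" it would instead start at offset 1 MiB on its own disk.
echo "DATA: offset ${partoff[DATA]} MiB, size ${partsize[DATA]} MiB"

# Partition boundaries are still handed to sgdisk in 512-byte sectors,
# ending one sector before the next 1 MiB boundary.
data_end_sector="$(( (partoff[DATA] + partsize[DATA]) * 2048 - 1 ))"
echo "DATA would end at sector ${data_end_sector}"
```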
diff --git a/tools/rpm2img b/tools/rpm2img index 0c00b561..956d44a3 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -17,6 +17,7 @@ for opt in "$@"; do --output-fmt=*) OUTPUT_FMT="${optarg}" ;; --os-image-size-gib=*) OS_IMAGE_SIZE_GIB="${optarg}" ;; --data-image-size-gib=*) DATA_IMAGE_SIZE_GIB="${optarg}" ;; + --partition-plan=*) PARTITION_PLAN="${optarg}" ;; esac done @@ -28,6 +29,14 @@ case "${OUTPUT_FMT}" in ;; esac +case "${PARTITION_PLAN}" in + split|unified) ;; + *) + echo "unexpected partition plan '${PARTITION_PLAN}'" >&2 + exit 1 + ;; +esac + mkdir -p "${OUTPUT_DIR}" FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" @@ -62,11 +71,19 @@ VERITY_HASH_ALGORITHM=sha256 VERITY_DATA_BLOCK_SIZE=4096 VERITY_HASH_BLOCK_SIZE=4096 -truncate -s "${OS_IMAGE_SIZE_GIB}"G "${OS_IMAGE}" +case "${PARTITION_PLAN}" in + split) + truncate -s "${OS_IMAGE_SIZE_GIB}G" "${OS_IMAGE}" + truncate -s "${DATA_IMAGE_SIZE_GIB}G" "${DATA_IMAGE}" + ;; + unified) + truncate -s "$((OS_IMAGE_SIZE_GIB + DATA_IMAGE_SIZE_GIB))G" "${OS_IMAGE}" + ;; +esac declare -A partlabel parttype partsize partoff set_partition_sizes \ - "${OS_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" \ + "${OS_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" "${PARTITION_PLAN}" \ partsize partoff set_partition_labels partlabel set_partition_types parttype @@ -76,8 +93,13 @@ for part in \ BIOS \ EFI-A BOOT-A ROOT-A HASH-A RESERVED-A \ EFI-B BOOT-B ROOT-B HASH-B RESERVED-B \ - PRIVATE ; + PRIVATE DATA ; do + # We only append the data partition if we're using the unified layout. + if [ "${part}" == "DATA" ] && [ "${PARTITION_PLAN}" != "unified" ] ; then + continue + fi + # Each partition is aligned to a 1 MiB boundary, and extends to the sector # before the next partition starts. Specify the end point in sectors so we # can subtract a sector to fix the off-by-one error that comes from adding @@ -101,6 +123,18 @@ done sgdisk --clear "${partargs[@]}" --sort --print "${OS_IMAGE}" +# Partition the separate data disk, if we're using the split layout. +if [ "${PARTITION_PLAN}" == "split" ] ; then + data_start="${partoff[DATA]}" + data_end=$((data_start + partsize[DATA])) + data_end=$((data_end * 2048 - 1)) + sgdisk --clear \ + -n "0:${data_start}M:${data_end}" \ + -c "0:${partlabel[DATA]}" \ + -t "0:${parttype[DATA]}" \ + --sort --print "${DATA_IMAGE}" +fi + rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ mksquashfs \ @@ -226,15 +260,7 @@ mkfs.ext4 -b 4096 -i 4096 -I 256 "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" dd if="${PRIVATE_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" # BOTTLEROCKET-DATA -truncate -s "${DATA_IMAGE_SIZE_GIB}"G "${DATA_IMAGE}" -data_start="${partoff[DATA]}" -data_end=$((data_start + partsize[DATA])) -data_end=$((data_end * 2048 - 1)) -sgdisk --clear \ - -n "0:${data_start}M:${data_end}" \ - -c "0:${partlabel[DATA]}" \ - -t "0:${parttype[DATA]}" \ - --sort --print "${DATA_IMAGE}" + # If we build on a host with SELinux enabled, we could end up with labels that # do not match our policy. 
Since we allow replacing the data volume at runtime, # we can't count on these labels being correct in any case, and it's better to @@ -243,21 +269,34 @@ UNLABELED=$(find "${DATA_MOUNT}" \ | awk -v root="${DATA_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_rm", $1, "security.selinux"}') mkfs.ext4 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${partsize[DATA]}M" echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" -dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek="${partoff[DATA]}" +case "${PARTITION_PLAN}" in + split) + dd if="${BOTTLEROCKET_DATA}" of="${DATA_IMAGE}" conv=notrunc bs=1M seek="${partoff[DATA]}" + ;; + unified) + dd if="${BOTTLEROCKET_DATA}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[DATA]}" + ;; +esac sgdisk -v "${OS_IMAGE}" -sgdisk -v "${DATA_IMAGE}" +[ -s "${DATA_IMAGE}" ] && sgdisk -v "${DATA_IMAGE}" if [[ ${OUTPUT_FMT} == "raw" ]]; then lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.img.lz4" - lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" + if [ -s "${DATA_IMAGE}" ] ; then + lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" + fi elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.qcow2" - qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.qcow2" + if [ -s "${DATA_IMAGE}" ] ; then + qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.qcow2" + fi elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then # Stream optimization is required for creating an Open Virtual Appliance (OVA) qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.vmdk" - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" + if [ -s "${DATA_IMAGE}" ] ; then + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" + fi fi lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" From d45d53496172a89f0ce0d2e2db33014b8c403e77 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 18 Nov 2021 19:06:40 +0000 Subject: [PATCH 0566/1356] pubsys: add support for publishing unified images Although the existing AWS and VMware variants use the "split" image layout, custom variants for these platforms might use the "unified" layout instead. Adapt the AMI registration and OVA creation logic to account for the possibility that we only build a single disk image. 
Signed-off-by: Ben Cressey --- tools/pubsys/src/aws/ami/mod.rs | 4 +- tools/pubsys/src/aws/ami/register.rs | 62 +++++++++++++++++----------- 2 files changed, 40 insertions(+), 26 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 9f3d9acd..fc23716d 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -37,7 +37,7 @@ pub(crate) struct AmiArgs { /// Path to the image containing the data volume #[structopt(short = "d", long, parse(from_os_str))] - data_image: PathBuf, + data_image: Option, /// Desired root volume size in gibibytes #[structopt(long)] @@ -45,7 +45,7 @@ pub(crate) struct AmiArgs { /// Desired data volume size in gibibytes #[structopt(long)] - data_volume_size: i64, + data_volume_size: Option, /// The architecture of the machine image #[structopt(short = "a", long, parse(try_from_str = parse_arch))] diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 9cd02c02..45dc4b41 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -32,10 +32,7 @@ async fn _register_image( ec2_client: &Ec2Client, cleanup_snapshot_ids: &mut Vec, ) -> Result { - debug!( - "Uploading root and data images into EBS snapshots in {}", - region - ); + debug!("Uploading images into EBS snapshots in {}", region); let uploader = SnapshotUploader::new(ebs_client); let root_snapshot = snapshot_from_image(&ami_args.root_image, &uploader, None, ami_args.no_progress) @@ -46,19 +43,19 @@ async fn _register_image( })?; cleanup_snapshot_ids.push(root_snapshot.clone()); - let data_snapshot = - snapshot_from_image(&ami_args.data_image, &uploader, None, ami_args.no_progress) + let mut data_snapshot = None; + if let Some(data_image) = &ami_args.data_image { + let snapshot = snapshot_from_image(data_image, &uploader, None, ami_args.no_progress) .await .context(error::Snapshot { path: &ami_args.root_image, region, })?; - cleanup_snapshot_ids.push(data_snapshot.clone()); + cleanup_snapshot_ids.push(snapshot.clone()); + data_snapshot = Some(snapshot); + } - info!( - "Waiting for root and data snapshots to become available in {}", - region - ); + info!("Waiting for snapshots to become available in {}", region); let waiter = SnapshotWaiter::new(ec2_client.clone()); waiter .wait(&root_snapshot, Default::default()) @@ -66,12 +63,15 @@ async fn _register_image( .context(error::WaitSnapshot { snapshot_type: "root", })?; - waiter - .wait(&data_snapshot, Default::default()) - .await - .context(error::WaitSnapshot { - snapshot_type: "data", - })?; + + if let Some(ref data_snapshot) = data_snapshot { + waiter + .wait(&data_snapshot, Default::default()) + .await + .context(error::WaitSnapshot { + snapshot_type: "data", + })?; + } // Prepare parameters for AMI registration request let root_bdm = BlockDeviceMapping { @@ -86,16 +86,25 @@ async fn _register_image( ..Default::default() }; - let mut data_bdm = root_bdm.clone(); - data_bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); - if let Some(ebs) = data_bdm.ebs.as_mut() { - ebs.snapshot_id = Some(data_snapshot.clone()); - ebs.volume_size = Some(ami_args.data_volume_size); + let mut data_bdm = None; + if let Some(ref data_snapshot) = data_snapshot { + let mut bdm = root_bdm.clone(); + bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); + if let Some(ebs) = bdm.ebs.as_mut() { + ebs.snapshot_id = Some(data_snapshot.clone()); + ebs.volume_size = ami_args.data_volume_size; + } + data_bdm = Some(bdm); + } + + let mut 
block_device_mappings = vec![root_bdm]; + if let Some(data_bdm) = data_bdm { + block_device_mappings.push(data_bdm); } let register_request = RegisterImageRequest { architecture: Some(ami_args.arch.clone()), - block_device_mappings: Some(vec![root_bdm, data_bdm]), + block_device_mappings: Some(block_device_mappings), description: ami_args.description.clone(), ena_support: Some(ENA), name: ami_args.name.clone(), @@ -115,9 +124,14 @@ async fn _register_image( .image_id .context(error::MissingImageId { region })?; + let mut snapshot_ids = vec![root_snapshot]; + if let Some(data_snapshot) = data_snapshot { + snapshot_ids.push(data_snapshot); + } + Ok(RegisteredIds { image_id, - snapshot_ids: vec![root_snapshot, data_snapshot], + snapshot_ids, }) } From 2a7142c51fdc2be794a0a622dd70b32def183c49 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 17 Dec 2021 05:32:06 +0000 Subject: [PATCH 0567/1356] api: add oci-hooks setting to enable OCI hooks The oci-hooks setting allows a user to enable OCI hooks provided by the OS. For the time being, the only OCI hook provided is the `log4j2-hotpatch`, which applies the hotpatch for Apache Log4j2 to containers running JVMs. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index d91d4055..886b5cbd 100644 --- a/README.md +++ b/README.md @@ -418,6 +418,13 @@ These settings can be changed at any time. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. +#### OCI Hooks settings + +Bottlerocket allows you to opt-in to use additional [OCI hooks](https://github.com/opencontainers/runtime-spec/blob/main/runtime.md#lifecycle) for your orchestrated containers. +Once you opt-in to use additional OCI hooks, any new orchestrated containers will be configured with them, but existing containers won't be changed. + +* `settings.oci-hooks.log4j-hotpatch-enabled`: Enables the [hotdog OCI hooks](https://github.com/bottlerocket-os/hotdog), which are used to inject the [Log4j Hot Patch](https://github.com/corretto/hotpatch-for-apache-log4j2) into containers. Defaults to `false`. + #### Container image registry settings The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. From 2bfcdab126fa124e5fa864dfe72ec72c12f77d65 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 17 Dec 2021 01:34:14 +0000 Subject: [PATCH 0568/1356] docs: recommend Docker 20.10.10+ in BUILDING guide --- BUILDING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index bc34e19d..7972068a 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -53,8 +53,9 @@ cargo install cargo-make Bottlerocket uses [Docker](https://docs.docker.com/install/#supported-platforms) to orchestrate package and image builds. -We recommend Docker 19.03 or later. +We recommend Docker 20.10.10 or later. Builds rely on Docker's integrated BuildKit support, which has received many fixes and improvements in newer versions. +The default seccomp policy of older versions of Docker do not support the `clone3` syscall in recent versions of Fedora or Ubuntu, on which the Bottlerocket SDK is based. 
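If you want to confirm the prerequisite before building, a quick version check might look like the sketch below; the 20.10.10 threshold comes from the guidance above, and the comparison is only an illustrative one-liner.

```bash
# Illustrative check: warn if the Docker daemon is older than the recommended 20.10.10.
server_version="$(docker version --format '{{.Server.Version}}')"
echo "Docker server version: ${server_version}"
if [ "$(printf '%s\n' "20.10.10" "${server_version}" | sort -V | head -n1)" != "20.10.10" ]; then
  echo "Docker ${server_version} is older than the recommended 20.10.10" >&2
fi
```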
You'll need to have Docker installed and running, with your user account added to the `docker` group. Docker's [post-installation steps for Linux](https://docs.docker.com/install/linux/linux-postinstall/) will walk you through that. From 4a3f242f430178922c673f2704e5c73503e549fd Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Wed, 5 Jan 2022 14:30:23 -0500 Subject: [PATCH 0569/1356] Spelling (#1880) * spelling: accommodating * spelling: bootstrap * spelling: configuration * spelling: couldn't * spelling: datastore * spelling: example * spelling: interaction * spelling: parameter * spelling: program * spelling: received * spelling: shouldn't * spelling: specific * spelling: values Signed-off-by: Josh Soref --- tools/infrasys/src/error.rs | 2 +- tools/infrasys/src/s3.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs index 8e474ef3..6dd8c3ba 100644 --- a/tools/infrasys/src/error.rs +++ b/tools/infrasys/src/error.rs @@ -18,7 +18,7 @@ pub enum Error { }, #[snafu(display( - "Recieved CREATE_FAILED status for CFN stack '{}' in '{}'", + "Received CREATE_FAILED status for CFN stack '{}' in '{}'", stack_name, region ))] diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs index 1496d69e..c91863b9 100644 --- a/tools/infrasys/src/s3.rs +++ b/tools/infrasys/src/s3.rs @@ -37,7 +37,7 @@ pub fn format_prefix(prefix: &str) -> String { /// Output: The stack_arn of the stack w/ the S3 bucket, the CFN allocated bucket name, /// and the bucket url (for the url fields in Infra.lock) pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, String, String)> { - // TODO: Add support for accomodating pre-existing buckets (skip this creation process) + // TODO: Add support for accommodating pre-existing buckets (skip this creation process) let cfn_client = CloudFormationClient::new( Region::from_str(region).context(error::ParseRegion { what: region })?, ); From 55385f033906151b21b7cb291b6c2e1691d2983d Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 6 Jan 2022 22:52:54 +0000 Subject: [PATCH 0570/1356] grub: Add "console" to `terminal_output` command for BIOS systems This change adds the local console to the list of terminals to which grub will send output. --- packages/grub/bios.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/grub/bios.cfg b/packages/grub/bios.cfg index 3cf5fb99..06e7f532 100644 --- a/packages/grub/bios.cfg +++ b/packages/grub/bios.cfg @@ -1,6 +1,6 @@ set no_modules=y serial -terminal_output serial +terminal_output serial console gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub From 2099c4ad85888478954997c908fd79805a6dc449 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 6 Jan 2022 22:15:34 +0000 Subject: [PATCH 0571/1356] grub: Use `grub-mkimage` from build step rather than SDK version This changes the grub build process to use `grub-mkimage` from the build step in the spec, rather than use `grub2-mkimage` from the SDK. The reason for this is the SDK's version is a Fedora build which may not match our grub images or modules. 
--- packages/grub/grub.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 89692eac..744e1591 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -178,7 +178,7 @@ MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd" pushd bios-build %make_install mkdir -p %{buildroot}%{biosdir} -grub2-mkimage \ +%{buildroot}%{_cross_bindir}/grub-mkimage \ -c %{S:1} \ -d ./grub-core/ \ -O "i386-pc" \ @@ -193,7 +193,7 @@ popd pushd efi-build %make_install mkdir -p %{buildroot}%{efidir} -grub2-mkimage \ +%{buildroot}%{_cross_bindir}/grub-mkimage \ -c %{S:2} \ -d ./grub-core/ \ -O "%{_cross_grub_efi_format}" \ From dc0080c47dbfca0569fa9d33ce331404a56f75d0 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Fri, 7 Jan 2022 20:19:31 +0000 Subject: [PATCH 0572/1356] kernel 5.10: Add config to support additional hardware This commit adds new config to kernel 5.10 for variants meant to run on bare metal. It adds support for SATA drives, Intel networking hardware (1G/10G), as well as additional suport for UEFI video. This config will continue to grow and morph over time as we support additional hardware, etc. --- packages/kernel-5.10/config-bottlerocket | 36 ++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index aacbcb6f..dcba4f8f 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -4,10 +4,37 @@ # The root filesystem is ext4 CONFIG_EXT4_FS=y -# NVMe for EC2 Nitro platforms (C5, M5, and later) +# NVMe support CONFIG_BLK_DEV_NVME=y CONFIG_NVME_CORE=y +# SATA support +CONFIG_BLK_DEV_SD=y +CONFIG_SATA_AHCI=y +CONFIG_ATA=y +CONFIG_ATA_PIIX=y + +# Network support +CONFIG_ETHERNET=y +CONFIG_NET_CORE=y +CONFIG_NETDEVICES=y + +# Intel network support +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_NET_VENDOR_INTEL=m +CONFIG_IGB_HWMON=y +CONFIG_E1000=m +CONFIG_E1000e=m +CONFIG_E1000e_hwts=y + +# Intel 10G network support +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=m + # Xen blkfront for Xen-based EC2 platforms CONFIG_XEN_BLKDEV_FRONTEND=y @@ -22,11 +49,16 @@ CONFIG_DAX=y CONFIG_DM_INIT=y CONFIG_DM_VERITY=y -# Enable EFI. +# EFI CONFIG_EFI=y CONFIG_EFI_STUB=y CONFIG_EFI_MIXED=y +# EFI video +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y + # yama LSM for ptrace restrictions CONFIG_SECURITY_YAMA=y From b80ad3d122d46534c1738f24d6f5aa8a18dd602a Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 12 Jan 2022 20:42:05 +0000 Subject: [PATCH 0573/1356] workflows: Add `metal-dev` variant to GH checks --- .github/workflows/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 83373605..f7889e8a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,6 +34,9 @@ jobs: - variant: vmware-dev arch: x86_64 supported: false + - variant: metal-dev + arch: x86_64 + supported: false - variant: vmware-k8s-1.20 arch: x86_64 supported: true From c4e6a579ef0ba003494f5a12d8bf93dc4a9cbca0 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 12 Jan 2022 19:07:59 +0000 Subject: [PATCH 0574/1356] build: integrate bottlerocket-license-tool Add support for the `Licenses.toml` file, used to provide licensing information for some packages. 
When the `Licenses.toml` file and the `licenses` folder are provided, they are copied to the build directory of the package. If either path is missing, empty configurations are provided. When this happens, packages that use `Licenses.toml` will fail to build. Signed-off-by: Arnaldo Garcia Rincon --- .gitignore | 2 ++ BUILDING.md | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/.gitignore b/.gitignore index e2e553ee..52063a7b 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ /*.pem /keys /roles +/Licenses.toml +/licenses diff --git a/BUILDING.md b/BUILDING.md index 7972068a..add28ce7 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -92,6 +92,35 @@ cargo make -e BUILDSYS_ARCH=my-arch-here (You can use variant and arch arguments together, too.) +#### Package licenses + +Most packages will include license files extracted from upstream source archives. +However, in some rare cases there are multiple licenses that could apply to a package. +Bottlerocket's build system uses the `Licenses.toml` file in conjuction with the `licenses` directory to configure the licenses used for such special packages. +Here is an example of a simple `Licenses.toml` configuration file: + +```toml +[package] +spdx-id = "SPDX-ID" +licenses = [ + { path = "the-license.txt" } +] +``` + +In the previous example, it is expected that the file `the-license.txt` is present in `licenses`. +You can retrieve the licenses from a remote endpoint, or the local filesystem if you specify the `license-url` field: + +```toml +[package] +spdx-id = "SPDX-ID AND SPDX-ID-2" # Package with multiple licenses +licenses = [ + # This file is copied from a file system, and will be saved as `path` + { license-url = "file:///path/to/spdx-id-license.txt", path = "spdx-id-license.txt" }, + # This file is fetched from an https endpoint, and will be saved as `path` + { license-url = "https://localhost/spdx-id-license-v2.txt", path = "spdx-id-license-2.txt" } +] +``` + ### Register an AMI To use the image in Amazon EC2, we need to register the image as an AMI. From 4245d45460a57003838ce7b20f9c844dea8ee553 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 14 Jan 2022 16:24:39 -0800 Subject: [PATCH 0575/1356] README: update caveat about host-containers source updates Ever since 3bd666b94d84d5c11290659834eca06c1f55f4d3, host containers restart whenever their settings change. Users no longer have to disable and re-enable host-containers to apply changes to its `source` --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 886b5cbd..d4280156 100644 --- a/README.md +++ b/README.md @@ -593,7 +593,7 @@ The default `admin` host-container, for example, stores its SSH host keys under There are a few important caveats to understand about host containers: * They're not orchestrated. They only start or stop according to that `enabled` flag. * They run in a separate instance of containerd than the one used for orchestrated containers like Kubernetes pods. -* They're not updated automatically. You need to update the `source`, disable the container, commit those changes, then re-enable it. +* They're not updated automatically. You need to update the `source` and commit those changes. * If you set `superpowered` to true, they'll essentially have root access to the host. Because of these caveats, host containers are only intended for special use cases. 
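For example, pointing the default `admin` host container at a newer image can be done through the API; this is a hedged example, and the image URI is a placeholder rather than a value taken from this change.

```bash
# Hedged example: update the admin host container's source; the URI is a placeholder.
# The container restarts automatically once the setting change is committed.
apiclient set host-containers.admin.source="public.ecr.aws/example/bottlerocket-admin:newer-tag"
```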
From e26c5b0cdd7e80d54eca8f278c6a3412c9868bc4 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Sat, 15 Jan 2022 00:22:15 +0000 Subject: [PATCH 0576/1356] kernel-5.10: include missing file for kernel-devel archive The kernel-devel archive in the 5.10 kernel is missing the sources for the 'hweight' tool for 'arm64', which causes compilation failures in out-of-tree kernel modules. Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/kernel-5.10.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 297f8fa0..0bdaa7c6 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -171,7 +171,7 @@ sed -i \ find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o -print echo tools/build/fixdep.c find tools/lib/subcmd -type f -print - find tools/lib/{ctype,rbtree,string,str_error_r}.c + find tools/lib/{ctype,hweight,rbtree,string,str_error_r}.c echo kernel/bounds.c echo kernel/time/timeconst.bc From dd8e53a65d00f3f4edcca76697c5fd48bf5c62b4 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 14 Jan 2022 22:52:13 +0000 Subject: [PATCH 0577/1356] buildsys: allow 'dead code' in BuildPackage This allows us to include data within BuildPackage that is not yet in use (ex. releases_url). --- tools/buildsys/src/manifest.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index b9a4f332..028033bb 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -220,6 +220,7 @@ struct Metadata { #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] +#[allow(dead_code)] pub(crate) struct BuildPackage { pub(crate) external_files: Option>, pub(crate) package_name: Option, From a341c3a8aaffa468ae88ed2844b8d261e440d09e Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 14 Jan 2022 23:01:51 +0000 Subject: [PATCH 0578/1356] actions-workflow: bump rust to 1.58.0 and cargo-make to 0.35.8 --- .github/workflows/build.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f7889e8a..15cecfbf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -4,7 +4,7 @@ on: branches: [develop] # Here we list file types that don't affect the build and don't need to use # up our Actions runners. - paths-ignore: + paths-ignore: # draw.io (diagrams.net) files, the source of png images for docs - '**.drawio' # Example configuration files @@ -46,8 +46,8 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.56.1 && rustup default 1.56.1 - - run: cargo install --version 0.30.0 cargo-make + - run: rustup toolchain install 1.58.0 && rustup default 1.58.0 + - run: cargo install --version 0.35.8 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 From bad7c8b6c61e3c0d4188441880674cae6b26bf98 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Wed, 19 Jan 2022 18:56:51 +0000 Subject: [PATCH 0579/1356] grub: remove `grub2-tools` as a dependency This package is no longer necessary as we'll use `grub-mkimage` from build step. 
--- packages/grub/grub.spec | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 744e1591..145c3f06 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -63,7 +63,6 @@ BuildRequires: automake BuildRequires: bison BuildRequires: flex BuildRequires: gettext-devel -BuildRequires: grub2-tools BuildRequires: %{_cross_os}glibc-devel %description From cae01a73b1e0088f640c8f4e378f91f3efce96fd Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 16 Dec 2021 01:48:10 +0000 Subject: [PATCH 0580/1356] variants: Add metal-k8s-1.21 variant This change adds an additional variant `metal-k8s-1.21`, which includes necessary Kubernetes packages and settings for running Bottlerocket on metal in a Kubernetes cluster. --- .github/workflows/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 15cecfbf..adf7373d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -37,6 +37,9 @@ jobs: - variant: metal-dev arch: x86_64 supported: false + - variant: metal-k8s-1.21 + arch: x86_64 + supported: false - variant: vmware-k8s-1.20 arch: x86_64 supported: true From 9ff841ccde915558867c9ad2cb74a59ea9d5226a Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 13 Jan 2022 22:46:26 -0800 Subject: [PATCH 0581/1356] model, schnauzer: change `kubernetes.node-taints` type Changes the model for `kubernetes.node-taints` from a map of keys to single values/effect to a map of keys to list of values/effects. Adds a custom deserializer to deserialize from both ways of representing node-taints in Bottlerocket user-data. --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d4280156..4d24997e 100644 --- a/README.md +++ b/README.md @@ -319,15 +319,15 @@ For Kubernetes variants in VMware, you must specify: The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. -* `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, value and effect entries added when registering the node in the cluster. +* `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, values and effects entries added when registering the node in the cluster. * Example user data for setting up labels and taints: ``` [settings.kubernetes.node-labels] "label1" = "foo" "label2" = "bar" [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + "dedicated" = ["experimental:PreferNoSchedule", "experimental:NoExecute"] + "special" = ["true:NoSchedule"] ``` The following settings are optional and allow you to further configure your cluster. From 6d15f0be06e769c8e3b8366d6cdd3801fe489541 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Fri, 21 Jan 2022 18:37:00 +0000 Subject: [PATCH 0582/1356] actions-workflow: bump rust to 1.58.1 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index adf7373d..78f3969d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v2 - - run: rustup toolchain install 1.58.0 && rustup default 1.58.0 + - run: rustup toolchain install 1.58.1 && rustup default 1.58.1 - run: cargo install --version 0.35.8 cargo-make - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt From 07c6136817cee8d91a4ee954e41ed614bdb6cce5 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Mon, 24 Jan 2022 17:51:24 +0000 Subject: [PATCH 0583/1356] Update kernel-5.10 to 5.10.93 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 5e4e7944..53442753 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/04e0825929fcabea05c6d875de848d96ac317449eecfff360a4ab08ed1ff60ab/kernel-5.10.75-79.358.amzn2.src.rpm" -sha512 = "115c29e87a65c7f5c76245e9cb21c8ce4ed4153f393ccaa0a59e9210a22da0b3fe3eb2ad8ea73c4c107b04318f44be9deb85b1f11e0272888b1b62b97e72d519" +url = "https://cdn.amazonlinux.com/blobstore/c80d649c51b68fdb2bc126c326f83fed93ed242d675f978a9a0da4012e9789a5/kernel-5.10.93-87.444.amzn2.src.rpm" +sha512 = "1e5442b0da15123e6a3c6c6b32f8f3b2ff53565fb9f2a76b778b315ea484a87423fef05bb1aed501c1a1f61507d5edac23bf1b1694bab3a73610ac6af22b190e" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 0bdaa7c6..530cfb9f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.75 +Version: 5.10.93 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/04e0825929fcabea05c6d875de848d96ac317449eecfff360a4ab08ed1ff60ab/kernel-5.10.75-79.358.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c80d649c51b68fdb2bc126c326f83fed93ed242d675f978a9a0da4012e9789a5/kernel-5.10.93-87.444.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 86cf4274053d11fd671ee5516a7b54ec674d8401 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Mon, 24 Jan 2022 17:53:47 +0000 Subject: [PATCH 0584/1356] Update kernel-5.4 to 5.4.172 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 9f368094..ecd7ae97 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/1f5404c1e1cc923f488ae6290e17b38ac000e32e35364a0d7b567dda0ecba127/kernel-5.4.156-83.273.amzn2.src.rpm" -sha512 = "c41e5913ec7786a17498b1032e973c0f139eadbafec2834527c3efdcb3371a316015a209b6ab82e863f08d62c7161da7fbc117b79c8d936afea64253a77c3df8" +url = "https://cdn.amazonlinux.com/blobstore/9d3d2fc3caf5bc68bcc257a426b1a3177f60f1acd62e27d772b58156c1b76e57/kernel-5.4.172-90.336.amzn2.src.rpm" +sha512 = "a99575479a7aa0f5aaf264d105435af48d1201ecb133e9b4842e21b1b6d73220aa41ba44f5b400fd6662d4325208567c4defc31f4fdbc53b46be560031e835ef" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 11f01088..7ff6d066 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.156 +Version: 5.4.172 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/1f5404c1e1cc923f488ae6290e17b38ac000e32e35364a0d7b567dda0ecba127/kernel-5.4.156-83.273.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9d3d2fc3caf5bc68bcc257a426b1a3177f60f1acd62e27d772b58156c1b76e57/kernel-5.4.172-90.336.amzn2.src.rpm Source100: config-bottlerocket # Make Lustre FSx work with a newer GCC. From 70a44f4d84a9e1a86e3d3e83b2d966a566944ab6 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 24 Nov 2021 01:42:43 +0000 Subject: [PATCH 0585/1356] packages: add NVIDIA tesla 470 driver This commit adds the NVIDIA tesla 470 driver. The drivers are subpackages of `kmod-5.10-nvidia`, since we don't want to have a spec file per driver type per driver version. The spec file for `kmod-5.10-nvidia` holds the drivers that are compatible with the 5.10 kernel. New spec files should be added for newer kernel and driver versions. The `kmod-5.10-nvidia` package provides a tmpfilesd conf file to create the lib modules directory where the kernel modules will be created. Each subpackage installs the libraries and binaries underneath `%{_cross_bindir}/nvidia/` and `%{_cross_libdir}/nvidia/` respectively to prevent collisions while building the subpackages. Kernel module objects are installed in `%{_cross_datadir}/nvidia//modules`, so that `driverdog` can compile them at runtime. Each subpackage provides a drop-in configuration file for containerd, that sets the `NVIDIA_PATH` environment variable. This environment variable must be set to the directory that contains the NVIDIA userland tools, which will be mounted on the containers by `libnvidia-container`. The environment variable is set for containerd, since `libnvidia-container` is called by the runtime to set up the containers. 
Signed-off-by: Arnaldo Garcia Rincon --- .gitignore | 1 + packages/kmod-5.10-nvidia/Cargo.toml | 24 ++ packages/kmod-5.10-nvidia/build.rs | 9 + .../kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 215 ++++++++++++++++++ .../nvidia-dependencies-modules-load.conf | 2 + .../kmod-5.10-nvidia/nvidia-ld.so.conf.in | 1 + .../nvidia-tesla-build-config.toml.in | 18 ++ .../kmod-5.10-nvidia/nvidia-tesla-path.env.in | 1 + .../nvidia-tesla-tmpfiles.conf.in | 3 + .../kmod-5.10-nvidia/nvidia-tmpfiles.conf.in | 1 + packages/kmod-5.10-nvidia/pkg.rs | 1 + 11 files changed, 276 insertions(+) create mode 100644 packages/kmod-5.10-nvidia/Cargo.toml create mode 100644 packages/kmod-5.10-nvidia/build.rs create mode 100644 packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec create mode 100644 packages/kmod-5.10-nvidia/nvidia-dependencies-modules-load.conf create mode 100644 packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-tesla-build-config.toml.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-tesla-path.env.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in create mode 100644 packages/kmod-5.10-nvidia/pkg.rs diff --git a/.gitignore b/.gitignore index 52063a7b..9c27e13b 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ /roles /Licenses.toml /licenses +*.run diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml new file mode 100644 index 00000000..32b506d5 --- /dev/null +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "kmod-5_10-nvidia" +version = "0.1.0" +edition = "2018" +publish = false +build = "build.rs" + +[lib] +path = "pkg.rs" + +[package.metadata.build-package] +package-name = "kmod-5.10-nvidia" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/470.82.01/NVIDIA-Linux-x86_64-470.82.01.run" +sha512 = "86eac5e2d4fae5525a9332b77da58c0c12e76a35db023a2b14de7d9615b20ba4850a04fa189189c0dcf712f1f343fee98b954aaa6e9b83a959de3c3b8259c7c2" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/470.82.01/NVIDIA-Linux-aarch64-470.82.01.run" +sha512 = "62c4adf6fa3c3474c3a09c08ed8056d4e9d00a90effa3851add10d6b2603c23f9986c32ace2e1b2ed7b735779430d634856a06e93af41431db439dfc79503cd8" + +[build-dependencies] +glibc = { path = "../glibc" } +kernel-5_10 = { path = "../kernel-5.10" } diff --git a/packages/kmod-5.10-nvidia/build.rs b/packages/kmod-5.10-nvidia/build.rs new file mode 100644 index 00000000..cad8999a --- /dev/null +++ b/packages/kmod-5.10-nvidia/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-package").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec new file mode 100644 index 00000000..79d16869 --- /dev/null +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -0,0 +1,215 @@ +%global nvidia_tesla_470_version 470.82.01 +%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) +%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) + +Name: %{_cross_os}kmod-5.10-nvidia +Version: 1.0.0 +Release: 1%{?dist} +Summary: NVIDIA drivers for the 5.10 kernel +# We use these licences because we only ship our own 
software in the main package, +# each subpackage includes the LICENSE file provided by the Licenses.toml file +License: Apache-2.0 OR MIT +URL: http://www.nvidia.com/ + +# NVIDIA .run scripts from 0 to 199 +Source0: https://us.download.nvidia.com/tesla/%{nvidia_tesla_470_version}/NVIDIA-Linux-x86_64-%{nvidia_tesla_470_version}.run +Source1: https://us.download.nvidia.com/tesla/%{nvidia_tesla_470_version}/NVIDIA-Linux-aarch64-%{nvidia_tesla_470_version}.run + +# Common NVIDIA conf files from 200 to 299 +Source200: nvidia-tmpfiles.conf.in +Source202: nvidia-dependencies-modules-load.conf + +# NVIDIA tesla conf files from 300 to 399 +Source300: nvidia-tesla-tmpfiles.conf.in +Source301: nvidia-tesla-build-config.toml.in +Source302: nvidia-tesla-path.env.in +Source303: nvidia-ld.so.conf.in + +BuildRequires: %{_cross_os}glibc-devel +BuildRequires: %{_cross_os}kernel-5.10-archive + +%description +%{summary}. + +%package tesla-470 +Summary: NVIDIA 470 Tesla driver +Version: %{nvidia_tesla_470_version} +License: %{spdx_id} +Requires: %{name} + +%description tesla-470 +%{summary} + +%prep +# Extract nvidia sources with `-x`, otherwise the script will try to install +# the driver in the current run +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version}.run -x + +%global kernel_sources %{_builddir}/kernel-devel +tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%build +pushd NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version}/kernel + +# This recipe was based in the NVIDIA yum/dnf specs: +# https://github.com/NVIDIA/yum-packaging-precompiled-kmod + +# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if +# we don't set this flag the compilation fails +make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld + +%{_cross_target}-strip -g --strip-unneeded nvidia/nv-interface.o +%{_cross_target}-strip -g --strip-unneeded nvidia-uvm.o +%{_cross_target}-strip -g --strip-unneeded nvidia-drm.o +%{_cross_target}-strip -g --strip-unneeded nvidia-peermem/nvidia-peermem.o +%{_cross_target}-strip -g --strip-unneeded nvidia-modeset/nv-modeset-interface.o + +# We delete these files since we just stripped the input .o files above, and +# will be build at runtime in the host +rm nvidia{,-modeset,-peermem}.o + +# Delete the .ko files created in make command, just to be safe that we +# don't include any linked module in the base image +rm nvidia{,-modeset,-peermem,-drm}.ko + +popd + +%install +install -d %{buildroot}%{_cross_libexecdir} +install -d %{buildroot}%{_cross_libdir} +install -d %{buildroot}%{_cross_tmpfilesdir} +install -d %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} + +KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) +sed -e "s|__KERNEL_VERSION__|${KERNEL_VERSION}|" %{S:200} > nvidia.conf +install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} + +# Install modules-load.d drop-in to autoload required kernel modules +install -d %{buildroot}%{_cross_libdir}/modules-load.d +install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf + +# Begin NVIDIA tesla 470 +pushd NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version} +# We install bins and libs in a versioned directory to prevent collisions with future drivers versions +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} 
+install -d %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/ +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} + +sed -e 's|__NVIDIA_VERSION__|%{nvidia_tesla_470_version}|' %{S:300} > nvidia-tesla-%{nvidia_tesla_470_version}.conf +install -m 0644 nvidia-tesla-%{nvidia_tesla_470_version}.conf %{buildroot}%{_cross_tmpfilesdir}/ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/|' %{S:301} > \ + nvidia-tesla-%{nvidia_tesla_470_version}.toml +install -m 0644 nvidia-tesla-%{nvidia_tesla_470_version}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +# Install nvidia-path environment file, will be used as a drop-in for containerd.service since +# libnvidia-container locates and mounts helper binaries into the containers from either +# `PATH` or `NVIDIA_PATH` +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} +# We need to add `_cross_libdir/nvidia_tesla_470_version` to the paths loaded by the ldconfig service +# because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the +# libraries into the containers +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{nvidia_tesla_470_version}|' \ + > nvidia-tesla-%{nvidia_tesla_470_version}.conf +install -m 0644 nvidia-tesla-%{nvidia_tesla_470_version}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ + +# driver +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-kernel.o + +# uvm +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d + +# modeset +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d + +# peermem +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d + +# drm +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d + +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +install -m 755 nvidia-debugdump 
%{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +%if "%{_cross_arch}" == "x86_64" +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +%endif + +# TODO: add remaining libraries +# misc +# Add libnvidia-ml.so for testing purposes +install -m755 libnvidia-ml.so.%{nvidia_tesla_470_version} %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version} + +ln -s libnvidia-ml.so.%{nvidia_tesla_470_version} %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.1 + +popd + +%files +%{_cross_attribution_file} +%dir %{_cross_libexecdir}/nvidia +%dir %{_cross_libdir}/nvidia +%dir %{_cross_datadir}/nvidia +%dir %{_cross_libdir}/modules-load.d +%dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%{_cross_tmpfilesdir}/nvidia.conf +%{_cross_libdir}/systemd/system/ +%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf + +%files tesla-470 +%license %{license_file} +%dir %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version} +%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +%dir %{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version} +%dir %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} + +# Binaries +%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-smi + +# Libraries +%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.1 +%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.%{nvidia_tesla_470_version} + +# Configuration files +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{nvidia_tesla_470_version}.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{nvidia_tesla_470_version}.conf +%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version}/nvidia-path.env + +# driver +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-kernel.o + +# uvm +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-uvm.o + +# modeset +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-modeset.mod.o + +# tmpfiles +%{_cross_tmpfilesdir}/nvidia-tesla-%{nvidia_tesla_470_version}.conf + +# Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them +# for now, and we will add them if requested +%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-peermem.o +%exclude 
%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-cuda-mps-server +%if "%{_cross_arch}" == "x86_64" +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-ngx-updater +%endif diff --git a/packages/kmod-5.10-nvidia/nvidia-dependencies-modules-load.conf b/packages/kmod-5.10-nvidia/nvidia-dependencies-modules-load.conf new file mode 100644 index 00000000..86f884a6 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-dependencies-modules-load.conf @@ -0,0 +1,2 @@ +i2c_core +ipmi_msghandler diff --git a/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in b/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in new file mode 100644 index 00000000..a07b0ccb --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in @@ -0,0 +1 @@ +__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ diff --git a/packages/kmod-5.10-nvidia/nvidia-tesla-build-config.toml.in b/packages/kmod-5.10-nvidia/nvidia-tesla-build-config.toml.in new file mode 100644 index 00000000..fb74dc51 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-tesla-build-config.toml.in @@ -0,0 +1,18 @@ +[nvidia-tesla] +lib-modules-path = "kernel/drivers/extra/video/nvidia/tesla" +objects-source = "__NVIDIA_MODULES__" + +[nvidia-tesla.object-files."nvidia.o"] +link-objects = ["nv-interface.o", "nv-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia.ko"] +link-objects = ["nvidia.o", "nvidia.mod.o"] + +[nvidia-tesla.object-files."nvidia-modeset.o"] +link-objects = ["nv-modeset-interface.o", "nv-modeset-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia-modeset.ko"] +link-objects = ["nvidia-modeset.o", "nvidia-modeset.mod.o"] + +[nvidia-tesla.kernel-modules."nvidia-uvm.ko"] +link-objects = ["nvidia-uvm.o", "nvidia-uvm.mod.o"] diff --git a/packages/kmod-5.10-nvidia/nvidia-tesla-path.env.in b/packages/kmod-5.10-nvidia/nvidia-tesla-path.env.in new file mode 100644 index 00000000..28f74deb --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-tesla-path.env.in @@ -0,0 +1 @@ +NVIDIA_PATH=__NVIDIA_BINDIR__ diff --git a/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in new file mode 100644 index 00000000..f208e1d2 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in @@ -0,0 +1,3 @@ +C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf diff --git a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in new file mode 100644 index 00000000..d95dbad9 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in @@ -0,0 +1 @@ +D /lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - diff --git a/packages/kmod-5.10-nvidia/pkg.rs b/packages/kmod-5.10-nvidia/pkg.rs new file mode 100644 index 00000000..d799fb2d --- /dev/null +++ b/packages/kmod-5.10-nvidia/pkg.rs @@ -0,0 +1 @@ +// not used From 37c3fbc1177a91b574ffc6a041d70eb6ab79bd89 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 11 Jan 2022 01:40:51 +0000 Subject: [PATCH 0586/1356] actions: add matrix `fetch-upstream` variable The 
`fetch-upstream` variable is be used to fetch upstream sources when they aren't provided in the lookaside cache. Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 78f3969d..c8f0d304 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -27,30 +27,59 @@ jobs: variant: [aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] + fetch-upstream: ["false"] include: - variant: aws-dev arch: x86_64 supported: false + fetch-upstream: "false" - variant: vmware-dev arch: x86_64 supported: false + fetch-upstream: "false" - variant: metal-dev arch: x86_64 supported: false + fetch-upstream: "false" - variant: metal-k8s-1.21 arch: x86_64 supported: false + fetch-upstream: "false" - variant: vmware-k8s-1.20 arch: x86_64 supported: true + fetch-upstream: "false" - variant: vmware-k8s-1.21 arch: x86_64 supported: true + fetch-upstream: "false" + - variant: aws-k8s-1.21-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-k8s-1.21-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" fail-fast: false steps: - uses: actions/checkout@v2 - run: rustup toolchain install 1.58.1 && rustup default 1.58.1 - run: cargo install --version 0.35.8 cargo-make + - if: contains(matrix.variant, 'nvidia') + run: | + cat <<-EOF > Licenses.toml + [nvidia] + spdx-id = "LICENSE-LicenseRef-NVIDIA-Customer" + licenses = [ + { path = "NVIDIA", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } + ] + EOF - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt - - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} -e BUILDSYS_ARCH=${{ matrix.arch }} -e BUILDSYS_JOBS=12 + - run: | + cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ + -e BUILDSYS_ARCH=${{ matrix.arch }} \ + -e BUILDSYS_JOBS=12 \ + -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK=${{ matrix.fetch-upstream }} \ + -e BUILDSYS_UPSTREAM_LICENSE_FETCH=${{ matrix.fetch-upstream }} From cdf11e0e583b959801ce82aa1749e5083098dede Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 2 Nov 2021 00:57:46 +0000 Subject: [PATCH 0587/1356] variants: add aws-k8s-1.21-nvidia Signed-off-by: Arnaldo Garcia Rincon --- BUILDING.md | 28 ++++++++++++++++++++++++++++ README.md | 1 + 2 files changed, 29 insertions(+) diff --git a/BUILDING.md b/BUILDING.md index add28ce7..9a337330 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -121,6 +121,34 @@ licenses = [ ] ``` +#### NVIDIA variants + +If you want to build the `aws-k8s-1.21-nvidia` variant, you can follow these steps to prepare a `Licenses.toml` file using the [License for customer use of NVIDIA software](https://www.nvidia.com/en-us/drivers/nvidia-license/): + +1. Create a `Licenses.toml` file in your Bottlerocket root directory, with the following content: + +```toml +[nvidia] +spdx-id = "LicensesRef-NVIDIA-Customer-Use" +licenses = [ + { path = "LICENSE", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } +] +``` + +2. Fetch the licenses with this command: + +```shell +cargo make fetch-licenses -e BUILDSYS_UPSTREAM_LICENSES_FETCH=true +``` + +3. 
Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: + +```shell +cargo make \ + -e BUILDSYS_VARIANT=aws-k8s-1.21-nvidia \ + -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="true" +``` + ### Register an AMI To use the image in Amazon EC2, we need to register the image as an AMI. diff --git a/README.md b/README.md index 4d24997e..51a01d70 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ The following variants support EKS, as described above: - `aws-k8s-1.19` - `aws-k8s-1.20` - `aws-k8s-1.21` +- `aws-k8s-1.21-nvidia` The following variant supports ECS: From b2e61bf28ddef045896e69ba216e104e55aaa7a9 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 31 Jan 2022 15:27:48 -0800 Subject: [PATCH 0588/1356] BUILDING.md: fix typo BUILDSYS_UPSTREAM_LICENSES_FETCH should be BUILDSYS_UPSTREAM_LICENSE_FETCH --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index 9a337330..c227bdd4 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -138,7 +138,7 @@ licenses = [ 2. Fetch the licenses with this command: ```shell -cargo make fetch-licenses -e BUILDSYS_UPSTREAM_LICENSES_FETCH=true +cargo make fetch-licenses -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true ``` 3. Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: From 6cbc47bf9b32eddf0eb0ef35c1102682c25a104a Mon Sep 17 00:00:00 2001 From: Rajashree Mandaogane Date: Thu, 30 Sep 2021 14:02:48 -0700 Subject: [PATCH 0589/1356] Add noProxy setting example --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 51a01d70..41fe341a 100644 --- a/README.md +++ b/README.md @@ -476,7 +476,11 @@ These settings will configure the proxying behavior of the following services: * [ecs.service](packages/ecs-agent/ecs.service) * `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. -* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. +* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. + Example: + ``` + settings.network.no-proxy = ["localhost","127.0.0.1"] + ``` The no-proxy list will automatically include entries for localhost. From 731229cc59814633ab135a0506180129a80fe778 Mon Sep 17 00:00:00 2001 From: Rajashree Mandaogane Date: Mon, 4 Oct 2021 12:56:12 -0700 Subject: [PATCH 0590/1356] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 41fe341a..afd45de7 100644 --- a/README.md +++ b/README.md @@ -479,6 +479,7 @@ These settings will configure the proxying behavior of the following services: * `settings.network.no-proxy`: A list of hosts that are excluded from proxying. Example: ``` + settings.network.https-proxy = "1.2.3.4" settings.network.no-proxy = ["localhost","127.0.0.1"] ``` From 1ecc2a3373b1cf27d956b01f4305348074566ce5 Mon Sep 17 00:00:00 2001 From: Rajashree Mandaogane Date: Tue, 1 Feb 2022 10:31:24 -0800 Subject: [PATCH 0591/1356] Update README.md Co-authored-by: Tom Kirchner --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index afd45de7..6f94e5f2 100644 --- a/README.md +++ b/README.md @@ -479,8 +479,9 @@ These settings will configure the proxying behavior of the following services: * `settings.network.no-proxy`: A list of hosts that are excluded from proxying. 
Example: ``` - settings.network.https-proxy = "1.2.3.4" - settings.network.no-proxy = ["localhost","127.0.0.1"] + [settings.network] + https-proxy = "1.2.3.4:8080" + no-proxy = ["localhost", "127.0.0.1"] ``` The no-proxy list will automatically include entries for localhost. From 608728c28fcac424062408d6007be10714e51a89 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 2 Feb 2022 14:51:53 -0800 Subject: [PATCH 0592/1356] sources: update snafu to 0.7 --- tools/Cargo.lock | 47 +++++++++++++++++++++++++++++++++++------------ tools/deny.toml | 5 ++++- 2 files changed, 39 insertions(+), 13 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index d6f4f0b3..b9602c4f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -161,7 +161,7 @@ dependencies = [ "serde", "serde_plain", "sha2", - "snafu", + "snafu 0.6.10", "toml", "url", "walkdir", @@ -252,7 +252,7 @@ dependencies = [ "rusoto_ec2", "rusoto_signature", "sha2", - "snafu", + "snafu 0.6.10", "tempfile", "tokio", ] @@ -792,7 +792,7 @@ dependencies = [ "sha2", "shell-words", "simplelog", - "snafu", + "snafu 0.6.10", "structopt", "tokio", "toml", @@ -1064,7 +1064,7 @@ version = "0.1.0" dependencies = [ "cargo-readme", "chrono", - "snafu", + "snafu 0.7.0", ] [[package]] @@ -1195,7 +1195,7 @@ dependencies = [ "serde", "serde_json", "simplelog", - "snafu", + "snafu 0.6.10", "structopt", "tempfile", "tinytemplate", @@ -1220,7 +1220,7 @@ dependencies = [ "parse-datetime", "serde", "serde_yaml", - "snafu", + "snafu 0.6.10", "toml", "url", ] @@ -1236,7 +1236,7 @@ dependencies = [ "sha2", "shell-words", "simplelog", - "snafu", + "snafu 0.6.10", "structopt", "tempfile", "toml", @@ -1833,7 +1833,18 @@ checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" dependencies = [ "backtrace", "doc-comment", - "snafu-derive", + "snafu-derive 0.6.10", +] + +[[package]] +name = "snafu" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eba135d2c579aa65364522eb78590cdf703176ef71ad4c32b00f58f7afb2df5" +dependencies = [ + "backtrace", + "doc-comment", + "snafu-derive 0.7.0", ] [[package]] @@ -1847,6 +1858,18 @@ dependencies = [ "syn", ] +[[package]] +name = "snafu-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a7fe9b0669ef117c5cabc5549638528f36771f058ff977d7689deb517833a75" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "socket2" version = "0.4.2" @@ -2083,7 +2106,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "snafu", + "snafu 0.6.10", "tempfile", "untrusted", "url", @@ -2101,7 +2124,7 @@ dependencies = [ "rusoto_core", "rusoto_credential", "rusoto_kms", - "snafu", + "snafu 0.6.10", "tokio", "tough", ] @@ -2117,7 +2140,7 @@ dependencies = [ "rusoto_ssm", "serde", "serde_json", - "snafu", + "snafu 0.6.10", "tokio", "tough", ] @@ -2210,7 +2233,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "snafu", + "snafu 0.7.0", "toml", ] diff --git a/tools/deny.toml b/tools/deny.toml index 3beb2794..eca08746 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -46,7 +46,10 @@ license-files = [ multiple-versions = "deny" wildcards = "deny" -skip = [] +skip-tree = [ + # temporarily using a different version of snafu + { name = "parse-datetime", version = "0.1.0" }, +] [sources] # Deny crates from unknown registries or git repositories. 
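The follow-up patch below applies the two breaking changes that come with snafu 0.7 across the tools workspace: the `visibility` attribute now takes parentheses instead of a string, and the generated context selectors gain a `Snafu` suffix (so `error::ManifestFileRead { .. }` becomes `error::ManifestFileReadSnafu { .. }`). A small before/after sketch, assuming `snafu = "0.7"`; the enum is illustrative rather than the actual buildsys error type:

```rust
use snafu::{ResultExt, Snafu};
use std::{fs, io, path::{Path, PathBuf}};

#[derive(Debug, Snafu)]
// snafu 0.6 spelled this as: #[snafu(visibility = "pub(crate)")]
#[snafu(visibility(pub(crate)))]
pub(crate) enum Error {
    #[snafu(display("Failed to read manifest file '{}': {}", path.display(), source))]
    ManifestFileRead { path: PathBuf, source: io::Error },
}

pub(crate) fn read_manifest(path: &Path) -> Result<String, Error> {
    // snafu 0.6 used the bare variant name as the selector: `ManifestFileRead { path }`.
    // snafu 0.7 generates a suffixed selector instead.
    fs::read_to_string(path).context(ManifestFileReadSnafu { path })
}

fn main() {
    match read_manifest(Path::new("Cargo.toml")) {
        Ok(contents) => println!("read {} bytes", contents.len()),
        Err(e) => eprintln!("{}", e),
    }
}
```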
From 5f1f7e75df4ea68d7b1f0788f1e4ee8e440a7356 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 2 Feb 2022 18:45:27 -0800 Subject: [PATCH 0593/1356] tools: update snafu to 0.7 --- tools/Cargo.lock | 10 +- tools/buildsys/Cargo.toml | 2 +- tools/buildsys/src/builder.rs | 34 ++++--- tools/buildsys/src/builder/error.rs | 2 +- tools/buildsys/src/cache.rs | 35 ++++--- tools/buildsys/src/cache/error.rs | 2 +- tools/buildsys/src/main.rs | 24 ++--- tools/buildsys/src/manifest.rs | 5 +- tools/buildsys/src/manifest/error.rs | 2 +- tools/buildsys/src/project.rs | 2 +- tools/buildsys/src/project/error.rs | 2 +- tools/buildsys/src/spec.rs | 4 +- tools/buildsys/src/spec/error.rs | 2 +- tools/infrasys/Cargo.toml | 2 +- tools/infrasys/src/error.rs | 2 +- tools/infrasys/src/keys.rs | 37 +++---- tools/infrasys/src/main.rs | 46 ++++----- tools/infrasys/src/root.rs | 20 ++-- tools/infrasys/src/s3.rs | 38 ++++---- tools/infrasys/src/shared.rs | 16 +-- .../test_tomls/toml_yaml_conversion.yml | 40 ++++++++ tools/pubsys-config/Cargo.toml | 2 +- tools/pubsys-config/src/lib.rs | 16 +-- tools/pubsys-config/src/vmware.rs | 10 +- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys-setup/src/main.rs | 62 ++++++------ tools/pubsys/Cargo.toml | 2 +- tools/pubsys/src/aws/ami/mod.rs | 64 ++++++------ tools/pubsys/src/aws/ami/register.rs | 22 +++-- tools/pubsys/src/aws/ami/snapshot.rs | 6 +- tools/pubsys/src/aws/ami/wait.rs | 20 ++-- tools/pubsys/src/aws/client.rs | 12 +-- tools/pubsys/src/aws/mod.rs | 6 +- tools/pubsys/src/aws/promote_ssm/mod.rs | 28 +++--- tools/pubsys/src/aws/publish_ami/mod.rs | 58 +++++------ tools/pubsys/src/aws/ssm/mod.rs | 36 +++---- tools/pubsys/src/aws/ssm/ssm.rs | 16 +-- tools/pubsys/src/aws/ssm/template.rs | 20 ++-- tools/pubsys/src/main.rs | 38 +++++--- tools/pubsys/src/repo.rs | 97 ++++++++++--------- .../pubsys/src/repo/check_expirations/mod.rs | 14 +-- tools/pubsys/src/repo/refresh_repo/mod.rs | 30 +++--- tools/pubsys/src/repo/validate_repo/mod.rs | 22 ++--- tools/pubsys/src/vmware/govc.rs | 8 +- tools/pubsys/src/vmware/upload_ova/mod.rs | 26 ++--- 45 files changed, 507 insertions(+), 437 deletions(-) create mode 100644 tools/infrasys/test_tomls/toml_yaml_conversion.yml diff --git a/tools/Cargo.lock b/tools/Cargo.lock index b9602c4f..627d9a1b 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -161,7 +161,7 @@ dependencies = [ "serde", "serde_plain", "sha2", - "snafu 0.6.10", + "snafu 0.7.0", "toml", "url", "walkdir", @@ -792,7 +792,7 @@ dependencies = [ "sha2", "shell-words", "simplelog", - "snafu 0.6.10", + "snafu 0.7.0", "structopt", "tokio", "toml", @@ -1195,7 +1195,7 @@ dependencies = [ "serde", "serde_json", "simplelog", - "snafu 0.6.10", + "snafu 0.7.0", "structopt", "tempfile", "tinytemplate", @@ -1220,7 +1220,7 @@ dependencies = [ "parse-datetime", "serde", "serde_yaml", - "snafu 0.6.10", + "snafu 0.7.0", "toml", "url", ] @@ -1236,7 +1236,7 @@ dependencies = [ "sha2", "shell-words", "simplelog", - "snafu 0.6.10", + "snafu 0.7.0", "structopt", "tempfile", "toml", diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index bd7ae3d1..3fd048ec 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -18,7 +18,7 @@ reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tl serde = { version = "1.0", features = ["derive"] } serde_plain = "1.0" sha2 = "0.9" -snafu = "0.6" +snafu = "0.7" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } walkdir = "2" diff --git a/tools/buildsys/src/builder.rs 
b/tools/buildsys/src/builder.rs index 293bd561..28da087a 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -77,18 +77,18 @@ impl PackageBuilder { let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); let arch = getenv("BUILDSYS_ARCH")?; let goarch = serde_plain::from_str::(&arch) - .context(error::UnsupportedArch { arch: &arch })? + .context(error::UnsupportedArchSnafu { arch: &arch })? .goarch(); // We do *not* want to rebuild most packages when the variant changes, because most aren't // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" // themselves in the package's spec file. let var = "BUILDSYS_VARIANT"; - let variant = env::var(var).context(error::Environment { var })?; + let variant = env::var(var).context(error::EnvironmentSnafu { var })?; // Same for repo, which is used to determine the correct root.json, which is only included // in the os package. let var = "PUBLISH_REPO"; - let repo = env::var(var).context(error::Environment { var })?; + let repo = env::var(var).context(error::EnvironmentSnafu { var })?; let mut args = Vec::new(); args.build_arg("PACKAGE", package); @@ -124,7 +124,7 @@ impl VariantBuilder { let variant = getenv("BUILDSYS_VARIANT")?; let arch = getenv("BUILDSYS_ARCH")?; let goarch = serde_plain::from_str::(&arch) - .context(error::UnsupportedArch { arch: &arch })? + .context(error::UnsupportedArchSnafu { arch: &arch })? .goarch(); let image_layout = image_layout.cloned().unwrap_or_default(); @@ -201,7 +201,7 @@ fn build( ) -> Result<()> { // Our Dockerfile is in the top-level directory. let root = getenv("BUILDSYS_ROOT_DIR")?; - env::set_current_dir(&root).context(error::DirectoryChange { path: &root })?; + env::set_current_dir(&root).context(error::DirectoryChangeSnafu { path: &root })?; // Compute a per-checkout prefix for the tag to avoid collisions. 
let mut d = Sha512::new(); @@ -304,7 +304,7 @@ fn docker(args: &[String], retry: Retry) -> Result { .stdout_capture() .unchecked() .run() - .context(error::CommandStart)?; + .context(error::CommandStartSnafu)?; let stdout = String::from_utf8_lossy(&output.stdout); println!("{}", &stdout); @@ -314,7 +314,7 @@ fn docker(args: &[String], retry: Retry) -> Result { ensure!( retry_messages.iter().any(|m| m.is_match(&stdout)) && attempt < max_attempts, - error::DockerExecution { + error::DockerExecutionSnafu { args: &args.join(" ") } ); @@ -346,7 +346,7 @@ fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result .iter() .collect(); - fs::create_dir_all(&path).context(error::DirectoryCreate { path: &path })?; + fs::create_dir_all(&path).context(error::DirectoryCreateSnafu { path: &path })?; Ok(path) } @@ -372,16 +372,16 @@ where for artifact_file in find_files(&build_dir, is_artifact) { let mut marker_file = artifact_file.clone().into_os_string(); marker_file.push(MARKER_EXTENSION); - File::create(&marker_file).context(error::FileCreate { path: &marker_file })?; + File::create(&marker_file).context(error::FileCreateSnafu { path: &marker_file })?; let mut output_file: PathBuf = output_dir.as_ref().into(); output_file.push( artifact_file .file_name() - .context(error::BadFilename { path: &output_file })?, + .context(error::BadFilenameSnafu { path: &output_file })?, ); - fs::rename(&artifact_file, &output_file).context(error::FileRename { + fs::rename(&artifact_file, &output_file).context(error::FileRenameSnafu { old_path: &artifact_file, new_path: &output_file, })?; @@ -410,15 +410,17 @@ where output_file.push( marker_file .file_name() - .context(error::BadFilename { path: &marker_file })?, + .context(error::BadFilenameSnafu { path: &marker_file })?, ); output_file.set_extension(""); if output_file.exists() { - std::fs::remove_file(&output_file).context(error::FileRemove { path: &output_file })?; + std::fs::remove_file(&output_file) + .context(error::FileRemoveSnafu { path: &output_file })?; } - std::fs::remove_file(&marker_file).context(error::FileRemove { path: &marker_file })?; + std::fs::remove_file(&marker_file) + .context(error::FileRemoveSnafu { path: &marker_file })?; } Ok(()) @@ -439,7 +441,7 @@ where .max_depth(1) .into_iter() .filter_entry(move |e| filter(e)) - .flat_map(|e| e.context(error::DirectoryWalk)) + .flat_map(|e| e.context(error::DirectoryWalkSnafu)) .map(|e| e.into_path()) } @@ -448,7 +450,7 @@ where /// output. 
fn getenv(var: &str) -> Result { println!("cargo:rerun-if-env-changed={}", var); - env::var(var).context(error::Environment { var }) + env::var(var).context(error::EnvironmentSnafu { var }) } // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs index a6476e93..6fccfd62 100644 --- a/tools/buildsys/src/builder/error.rs +++ b/tools/buildsys/src/builder/error.rs @@ -2,7 +2,7 @@ use snafu::Snafu; use std::path::PathBuf; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to start command: {}", source))] CommandStart { source: std::io::Error }, diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 6d3e6a47..3e924935 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -32,7 +32,7 @@ impl LookasideCache { let path = &f.path.as_ref().unwrap_or_else(|| &url_file_name); ensure!( path.components().count() == 1, - error::ExternalFileName { path } + error::ExternalFileNameSnafu { path } ); let hash = &f.sha512; @@ -41,7 +41,7 @@ impl LookasideCache { Ok(_) => continue, Err(e) => { eprintln!("{}", e); - fs::remove_file(path).context(error::ExternalFileDelete { path })?; + fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?; } } } @@ -53,7 +53,8 @@ impl LookasideCache { let url = format!("{}/{}/{}/{}", LOOKASIDE_CACHE.to_string(), name, hash, name); match Self::fetch_file(&url, &tmp, hash) { Ok(_) => { - fs::rename(&tmp, path).context(error::ExternalFileRename { path: &tmp })?; + fs::rename(&tmp, path) + .context(error::ExternalFileRenameSnafu { path: &tmp })?; continue; } Err(e) => { @@ -65,7 +66,7 @@ impl LookasideCache { if std::env::var("BUILDSYS_UPSTREAM_SOURCE_FALLBACK") == Ok("true".to_string()) { println!("Fetching {:?} from upstream source", url_file_name); Self::fetch_file(&f.url, &tmp, hash)?; - fs::rename(&tmp, path).context(error::ExternalFileRename { path: &tmp })?; + fs::rename(&tmp, path).context(error::ExternalFileRenameSnafu { path: &tmp })?; } } @@ -76,48 +77,52 @@ impl LookasideCache { /// then verifies the contents against the SHA-512 hash provided. fn fetch_file>(url: &str, path: P, hash: &str) -> Result<()> { let path = path.as_ref(); - let mut resp = reqwest::blocking::get(url).context(error::ExternalFileRequest { url })?; + let mut resp = + reqwest::blocking::get(url).context(error::ExternalFileRequestSnafu { url })?; let status = resp.status(); ensure!( status.is_success(), - error::ExternalFileFetch { url, status } + error::ExternalFileFetchSnafu { url, status } ); - let f = File::create(path).context(error::ExternalFileOpen { path })?; + let f = File::create(path).context(error::ExternalFileOpenSnafu { path })?; let mut f = BufWriter::new(f); resp.copy_to(&mut f) - .context(error::ExternalFileSave { path })?; + .context(error::ExternalFileSaveSnafu { path })?; drop(f); match Self::verify_file(path, hash) { Ok(_) => Ok(()), Err(e) => { - fs::remove_file(path).context(error::ExternalFileDelete { path })?; + fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?; Err(e) } } } fn extract_file_name(url: &str) -> Result { - let parsed = reqwest::Url::parse(url).context(error::ExternalFileUrl { url })?; + let parsed = reqwest::Url::parse(url).context(error::ExternalFileUrlSnafu { url })?; let name = parsed .path_segments() - .context(error::ExternalFileName { path: url })? 
+ .context(error::ExternalFileNameSnafu { path: url })? .last() - .context(error::ExternalFileName { path: url })?; + .context(error::ExternalFileNameSnafu { path: url })?; Ok(name.into()) } /// Reads a file from disk and compares it to the expected SHA-512 hash. fn verify_file>(path: P, hash: &str) -> Result<()> { let path = path.as_ref(); - let mut f = File::open(path).context(error::ExternalFileOpen { path })?; + let mut f = File::open(path).context(error::ExternalFileOpenSnafu { path })?; let mut d = Sha512::new(); - io::copy(&mut f, &mut d).context(error::ExternalFileLoad { path })?; + io::copy(&mut f, &mut d).context(error::ExternalFileLoadSnafu { path })?; let digest = hex::encode(d.finalize()); - ensure!(digest == hash, error::ExternalFileVerify { path, hash }); + ensure!( + digest == hash, + error::ExternalFileVerifySnafu { path, hash } + ); Ok(()) } } diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs index 55fab980..4617ad9e 100644 --- a/tools/buildsys/src/cache/error.rs +++ b/tools/buildsys/src/cache/error.rs @@ -3,7 +3,7 @@ use std::io; use std::path::PathBuf; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Bad file name '{}'", path.display()))] ExternalFileName { path: PathBuf }, diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index b3415b77..e8ab2427 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -29,7 +29,7 @@ mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(super) enum Error { ManifestParse { source: super::manifest::error::Error, @@ -125,12 +125,12 @@ fn build_package() -> Result<()> { let variant = getenv("BUILDSYS_VARIANT")?; let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file); let variant_manifest = - ManifestInfo::new(variant_manifest_path).context(error::ManifestParse)?; + ManifestInfo::new(variant_manifest_path).context(error::ManifestParseSnafu)?; supported_arch(&variant_manifest)?; let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into(); let manifest = - ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParse)?; + ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; // if manifest has package.metadata.build-package.variant-specific = true, then println rerun-if-env-changed if let Some(sensitive) = manifest.variant_sensitive() { @@ -140,7 +140,7 @@ fn build_package() -> Result<()> { } if let Some(files) = manifest.external_files() { - LookasideCache::fetch(&files).context(error::ExternalFileFetch)?; + LookasideCache::fetch(&files).context(error::ExternalFileFetchSnafu)?; } if let Some(groups) = manifest.source_groups() { @@ -149,7 +149,7 @@ fn build_package() -> Result<()> { println!("cargo:rerun-if-env-changed={}", var); let dirs = groups.iter().map(|d| root.join(d)).collect::>(); - let info = ProjectInfo::crawl(&dirs).context(error::ProjectCrawl)?; + let info = ProjectInfo::crawl(&dirs).context(error::ProjectCrawlSnafu)?; for f in info.files { println!("cargo:rerun-if-changed={}", f.display()); } @@ -165,7 +165,7 @@ fn build_package() -> Result<()> { let spec = format!("{}.spec", package); println!("cargo:rerun-if-changed={}", spec); - let info = SpecInfo::new(PathBuf::from(&spec)).context(error::SpecParse)?; + let info = SpecInfo::new(PathBuf::from(&spec)).context(error::SpecParseSnafu)?; for f in info.sources 
{ println!("cargo:rerun-if-changed={}", f.display()); @@ -175,7 +175,7 @@ fn build_package() -> Result<()> { println!("cargo:rerun-if-changed={}", f.display()); } - PackageBuilder::build(&package).context(error::BuildAttempt)?; + PackageBuilder::build(&package).context(error::BuildAttemptSnafu)?; Ok(()) } @@ -186,7 +186,7 @@ fn build_variant() -> Result<()> { println!("cargo:rerun-if-changed={}", manifest_file); let manifest = - ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParse)?; + ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; supported_arch(&manifest)?; @@ -195,7 +195,7 @@ fn build_variant() -> Result<()> { let image_layout = manifest.image_layout(); let kernel_parameters = manifest.kernel_parameters(); VariantBuilder::build(&packages, image_format, image_layout, kernel_parameters) - .context(error::BuildAttempt)?; + .context(error::BuildAttemptSnafu)?; } else { println!("cargo:warning=No included packages in manifest. Skipping variant build."); } @@ -208,11 +208,11 @@ fn supported_arch(manifest: &ManifestInfo) -> Result<()> { if let Some(supported_arches) = manifest.supported_arches() { let arch = getenv("BUILDSYS_ARCH")?; let current_arch: SupportedArch = - serde_plain::from_str(&arch).context(error::UnknownArch { arch: &arch })?; + serde_plain::from_str(&arch).context(error::UnknownArchSnafu { arch: &arch })?; ensure!( supported_arches.contains(¤t_arch), - error::UnsupportedArch { + error::UnsupportedArchSnafu { arch: &arch, supported_arches: supported_arches .into_iter() @@ -226,5 +226,5 @@ fn supported_arch(manifest: &ManifestInfo) -> Result<()> { /// Retrieve a variable that we expect to be set in the environment. fn getenv(var: &str) -> Result { - env::var(var).context(error::Environment { var }) + env::var(var).context(error::EnvironmentSnafu { var }) } diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 028033bb..82bb7bae 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -137,8 +137,9 @@ impl ManifestInfo { /// Extract the settings we understand from `Cargo.toml`. pub(crate) fn new>(path: P) -> Result { let path = path.as_ref(); - let manifest_data = fs::read_to_string(path).context(error::ManifestFileRead { path })?; - toml::from_str(&manifest_data).context(error::ManifestFileLoad { path }) + let manifest_data = + fs::read_to_string(path).context(error::ManifestFileReadSnafu { path })?; + toml::from_str(&manifest_data).context(error::ManifestFileLoadSnafu { path }) } /// Convenience method to return the list of source groups. 
diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs index c536822d..a2f2056c 100644 --- a/tools/buildsys/src/manifest/error.rs +++ b/tools/buildsys/src/manifest/error.rs @@ -3,7 +3,7 @@ use std::io; use std::path::PathBuf; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to read manifest file '{}': {}", path.display(), source))] ManifestFileRead { path: PathBuf, source: io::Error }, diff --git a/tools/buildsys/src/project.rs b/tools/buildsys/src/project.rs index 0ab8df24..08b5d4ff 100644 --- a/tools/buildsys/src/project.rs +++ b/tools/buildsys/src/project.rs @@ -31,7 +31,7 @@ impl ProjectInfo { files.extend( walker .filter_entry(|e| !Self::ignored(e)) - .flat_map(|e| e.context(error::DirectoryWalk)) + .flat_map(|e| e.context(error::DirectoryWalkSnafu)) .map(|e| e.into_path()) .filter(|e| e.is_file()), ); diff --git a/tools/buildsys/src/project/error.rs b/tools/buildsys/src/project/error.rs index d81ec589..03502682 100644 --- a/tools/buildsys/src/project/error.rs +++ b/tools/buildsys/src/project/error.rs @@ -1,7 +1,7 @@ use snafu::Snafu; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to walk directory to find project files: {}", source))] DirectoryWalk { source: walkdir::Error }, diff --git a/tools/buildsys/src/spec.rs b/tools/buildsys/src/spec.rs index e50b34ab..946b97f7 100644 --- a/tools/buildsys/src/spec.rs +++ b/tools/buildsys/src/spec.rs @@ -32,14 +32,14 @@ impl SpecInfo { /// "Parse" a spec file, extracting values of potential interest. fn parse>(path: P) -> Result<(Vec, Vec)> { let path = path.as_ref(); - let f = File::open(path).context(error::SpecFileRead { path })?; + let f = File::open(path).context(error::SpecFileReadSnafu { path })?; let f = BufReader::new(f); let mut sources = Vec::new(); let mut patches = Vec::new(); for line in f.lines() { - let line = line.context(error::SpecFileRead { path })?; + let line = line.context(error::SpecFileReadSnafu { path })?; let mut tokens = line.split_whitespace().collect::>(); if let Some(t) = tokens.pop_front() { diff --git a/tools/buildsys/src/spec/error.rs b/tools/buildsys/src/spec/error.rs index 90af879e..969ccf32 100644 --- a/tools/buildsys/src/spec/error.rs +++ b/tools/buildsys/src/spec/error.rs @@ -3,7 +3,7 @@ use std::io; use std::path::PathBuf; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to read spec file '{}': {}", path.display(), source))] SpecFileRead { path: PathBuf, source: io::Error }, diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 9fa21dca..fc78cc32 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -20,7 +20,7 @@ serde_yaml = "0.8.17" sha2 = "0.9" shell-words = "1.0.0" simplelog = "0.10.0" -snafu = "0.6" +snafu = "0.7" structopt = { version = "0.3", default-features = false } tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS toml = "0.5" diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs index 6dd8c3ba..20bbdc01 100644 --- a/tools/infrasys/src/error.rs +++ b/tools/infrasys/src/error.rs @@ -3,7 +3,7 @@ use std::io; use std::path::PathBuf; #[derive(Debug, Snafu)] -#[snafu(visibility = "pub(super)")] +#[snafu(visibility(pub(super)))] pub enum Error { #[snafu(display( "Failed to create 
CFN stack '{}' in '{}': {}", diff --git a/tools/infrasys/src/keys.rs b/tools/infrasys/src/keys.rs index 7bd8c0b8..15afde39 100644 --- a/tools/infrasys/src/keys.rs +++ b/tools/infrasys/src/keys.rs @@ -18,7 +18,7 @@ pub async fn create_keys(signing_key_config: &mut SigningKeyConfig) -> Result<() SigningKeyConfig::kms { config, .. } => { config .as_mut() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: "config field for a kms key", })? .create_kms_keys() @@ -33,7 +33,7 @@ pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result match signing_key_config { SigningKeyConfig::file { .. } => (), SigningKeyConfig::kms { config, .. } => { - let config = config.as_ref().context(error::MissingConfig { + let config = config.as_ref().context(error::MissingConfigSnafu { missing: "config field for a kms key", })?; @@ -43,19 +43,19 @@ pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result config.key_alias.as_ref(), ) { // everything is unspecified (no way to allocate a key_id) - (true, true, None) => error::KeyConfig { + (true, true, None) => error::KeyConfigSnafu { missing: "an available_key or region/key_alias", } .fail()?, // regions is populated, but no key alias // (it doesn't matter if available keys are listed or not) - (_, false, None) => error::KeyConfig { + (_, false, None) => error::KeyConfigSnafu { missing: "key_alias", } .fail()?, // key alias is populated, but no key regions to create keys in // (it doesn't matter if available keys are listed or not) - (_, true, Some(..)) => error::KeyConfig { missing: "region" }.fail()?, + (_, true, Some(..)) => error::KeyConfigSnafu { missing: "region" }.fail()?, _ => (), }; } @@ -85,19 +85,19 @@ impl KMSKeyConfigExt for KMSKeyConfig { for region in self.regions.iter() { let stack_name = format!( "TUF-KMS-{}", - self.key_alias.as_ref().context(error::KeyConfig { + self.key_alias.as_ref().context(error::KeyConfigSnafu { missing: "key_alias", })? ); let cfn_client = CloudFormationClient::new( - Region::from_str(region).context(error::ParseRegion { what: region })?, + Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, ); let cfn_filepath = format!( "{}/infrasys/cloudformation-templates/kms_key_setup.yml", shared::getenv("BUILDSYS_TOOLS_DIR")? ); let cfn_template = fs::read_to_string(&cfn_filepath) - .context(error::FileRead { path: cfn_filepath })?; + .context(error::FileReadSnafu { path: cfn_filepath })?; let stack_result = cfn_client .create_stack(CreateStackInput { @@ -105,7 +105,7 @@ impl KMSKeyConfigExt for KMSKeyConfig { "Alias".to_string(), self.key_alias .as_ref() - .context(error::KeyConfig { + .context(error::KeyConfigSnafu { missing: "key_alias", })? 
.to_string(), @@ -115,7 +115,7 @@ impl KMSKeyConfigExt for KMSKeyConfig { ..Default::default() }) .await - .context(error::CreateStack { + .context(error::CreateStackSnafu { stack_name: &stack_name, region, })?; @@ -123,19 +123,20 @@ impl KMSKeyConfigExt for KMSKeyConfig { let stack_arn = stack_result .clone() .stack_id - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "stack_id", resource_name: &stack_name, })?; let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; - let key_id = output_array[0] - .output_value - .as_ref() - .context(error::ParseResponse { - what: "outputs[0].output_value (key id)", - resource_name: stack_name, - })?; + let key_id = + output_array[0] + .output_value + .as_ref() + .context(error::ParseResponseSnafu { + what: "outputs[0].output_value (key id)", + resource_name: stack_name, + })?; self.available_keys .insert(key_id.to_string(), region.to_string()); self.key_stack_arns diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 6464189f..94bd5437 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -62,11 +62,11 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; match args.subcommand { SubCommand::CreateInfra(ref run_task_args) => { - let rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { create_infra(&args.infra_config_path, &run_task_args.root_role_path).await }) @@ -75,7 +75,7 @@ fn run() -> Result<()> { } fn check_infra_lock(toml_path: &Path) -> Result<()> { - let lock_path = InfraConfig::compute_lock_path(toml_path).context(error::Config)?; + let lock_path = InfraConfig::compute_lock_path(toml_path).context(error::ConfigSnafu)?; ensure!(!lock_path.is_file(), { error!( @@ -83,7 +83,7 @@ fn check_infra_lock(toml_path: &Path) -> Result<()> { \nPlease clean up your TUF resources in AWS, delete Infra.lock, and run again.", lock_path.display() ); - error::FileExists { path: lock_path } + error::FileExistsSnafu { path: lock_path } }); Ok(()) } @@ -92,18 +92,18 @@ fn check_infra_lock(toml_path: &Path) -> Result<()> { async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { check_infra_lock(toml_path)?; info!("Parsing Infra.toml..."); - let mut infra_config = InfraConfig::from_path(toml_path).context(error::Config)?; + let mut infra_config = InfraConfig::from_path(toml_path).context(error::ConfigSnafu)?; let repos = infra_config .repo .as_mut() - .context(error::MissingConfig { missing: "repo" })?; + .context(error::MissingConfigSnafu { missing: "repo" })?; let s3_info_map = infra_config .aws .as_mut() - .context(error::MissingConfig { missing: "aws" })? + .context(error::MissingConfigSnafu { missing: "aws" })? 
.s3 .as_mut() - .context(error::MissingConfig { missing: "aws.s3" })?; + .context(error::MissingConfigSnafu { missing: "aws.s3" })?; for (repo_name, repo_config) in repos.iter_mut() { // Validate repo_config and unwrap required optional data @@ -135,7 +135,7 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { if repo_info.metadata_base_url.is_none() { *repo_info.metadata_base_url = Some( Url::parse(format!("https://{}{}/", &bucket_rdn, &repo_info.prefix).as_str()) - .context(error::ParseUrl { input: &bucket_rdn })?, + .context(error::ParseUrlSnafu { input: &bucket_rdn })?, ); } if repo_info.targets_url.is_none() { @@ -143,7 +143,7 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { Url::parse( format!("https://{}{}/targets/", &bucket_rdn, &repo_info.prefix).as_str(), ) - .context(error::ParseUrl { input: &bucket_rdn })?, + .context(error::ParseUrlSnafu { input: &bucket_rdn })?, ); } if repo_info.root_role_url.is_none() { @@ -151,10 +151,10 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { Url::parse( format!("https://{}{}/root.json", &bucket_rdn, &repo_info.prefix).as_str(), ) - .context(error::ParseUrl { input: &bucket_rdn })?, + .context(error::ParseUrlSnafu { input: &bucket_rdn })?, ); } - let root_role_data = fs::read_to_string(&root_role_path).context(error::FileRead { + let root_role_data = fs::read_to_string(&root_role_path).context(error::FileReadSnafu { path: root_role_path, })?; let mut d = Sha512::new(); @@ -165,15 +165,15 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { // Generate Infra.lock info!("Writing Infra.lock..."); - let yaml_string = serde_yaml::to_string(&infra_config).context(error::InvalidYaml)?; + let yaml_string = serde_yaml::to_string(&infra_config).context(error::InvalidYamlSnafu)?; fs::write( toml_path .parent() - .context(error::Parent { path: toml_path })? + .context(error::ParentSnafu { path: toml_path })? 
.join("Infra.lock"), yaml_string, ) - .context(error::FileWrite { path: toml_path })?; + .context(error::FileWriteSnafu { path: toml_path })?; info!("Complete!"); Ok(()) @@ -205,17 +205,17 @@ impl<'a> ValidRepoInfo<'a> { repo_config .file_hosting_config_name .as_ref() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: "file_hosting_config_name", })?; let s3_info = s3_info_map .get_mut(s3_stack_name) - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: format!("aws.s3 config with name {}", s3_stack_name), })?; Ok(ValidRepoInfo { s3_stack_name: s3_stack_name.to_string(), - s3_region: s3_info.region.as_ref().context(error::MissingConfig { + s3_region: s3_info.region.as_ref().context(error::MissingConfigSnafu { missing: format!("region for '{}' s3 config", s3_stack_name), })?, bucket_name: &mut s3_info.bucket_name, @@ -223,29 +223,29 @@ impl<'a> ValidRepoInfo<'a> { vpce_id: s3_info .vpc_endpoint_id .as_ref() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: format!("vpc_endpoint_id for '{}' s3 config", s3_stack_name), })?, prefix: s3::format_prefix(&s3_info.s3_prefix), signing_keys: repo_config .signing_keys .as_mut() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: format!("signing_keys for '{}' repo config", repo_name), })?, root_keys: repo_config .root_keys .as_mut() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: format!("root_keys for '{}' repo config", repo_name), })?, root_key_threshold: repo_config.root_key_threshold.as_mut().context( - error::MissingConfig { + error::MissingConfigSnafu { missing: format!("root_key_threshold for '{}' repo config", repo_name), }, )?, pub_key_threshold: repo_config.pub_key_threshold.as_ref().context( - error::MissingConfig { + error::MissingConfigSnafu { missing: format!("pub_key_threshold for '{}' repo config", repo_name), }, )?, diff --git a/tools/infrasys/src/root.rs b/tools/infrasys/src/root.rs index 819d92d8..e447f045 100644 --- a/tools/infrasys/src/root.rs +++ b/tools/infrasys/src/root.rs @@ -14,16 +14,16 @@ macro_rules! tuftool { ($region:expr, $format_str:expr, $($format_arg:expr),*) => { let arg_str = format!($format_str, $($format_arg),*); trace!("tuftool arg string: {}", arg_str); - let args = shell_words::split(&arg_str).context(error::CommandSplit { command: &arg_str })?; + let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?; trace!("tuftool split args: {:#?}", args); let status = Command::new("tuftool") .args(args) .env("AWS_REGION", $region) .status() - .context(error::TuftoolSpawn)?; + .context(error::TuftoolSpawnSnafu)?; - ensure!(status.success(), error::TuftoolResult { + ensure!(status.success(), error::TuftoolResultSnafu { command: arg_str, code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) }); @@ -33,7 +33,7 @@ macro_rules! tuftool { pub fn check_root(root_role_path: &Path) -> Result<()> { ensure!(!root_role_path.is_file(), { warn!("Cowardly refusing to overwrite the existing root.json at {}. 
Please manually delete it and run again.", root_role_path.display()); - error::FileExists { + error::FileExistsSnafu { path: root_role_path, } }); @@ -43,11 +43,11 @@ pub fn check_root(root_role_path: &Path) -> Result<()> { /// Creates the directory where root.json will live and creates root.json itself according to details specified in root-role-path pub fn create_root(root_role_path: &Path) -> Result<()> { // Make /roles and /keys directories, if they don't exist, so we can write generated files. - let role_dir = root_role_path.parent().context(error::InvalidPath { + let role_dir = root_role_path.parent().context(error::InvalidPathSnafu { path: root_role_path, thing: "root role", })?; - fs::create_dir_all(role_dir).context(error::Mkdir { path: role_dir })?; + fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; // Initialize root tuftool!( Region::default().name(), @@ -75,7 +75,7 @@ pub fn add_keys( SigningKeyConfig::kms { key_id, config, .. } => add_keys_kms( &config .as_ref() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: "config field for a kms key", })? .available_keys, @@ -103,7 +103,7 @@ fn add_keys_kms( ) -> Result<()> { ensure!( (*available_keys).len() >= (*threshold).get(), - error::InvalidThreshold { + error::InvalidThresholdSnafu { threshold: threshold.to_string(), num_keys: (*available_keys).len(), } @@ -160,7 +160,7 @@ fn add_keys_kms( available_keys .iter() .next() - .context(error::KeyCreation)? + .context(error::KeyCreationSnafu)? .0 .to_string(), ); @@ -178,7 +178,7 @@ pub fn sign_root(signing_key_config: &SigningKeyConfig, filepath: &str) -> Resul SigningKeyConfig::kms { config, .. } => { for (keyid, region) in config .as_ref() - .context(error::MissingConfig { + .context(error::MissingConfigSnafu { missing: "KMS key details", })? 
.available_keys diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs index c91863b9..ee998aa3 100644 --- a/tools/infrasys/src/s3.rs +++ b/tools/infrasys/src/s3.rs @@ -39,7 +39,7 @@ pub fn format_prefix(prefix: &str) -> String { pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, String, String)> { // TODO: Add support for accommodating pre-existing buckets (skip this creation process) let cfn_client = CloudFormationClient::new( - Region::from_str(region).context(error::ParseRegion { what: region })?, + Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, ); let cfn_filepath: PathBuf = format!( "{}/infrasys/cloudformation-templates/s3_setup.yml", @@ -47,7 +47,7 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, ) .into(); let cfn_template = - fs::read_to_string(&cfn_filepath).context(error::FileRead { path: cfn_filepath })?; + fs::read_to_string(&cfn_filepath).context(error::FileReadSnafu { path: cfn_filepath })?; let stack_result = cfn_client .create_stack(CreateStackInput { stack_name: stack_name.to_string(), @@ -55,12 +55,12 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, ..Default::default() }) .await - .context(error::CreateStack { stack_name, region })?; + .context(error::CreateStackSnafu { stack_name, region })?; // We don't have to wait for successful stack creation to grab the stack ARN let stack_arn = stack_result .clone() .stack_id - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "stack_id", resource_name: stack_name, })?; @@ -70,7 +70,7 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, let bucket_name = output_array[0] .output_value .as_ref() - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "outputs[0].output_value (bucket name)", resource_name: stack_name, })? @@ -78,7 +78,7 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, let bucket_rdn = output_array[1] .output_value .as_ref() - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "outputs[1].output_value (bucket url)", resource_name: stack_name, })? @@ -99,7 +99,7 @@ pub async fn add_bucket_policy( ) -> Result<()> { // Get old policy let s3_client = - S3Client::new(Region::from_str(region).context(error::ParseRegion { what: region })?); + S3Client::new(Region::from_str(region).context(error::ParseRegionSnafu { what: region })?); let mut policy: serde_json::Value = match s3_client .get_bucket_policy(GetBucketPolicyRequest { bucket: bucket_name.to_string(), @@ -107,11 +107,11 @@ pub async fn add_bucket_policy( }) .await { - Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponse { + Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponseSnafu { what: "policy", resource_name: bucket_name, })?) 
- .context(error::InvalidJson { + .context(error::InvalidJsonSnafu { what: format!("retrieved bucket policy for {}", &bucket_name), })?, @@ -119,7 +119,7 @@ pub async fn add_bucket_policy( r#"{"Version": "2008-10-17", "Statement": []}"#, ) - .context(error::InvalidJson { + .context(error::InvalidJsonSnafu { what: format!("new bucket policy for {}", &bucket_name), })?, }; @@ -139,29 +139,29 @@ pub async fn add_bucket_policy( }}"#, bucket_name, prefix, vpcid )) - .context(error::InvalidJson { + .context(error::InvalidJsonSnafu { what: format!("new bucket policy for {}", &bucket_name), })?; // Append new policy onto old one policy .get_mut("Statement") - .context(error::GetPolicyStatement { bucket_name })? + .context(error::GetPolicyStatementSnafu { bucket_name })? .as_array_mut() - .context(error::GetPolicyStatement { bucket_name })? + .context(error::GetPolicyStatementSnafu { bucket_name })? .push(new_bucket_policy); // Push the new policy as a string s3_client .put_bucket_policy(PutBucketPolicyRequest { bucket: bucket_name.to_string(), - policy: serde_json::to_string(&policy).context(error::InvalidJson { + policy: serde_json::to_string(&policy).context(error::InvalidJsonSnafu { what: format!("new bucket policy for {}", &bucket_name), })?, ..Default::default() }) .await - .context(error::PutPolicy { bucket_name })?; + .context(error::PutPolicySnafu { bucket_name })?; Ok(()) } @@ -177,13 +177,13 @@ pub async fn upload_file( file_path: &Path, ) -> Result<()> { let s3_client = - S3Client::new(Region::from_str(region).context(error::ParseRegion { what: region })?); + S3Client::new(Region::from_str(region).context(error::ParseRegionSnafu { what: region })?); // File --> Bytes - let mut file = File::open(file_path).context(error::FileOpen { path: file_path })?; + let mut file = File::open(file_path).context(error::FileOpenSnafu { path: file_path })?; let mut buffer = Vec::new(); file.read_to_end(&mut buffer) - .context(error::FileRead { path: file_path })?; + .context(error::FileReadSnafu { path: file_path })?; s3_client .put_object(PutObjectRequest { @@ -193,7 +193,7 @@ pub async fn upload_file( ..Default::default() }) .await - .context(error::PutObject { bucket_name })?; + .context(error::PutObjectSnafu { bucket_name })?; Ok(()) } diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs index addde7e3..0f147f56 100644 --- a/tools/infrasys/src/shared.rs +++ b/tools/infrasys/src/shared.rs @@ -14,7 +14,7 @@ pub enum KeyRole { /// Retrieve a BUILDSYS_* variable that we expect to be set in the environment pub fn getenv(var: &str) -> Result { - env::var(var).context(error::Environment { var }) + env::var(var).context(error::EnvironmentSnafu { var }) } /// Generates a parameter type object used to specify parameters in CloudFormation templates @@ -39,9 +39,9 @@ pub async fn get_stack_outputs( ..Default::default() }) .await - .context(error::DescribeStack { stack_name, region })? + .context(error::DescribeStackSnafu { stack_name, region })? 
.stacks - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "stacks", resource_name: stack_name, })?[0] @@ -54,11 +54,11 @@ pub async fn get_stack_outputs( while status != "CREATE_COMPLETE" { ensure!( max_attempts > 0, - error::CreateStackTimeout { stack_name, region } + error::CreateStackTimeoutSnafu { stack_name, region } ); ensure!( status != "CREATE_FAILED", - error::CreateStackFailure { stack_name, region } + error::CreateStackFailureSnafu { stack_name, region } ); info!( "Waiting for stack resources to be ready, current status is '{}'...", @@ -71,9 +71,9 @@ pub async fn get_stack_outputs( ..Default::default() }) .await - .context(error::DescribeStack { stack_name, region })? + .context(error::DescribeStackSnafu { stack_name, region })? .stacks - .context(error::ParseResponse { + .context(error::ParseResponseSnafu { what: "stacks", resource_name: stack_name, })?[0] @@ -82,7 +82,7 @@ pub async fn get_stack_outputs( max_attempts -= 1; } - let output_array = stack_outputs.outputs.context(error::ParseResponse { + let output_array = stack_outputs.outputs.context(error::ParseResponseSnafu { what: "outputs", resource_name: stack_name, })?; diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.yml b/tools/infrasys/test_tomls/toml_yaml_conversion.yml new file mode 100644 index 00000000..c9482f65 --- /dev/null +++ b/tools/infrasys/test_tomls/toml_yaml_conversion.yml @@ -0,0 +1,40 @@ +--- +repo: + default: + root_role_url: ~ + root_role_sha512: ~ + signing_keys: + kms: + key_id: ~ + available_keys: + e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1 + key_alias: ~ + regions: [] + key_stack_arns: {} + root_keys: + kms: + key_id: ~ + available_keys: + e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1 + key_alias: ~ + regions: [] + key_stack_arns: {} + metadata_base_url: ~ + targets_url: ~ + file_hosting_config_name: TUF-Repo-S3-Buck + root_key_threshold: 1 + pub_key_threshold: 1 +aws: + regions: [] + role: ~ + profile: ~ + region: {} + ssm_prefix: ~ + s3: + TUF-Repo-S3-Buck: + region: us-west-2 + s3_prefix: /my-bottlerocket-remix + vpc_endpoint_id: vpc-12345 + stack_arn: ~ + bucket_name: ~ +vmware: ~ diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index 5010acd2..ae3b5dda 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -14,6 +14,6 @@ log = "0.4" parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.8.17" -snafu = "0.6" +snafu = "0.7" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 59b2f0ce..7c660c7a 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -35,8 +35,8 @@ impl InfraConfig { P: AsRef, { let path = path.as_ref(); - let infra_config_str = fs::read_to_string(path).context(error::File { path })?; - toml::from_str(&infra_config_str).context(error::InvalidToml { path }) + let infra_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; + toml::from_str(&infra_config_str).context(error::InvalidTomlSnafu { path }) } /// Deserializes an InfraConfig from a Infra.lock file at a given path @@ -45,8 +45,8 @@ impl InfraConfig { P: AsRef, { let path = path.as_ref(); - let infra_config_str = fs::read_to_string(path).context(error::File { path })?; - serde_yaml::from_str(&infra_config_str).context(error::InvalidLock { path }) + let infra_config_str = 
fs::read_to_string(path).context(error::FileSnafu { path })?; + serde_yaml::from_str(&infra_config_str).context(error::InvalidLockSnafu { path }) } /// Deserializes an InfraConfig from a given path, if it exists, otherwise builds a default @@ -97,7 +97,7 @@ impl InfraConfig { Ok(infra_config_path .as_ref() .parent() - .context(error::Parent { + .context(error::ParentSnafu { path: infra_config_path.as_ref(), })? .join("Infra.lock")) @@ -232,8 +232,8 @@ impl RepoExpirationPolicy { P: AsRef, { let path = path.as_ref(); - let expiration_str = fs::read_to_string(path).context(error::File { path })?; - toml::from_str(&expiration_str).context(error::InvalidToml { path }) + let expiration_str = fs::read_to_string(path).context(error::FileSnafu { path })?; + toml::from_str(&expiration_str).context(error::InvalidTomlSnafu { path }) } } @@ -252,7 +252,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub enum Error { #[snafu(display("Failed to read '{}': {}", path.display(), source))] File { path: PathBuf, source: io::Error }, diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs index a97e8e10..e5d43256 100644 --- a/tools/pubsys-config/src/vmware.rs +++ b/tools/pubsys-config/src/vmware.rs @@ -94,7 +94,7 @@ impl DatacenterBuilder { /// value. pub fn build(self) -> Result { let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfig { what }); + |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); Ok(Datacenter { vsphere_url: get_or_err(self.vsphere_url, "vSphere URL")?, @@ -133,8 +133,8 @@ impl DatacenterCredsConfig { P: AsRef, { let path = path.as_ref(); - let creds_config_str = fs::read_to_string(path).context(error::File { path })?; - toml::from_str(&creds_config_str).context(error::InvalidToml { path }) + let creds_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; + toml::from_str(&creds_config_str).context(error::InvalidTomlSnafu { path }) } } @@ -169,7 +169,7 @@ impl DatacenterCredsBuilder { /// contains a value pub fn build(self) -> Result { let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfig { what }); + |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); Ok(DatacenterCreds { username: get_or_err(self.username, "vSphere username")?, @@ -202,7 +202,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub enum Error { #[snafu(display("Failed to read '{}': {}", path.display(), source))] File { path: PathBuf, source: io::Error }, diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index bac72852..7b186e6d 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -14,7 +14,7 @@ reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tl sha2 = "0.9" shell-words = "1.0" simplelog = "0.10" -snafu = "0.6" +snafu = "0.7" structopt = { version = "0.3", default-features = false } tempfile = "3.1" toml = "0.5" diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index 838b48b0..fc7b2bab 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -55,15 +55,15 @@ macro_rules! 
tuftool { ($format_str:expr, $($format_arg:expr),*) => { let arg_str = format!($format_str, $($format_arg),*); trace!("tuftool arg string: {}", arg_str); - let args = shell_words::split(&arg_str).context(error::CommandSplit { command: &arg_str })?; + let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?; trace!("tuftool split args: {:#?}", args); let status = Command::new("tuftool") .args(args) .status() - .context(error::TuftoolSpawn)?; + .context(error::TuftoolSpawnSnafu)?; - ensure!(status.success(), error::TuftoolResult { + ensure!(status.success(), error::TuftoolResultSnafu { command: arg_str, code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) }); @@ -76,19 +76,19 @@ fn run() -> Result<()> { let args = Args::from_args(); // SimpleLogger will send errors to stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; // Make /roles and /keys directories, if they don't exist, so we can write generated files. - let role_dir = args.root_role_path.parent().context(error::Path { + let role_dir = args.root_role_path.parent().context(error::PathSnafu { path: &args.root_role_path, thing: "root role", })?; - let key_dir = args.default_key_path.parent().context(error::Path { + let key_dir = args.default_key_path.parent().context(error::PathSnafu { path: &args.default_key_path, thing: "key", })?; - fs::create_dir_all(role_dir).context(error::Mkdir { path: role_dir })?; - fs::create_dir_all(key_dir).context(error::Mkdir { path: key_dir })?; + fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; + fs::create_dir_all(key_dir).context(error::MkdirSnafu { path: key_dir })?; // Main branching logic for deciding whether to create role/key, use what we have, or error. match find_root_role_and_key(&args)? { @@ -96,7 +96,7 @@ fn run() -> Result<()> { (Some(_root_role_path), None) => { ensure!( args.allow_missing_key, - error::MissingKey { repo: args.repo } + error::MissingKeySnafu { repo: args.repo } ); Ok(()) } @@ -109,7 +109,7 @@ fn run() -> Result<()> { } let temp_root_role = - NamedTempFile::new_in(&role_dir).context(error::TempFileCreate { + NamedTempFile::new_in(&role_dir).context(error::TempFileCreateSnafu { purpose: "root role", })?; let temp_root_role_path = temp_root_role.path().display(); @@ -141,7 +141,7 @@ fn run() -> Result<()> { Url::from_file_path(&args.default_key_path) .ok() - .context(error::FileToUrl { + .context(error::FileToUrlSnafu { path: args.default_key_path, })? }; @@ -151,7 +151,7 @@ fn run() -> Result<()> { temp_root_role .persist_noclobber(&args.root_role_path) - .context(error::TempFilePersist { + .context(error::TempFilePersistSnafu { path: &args.root_role_path, })?; @@ -163,7 +163,7 @@ fn run() -> Result<()> { // Root role files don't need to be secret. fs::set_permissions(&args.root_role_path, fs::Permissions::from_mode(0o644)).context( - error::SetMode { + error::SetModeSnafu { path: &args.root_role_path, }, )?; @@ -177,9 +177,11 @@ fn run() -> Result<()> { fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option)> { let (mut root_role_path, mut key_url) = (None, None); - if InfraConfig::lock_or_infra_config_exists(&args.infra_config_path).context(error::Config)? { + if InfraConfig::lock_or_infra_config_exists(&args.infra_config_path) + .context(error::ConfigSnafu)? 
+ { let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::Config)?; + .context(error::ConfigSnafu)?; trace!("Parsed infra config: {:?}", infra_config); // Check whether the user has the relevant repo defined in their Infra.toml. @@ -195,7 +197,7 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) // If it's already been downloaded, just confirm the checksum. if args.root_role_path.exists() { let root_role_data = - fs::read_to_string(&args.root_role_path).context(error::ReadFile { + fs::read_to_string(&args.root_role_path).context(error::ReadFileSnafu { path: &args.root_role_path, })?; let mut d = Sha512::new(); @@ -204,7 +206,7 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) ensure!( &digest == sha512, - error::Hash { + error::HashSnafu { expected: sha512, got: digest, thing: args.root_role_path.to_string_lossy() @@ -221,13 +223,13 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) let path = url .to_file_path() .ok() - .with_context(|| error::UrlToFile { url: url.clone() })?; - fs::read_to_string(&path).context(error::ReadFile { path: &path })? + .with_context(|| error::UrlToFileSnafu { url: url.clone() })?; + fs::read_to_string(&path).context(error::ReadFileSnafu { path: &path })? } else { reqwest::blocking::get(url.clone()) - .with_context(|| error::GetUrl { url: url.clone() })? + .with_context(|_| error::GetUrlSnafu { url: url.clone() })? .text() - .with_context(|| error::GetUrl { url: url.clone() })? + .with_context(|_| error::GetUrlSnafu { url: url.clone() })? }; let mut d = Sha512::new(); @@ -236,7 +238,7 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) ensure!( &digest == sha512, - error::Hash { + error::HashSnafu { expected: sha512, got: digest, thing: url.to_string() @@ -244,9 +246,11 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) ); // Write root role to expected path on disk. - fs::write(&args.root_role_path, &root_role_data).context(error::WriteFile { - path: &args.root_role_path, - })?; + fs::write(&args.root_role_path, &root_role_data).context( + error::WriteFileSnafu { + path: &args.root_role_path, + }, + )?; debug!("Downloaded root role to {}", args.root_role_path.display()); } @@ -254,14 +258,14 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() { // Must specify both URL and checksum. 
- error::RootRoleConfig.fail()?; + error::RootRoleConfigSnafu.fail()?; } if let Some(key_config) = &repo_config.signing_keys { key_url = Some( Url::try_from(key_config.clone()) .ok() - .context(error::SigningKeyUrl { repo: &args.repo })?, + .context(error::SigningKeyUrlSnafu { repo: &args.repo })?, ); } } else { @@ -284,7 +288,7 @@ fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option) } if key_url.is_none() && args.default_key_path.exists() { key_url = Some(Url::from_file_path(&args.default_key_path).ok().context( - error::FileToUrl { + error::FileToUrlSnafu { path: &args.default_key_path, }, )?); @@ -310,7 +314,7 @@ mod error { use url::Url; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(super) enum Error { #[snafu(display("Error splitting shell command - {} - input: {}", source, command))] CommandSplit { diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index c319faa2..eafa77ce 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -31,7 +31,7 @@ rusoto_signature = "0.47.0" rusoto_ssm = { version = "0.47.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.47.0", default-features = false, features = ["rustls"] } simplelog = "0.10.0" -snafu = "0.6" +snafu = "0.7" semver = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index fc23716d..e7343abe 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -78,8 +78,9 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { Ok(amis) => { // Write the AMI IDs to file if requested if let Some(ref path) = ami_args.ami_output { - let file = File::create(path).context(error::FileCreate { path })?; - serde_json::to_writer_pretty(file, &amis).context(error::Serialize { path })?; + let file = File::create(path).context(error::FileCreateSnafu { path })?; + serde_json::to_writer_pretty(file, &amis) + .context(error::SerializeSnafu { path })?; info!("Wrote AMI data to {}", path.display()); } Ok(()) @@ -92,8 +93,8 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let mut amis = HashMap::new(); // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = - InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) + .context(error::ConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(|| Default::default()); @@ -105,12 +106,12 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> aws.regions.clone().into() } .into_iter() - .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) + .map(|name| region_from_string(&name, &aws).context(error::ParseRegionSnafu)) .collect::>>()?; ensure!( !regions.is_empty(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "aws.regions" } ); @@ -119,16 +120,18 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let base_region = regions.remove(0); // Build EBS client for snapshot management, and EC2 client for registration - let base_ebs_client = - build_client::(&base_region, &base_region, &aws).context(error::Client { + let base_ebs_client = build_client::(&base_region, &base_region, &aws).context( + error::ClientSnafu { client_type: "EBS", region: base_region.name(), - })?; - let base_ec2_client 
= - build_client::(&base_region, &base_region, &aws).context(error::Client { + }, + )?; + let base_ec2_client = build_client::(&base_region, &base_region, &aws).context( + error::ClientSnafu { client_type: "EC2", region: base_region.name(), - })?; + }, + )?; // Check if the AMI already exists, in which case we can use the existing ID, otherwise we // register a new one. @@ -139,7 +142,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &base_ec2_client, ) .await - .context(error::GetAmiId { + .context(error::GetAmiIdSnafu { name: &ami_args.name, arch: &ami_args.arch, region: base_region.name(), @@ -154,7 +157,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> ); let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) .await - .context(error::GetSnapshots { + .context(error::GetSnapshotsSnafu { image_id: &found_id, region: base_region.name(), })?; @@ -171,7 +174,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &base_ec2_client, ) .await - .context(error::RegisterImage { + .context(error::RegisterImageSnafu { name: &ami_args.name, arch: &ami_args.arch, region: base_region.name(), @@ -206,7 +209,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &aws, ) .await - .context(error::WaitAmi { + .context(error::WaitAmiSnafu { id: &ids_of_image.image_id, region: base_region.name(), })?; @@ -220,18 +223,19 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // Get the account ID used in the base region; we don't need to grant to it so we can remove it // from the list. - let base_sts_client = - build_client::(&base_region, &base_region, &aws).context(error::Client { + let base_sts_client = build_client::(&base_region, &base_region, &aws).context( + error::ClientSnafu { client_type: "STS", region: base_region.name(), - })?; + }, + )?; let response = base_sts_client .get_caller_identity(GetCallerIdentityRequest {}) .await - .context(error::GetCallerIdentity { + .context(error::GetCallerIdentitySnafu { region: base_region.name(), })?; - let base_account_id = response.account.context(error::MissingInResponse { + let base_account_id = response.account.context(error::MissingInResponseSnafu { request_type: "GetCallerIdentity", missing: "account", })?; @@ -251,7 +255,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &base_region, ) .await - .context(error::GrantAccess { + .context(error::GrantAccessSnafu { thing: "snapshots", region: base_region.name(), })?; @@ -265,7 +269,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> &base_region, ) .await - .context(error::GrantAccess { + .context(error::GrantAccessSnafu { thing: "image", region: base_region.name(), })?; @@ -276,7 +280,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let mut ec2_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { let ec2_client = - build_client::(®ion, &base_region, &aws).context(error::Client { + build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; @@ -299,7 +303,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // If an AMI already existed, just add it to our list, otherwise prepare a copy request. 
let mut copy_requests = Vec::with_capacity(regions.len()); for (region, get_response) in get_responses { - let get_response = get_response.context(error::GetAmiId { + let get_response = get_response.context(error::GetAmiIdSnafu { name: &ami_args.name, arch: &ami_args.arch, region: region.name(), @@ -389,7 +393,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> } } - ensure!(!saw_error, error::AmiCopy); + ensure!(!saw_error, error::AmiCopySnafu); Ok(amis) } @@ -425,7 +429,7 @@ async fn get_account_ids( let mut sts_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { let sts_client = - build_client::(®ion, &base_region, &aws).context(error::Client { + build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { client_type: "STS", region: region.name(), })?; @@ -450,10 +454,10 @@ async fn get_account_ids( )> = request_stream.collect().await; for (region, response) in responses { - let response = response.context(error::GetCallerIdentity { + let response = response.context(error::GetCallerIdentitySnafu { region: region.name(), })?; - let account_id = response.account.context(error::MissingInResponse { + let account_id = response.account.context(error::MissingInResponseSnafu { request_type: "GetCallerIdentity", missing: "account", })?; @@ -472,7 +476,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Some AMIs failed to copy, see above"))] AmiCopy, diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 45dc4b41..50d78f7e 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -37,7 +37,7 @@ async fn _register_image( let root_snapshot = snapshot_from_image(&ami_args.root_image, &uploader, None, ami_args.no_progress) .await - .context(error::Snapshot { + .context(error::SnapshotSnafu { path: &ami_args.root_image, region, })?; @@ -47,7 +47,7 @@ async fn _register_image( if let Some(data_image) = &ami_args.data_image { let snapshot = snapshot_from_image(data_image, &uploader, None, ami_args.no_progress) .await - .context(error::Snapshot { + .context(error::SnapshotSnafu { path: &ami_args.root_image, region, })?; @@ -60,7 +60,7 @@ async fn _register_image( waiter .wait(&root_snapshot, Default::default()) .await - .context(error::WaitSnapshot { + .context(error::WaitSnapshotSnafu { snapshot_type: "root", })?; @@ -68,7 +68,7 @@ async fn _register_image( waiter .wait(&data_snapshot, Default::default()) .await - .context(error::WaitSnapshot { + .context(error::WaitSnapshotSnafu { snapshot_type: "data", })?; } @@ -118,11 +118,11 @@ async fn _register_image( let register_response = ec2_client .register_image(register_request) .await - .context(error::RegisterImage { region })?; + .context(error::RegisterImageSnafu { region })?; let image_id = register_response .image_id - .context(error::MissingImageId { region })?; + .context(error::MissingImageIdSnafu { region })?; let mut snapshot_ids = vec![root_snapshot]; if let Some(data_snapshot) = data_snapshot { @@ -207,14 +207,14 @@ where let describe_response = ec2_client .describe_images(describe_request) .await - .context(error::DescribeImages { region })?; + .context(error::DescribeImagesSnafu { region })?; if let Some(mut images) = describe_response.images { if images.is_empty() { return Ok(None); } ensure!( images.len() == 1, - error::MultipleImages { + error::MultipleImagesSnafu { images: images 
.into_iter() .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) @@ -224,7 +224,9 @@ where let image = images.remove(0); // If there is an image but we couldn't find the ID of it, fail rather than returning None, // which would indicate no image. - let id = image.image_id.context(error::MissingImageId { region })?; + let id = image + .image_id + .context(error::MissingImageIdSnafu { region })?; Ok(Some(id)) } else { Ok(None) @@ -237,7 +239,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to describe images in {}: {}", region, source))] DescribeImages { diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs index ab358053..04a9d0d6 100644 --- a/tools/pubsys/src/aws/ami/snapshot.rs +++ b/tools/pubsys/src/aws/ami/snapshot.rs @@ -31,13 +31,13 @@ where let progress_bar = build_progress_bar(no_progress, "Uploading snapshot"); let filename = path .file_name() - .context(error::InvalidImagePath { path })? + .context(error::InvalidImagePathSnafu { path })? .to_string_lossy(); uploader .upload_from_file(path, desired_size, Some(&filename), progress_bar) .await - .context(error::UploadSnapshot) + .context(error::UploadSnapshotSnafu) } mod error { @@ -45,7 +45,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Invalid image path '{}'", path.display()))] InvalidImagePath { path: PathBuf }, diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 61c9ffb4..77fb23b8 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -27,7 +27,7 @@ pub(crate) async fn wait_for_ami( // Stop if we're over max, unless we're on a success streak, then give it some wiggle room. ensure!( (attempts - successes) <= max_attempts, - error::MaxAttempts { + error::MaxAttemptsSnafu { id, max_attempts, region: region.name() @@ -41,17 +41,15 @@ pub(crate) async fn wait_for_ami( // Use a new client each time so we have more confidence that different endpoints can see // the new AMI. let ec2_client = - build_client::(®ion, &sts_region, &aws).context(error::Client { + build_client::(®ion, &sts_region, &aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; - let describe_response = - ec2_client - .describe_images(describe_request) - .await - .context(error::DescribeImages { - region: region.name(), - })?; + let describe_response = ec2_client.describe_images(describe_request).await.context( + error::DescribeImagesSnafu { + region: region.name(), + }, + )?; // The response contains an Option>, so we have to check that we got a // list at all, and then that the list contains the ID in question. 
if let Some(images) = describe_response.images { @@ -76,7 +74,7 @@ pub(crate) async fn wait_for_ami( !["invalid", "deregistered", "failed", "error"] .iter() .any(|e| e == found_state), - error::State { + error::StateSnafu { id, state: found_state, region: region.name() @@ -113,7 +111,7 @@ mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] Client { diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 588885a5..042883e2 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -72,7 +72,7 @@ pub(crate) fn build_client( base_provider(&aws.profile)?, )?; Ok(T::new_with( - rusoto_core::HttpClient::new().context(error::HttpClient)?, + rusoto_core::HttpClient::new().context(error::HttpClientSnafu)?, provider, region.clone(), )) @@ -106,7 +106,7 @@ where let mut provider = CredentialsProvider(Box::new(base_provider)); for assume_role in assume_roles { let sts = StsClient::new_with( - HttpClient::new().context(error::HttpClient)?, + HttpClient::new().context(error::HttpClientSnafu)?, provider, sts_region.clone(), ); @@ -120,7 +120,7 @@ where None, // MFA serial ); provider = CredentialsProvider(Box::new( - AutoRefreshingProvider::new(expiring_provider).context(error::Provider)?, + AutoRefreshingProvider::new(expiring_provider).context(error::ProviderSnafu)?, )); } Ok(provider) @@ -130,12 +130,12 @@ where /// credentials mechanisms. fn base_provider(maybe_profile: &Option) -> Result { if let Some(profile) = maybe_profile { - let mut p = ProfileProvider::new().context(error::Provider)?; + let mut p = ProfileProvider::new().context(error::ProviderSnafu)?; p.set_profile(profile); Ok(CredentialsProvider(Box::new(p))) } else { Ok(CredentialsProvider(Box::new( - DefaultCredentialsProvider::new().context(error::Provider)?, + DefaultCredentialsProvider::new().context(error::ProviderSnafu)?, ))) } } @@ -144,7 +144,7 @@ pub(crate) mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to create HTTP client: {}", source))] HttpClient { diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index bb81132c..954a3b36 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -19,7 +19,7 @@ fn region_from_string(name: &str, aws: &AwsConfig) -> Result { name: name.to_string(), endpoint, }, - None => name.parse().context(error::ParseRegion { name })?, + None => name.parse().context(error::ParseRegionSnafu { name })?, }) } @@ -28,7 +28,7 @@ pub(crate) fn parse_arch(input: &str) -> Result { match input { "x86_64" | "amd64" => Ok("x86_64".to_string()), "arm64" | "aarch64" => Ok("arm64".to_string()), - _ => error::ParseArch { + _ => error::ParseArchSnafu { input, msg: "unknown architecture", } @@ -40,7 +40,7 @@ mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to parse arch '{}': {}", input, msg))] ParseArch { input: String, msg: String }, diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index e27f3507..2b7a2fb1 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -53,8 +53,8 @@ pub(crate) async fn run(args: &Args, 
promote_args: &PromoteArgs) -> Result<()> { // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = - InfraConfig::from_path_or_lock(&args.infra_config_path, false).context(error::Config)?; + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(error::ConfigSnafu)?; trace!("Parsed infra config: {:#?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); @@ -67,12 +67,12 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { aws.regions.clone().into() } .into_iter() - .map(|name| region_from_string(&name, &aws).context(error::ParseRegion)) + .map(|name| region_from_string(&name, &aws).context(error::ParseRegionSnafu)) .collect::>>()?; ensure!( !regions.is_empty(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "aws.regions" } ); @@ -81,7 +81,7 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { let mut ssm_clients = HashMap::with_capacity(regions.len()); for region in ®ions { let ssm_client = - build_client::(region, &base_region, &aws).context(error::Client { + build_client::(region, &base_region, &aws).context(error::ClientSnafu { client_type: "SSM", region: region.name(), })?; @@ -111,7 +111,7 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { // in their naming let template_parameters = template::get_parameters(&promote_args.template_path, &source_build_context) - .context(error::FindTemplates)?; + .context(error::FindTemplatesSnafu)?; if template_parameters.parameters.is_empty() { info!( @@ -126,10 +126,10 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { // from the same template, so we know what to copy. let source_parameter_map = template::render_parameter_names(&template_parameters, ssm_prefix, &source_build_context) - .context(error::RenderTemplates)?; + .context(error::RenderTemplatesSnafu)?; let target_parameter_map = template::render_parameter_names(&template_parameters, ssm_prefix, &target_build_context) - .context(error::RenderTemplates)?; + .context(error::RenderTemplatesSnafu)?; // Parameters are the same in each region, so we need to associate each region with each of // the parameter names so we can fetch them. 
@@ -155,21 +155,21 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { info!("Getting current SSM parameters for source and target names"); let current_source_parameters = ssm::get_parameters(&source_keys, &ssm_clients) .await - .context(error::FetchSsm)?; + .context(error::FetchSsmSnafu)?; trace!( "Current source SSM parameters: {:#?}", current_source_parameters ); ensure!( !current_source_parameters.is_empty(), - error::EmptySource { + error::EmptySourceSnafu { version: &promote_args.source } ); let current_target_parameters = ssm::get_parameters(&target_keys, &ssm_clients) .await - .context(error::FetchSsm)?; + .context(error::FetchSsmSnafu)?; trace!( "Current target SSM parameters: {:#?}", current_target_parameters @@ -208,12 +208,12 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { info!("Setting updated SSM parameters."); ssm::set_parameters(&set_parameters, &ssm_clients) .await - .context(error::SetSsm)?; + .context(error::SetSsmSnafu)?; info!("Validating whether live parameters in SSM reflect changes."); ssm::validate_parameters(&set_parameters, &ssm_clients) .await - .context(error::ValidateSsm)?; + .context(error::ValidateSsmSnafu)?; info!("All parameters match requested values."); Ok(()) @@ -225,7 +225,7 @@ mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] Client { diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 0e692df9..84b9c25f 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -66,12 +66,12 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { "Using AMI data from path: {}", publish_args.ami_input.display() ); - let file = File::open(&publish_args.ami_input).context(error::File { + let file = File::open(&publish_args.ami_input).context(error::FileSnafu { op: "open", path: &publish_args.ami_input, })?; let mut ami_input: HashMap = - serde_json::from_reader(file).context(error::Deserialize { + serde_json::from_reader(file).context(error::DeserializeSnafu { path: &publish_args.ami_input, })?; trace!("Parsed AMI input: {:?}", ami_input); @@ -80,14 +80,14 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // file if a user created one manually, and they shouldn't be creating an empty file. 
ensure!( !ami_input.is_empty(), - error::Input { + error::InputSnafu { path: &publish_args.ami_input } ); // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = - InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) + .context(error::ConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); @@ -100,11 +100,11 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { }; ensure!( !regions.is_empty(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "aws.regions" } ); - let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; + let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegionSnafu)?; // Check that the requested regions are a subset of the regions we *could* publish from the AMI // input JSON. @@ -112,7 +112,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); ensure!( requested_regions.is_subset(&known_regions), - error::UnknownRegions { + error::UnknownRegionsSnafu { regions: requested_regions .difference(&known_regions) .map(|s| s.to_string()) @@ -126,10 +126,10 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let image = ami_input .remove(&name) // This could only happen if someone removes the check above... - .with_context(|| error::UnknownRegions { + .with_context(|| error::UnknownRegionsSnafu { regions: vec![name.clone()], })?; - let region = region_from_string(&name, &aws).context(error::ParseRegion)?; + let region = region_from_string(&name, &aws).context(error::ParseRegionSnafu)?; amis.insert(region, image); } @@ -138,7 +138,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let mut ec2_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { let ec2_client = - build_client::(®ion, &base_region, &aws).context(error::Client { + build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; @@ -162,7 +162,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // Make sure waits succeeded and AMIs are available. for ((region, image_id), wait_response) in wait_responses { - wait_response.context(error::WaitAmi { + wait_response.context(error::WaitAmiSnafu { id: &image_id, region: region.name(), })?; @@ -209,25 +209,27 @@ pub(crate) async fn get_snapshots( ..Default::default() }; let describe_response = ec2_client.describe_images(describe_request).await; - let describe_response = describe_response.context(error::DescribeImages { + let describe_response = describe_response.context(error::DescribeImagesSnafu { region: region.name(), })?; // Get the image description, ensuring we only have one. 
- let mut images = describe_response.images.context(error::MissingInResponse { - request_type: "DescribeImages", - missing: "images", - })?; + let mut images = describe_response + .images + .context(error::MissingInResponseSnafu { + request_type: "DescribeImages", + missing: "images", + })?; ensure!( !images.is_empty(), - error::MissingImage { + error::MissingImageSnafu { region: region.name(), image_id: image_id.to_string(), } ); ensure!( images.len() == 1, - error::MultipleImages { + error::MultipleImagesSnafu { region: region.name(), images: images .into_iter() @@ -240,24 +242,24 @@ pub(crate) async fn get_snapshots( // Look into the block device mappings for snapshots. let bdms = image .block_device_mappings - .context(error::MissingInResponse { + .context(error::MissingInResponseSnafu { request_type: "DescribeImages", missing: "block_device_mappings", })?; ensure!( !bdms.is_empty(), - error::MissingInResponse { + error::MissingInResponseSnafu { request_type: "DescribeImages", missing: "non-empty block_device_mappings" } ); let mut snapshot_ids = Vec::with_capacity(bdms.len()); for bdm in bdms { - let ebs = bdm.ebs.context(error::MissingInResponse { + let ebs = bdm.ebs.context(error::MissingInResponseSnafu { request_type: "DescribeImages", missing: "ebs in block_device_mappings", })?; - let snapshot_id = ebs.snapshot_id.context(error::MissingInResponse { + let snapshot_id = ebs.snapshot_id.context(error::MissingInResponseSnafu { request_type: "DescribeImages", missing: "snapshot_id in block_device_mappings.ebs", })?; @@ -332,7 +334,7 @@ pub(crate) async fn modify_snapshots( )> = request_stream.collect().await; for (snapshot_id, response) in responses { - response.context(error::ModifyImageAttribute { + response.context(error::ModifyImageAttributeSnafu { snapshot_id, region: region.name(), })? 
@@ -399,7 +401,7 @@ pub(crate) async fn modify_regional_snapshots( ensure!( error_count == 0, - error::ModifySnapshotAttributes { + error::ModifySnapshotAttributesSnafu { error_count, success_count, } @@ -430,7 +432,7 @@ pub(crate) async fn modify_image( ec2_client .modify_image_attribute(modify_image_request) .await - .context(error::ModifyImageAttributes { + .context(error::ModifyImageAttributesSnafu { image_id, region: region.name(), }) @@ -488,7 +490,7 @@ pub(crate) async fn modify_regional_images( ensure!( error_count == 0, - error::ModifyImagesAttributes { + error::ModifyImagesAttributesSnafu { error_count, success_count, } @@ -506,7 +508,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] Client { diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index d2245921..7659de30 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -57,8 +57,8 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = - InfraConfig::from_path_or_lock(&args.infra_config_path, false).context(error::Config)?; + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(error::ConfigSnafu)?; trace!("Parsed infra config: {:#?}", infra_config); let aws = infra_config.aws.unwrap_or_else(Default::default); let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); @@ -71,18 +71,18 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { }; ensure!( !regions.is_empty(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "aws.regions" } ); - let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegion)?; + let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegionSnafu)?; let amis = parse_ami_input(®ions, &ssm_args, &aws)?; let mut ssm_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { let ssm_client = - build_client::(®ion, &base_region, &aws).context(error::Client { + build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { client_type: "SSM", region: region.name(), })?; @@ -103,7 +103,7 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { ssm_args.template_path.display() ); let template_parameters = template::get_parameters(&ssm_args.template_path, &build_context) - .context(error::FindTemplates)?; + .context(error::FindTemplatesSnafu)?; if template_parameters.parameters.is_empty() { info!( @@ -115,7 +115,7 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { let new_parameters = template::render_parameters(template_parameters, amis, ssm_prefix, &build_context) - .context(error::RenderTemplates)?; + .context(error::RenderTemplatesSnafu)?; trace!("Generated templated parameters: {:#?}", new_parameters); // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= @@ -124,7 +124,7 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { let new_parameter_names: Vec<&SsmKey> = new_parameters.keys().collect(); let current_parameters = ssm::get_parameters(&new_parameter_names, &ssm_clients) .await - .context(error::FetchSsm)?; + .context(error::FetchSsmSnafu)?; trace!("Current SSM 
parameters: {:#?}", current_parameters); // Show the difference between source and target parameters in SSM. @@ -139,7 +139,7 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { if !ssm_args.allow_clobber { let current_keys: HashSet<&SsmKey> = current_parameters.keys().collect(); let new_keys: HashSet<&SsmKey> = parameters_to_set.keys().collect(); - ensure!(current_keys.is_disjoint(&new_keys), error::NoClobber); + ensure!(current_keys.is_disjoint(&new_keys), error::NoClobberSnafu); } // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= @@ -147,12 +147,12 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { info!("Setting updated SSM parameters."); ssm::set_parameters(¶meters_to_set, &ssm_clients) .await - .context(error::SetSsm)?; + .context(error::SetSsmSnafu)?; info!("Validating whether live parameters in SSM reflect changes."); ssm::validate_parameters(¶meters_to_set, &ssm_clients) .await - .context(error::ValidateSsm)?; + .context(error::ValidateSsmSnafu)?; info!("All parameters match requested values."); Ok(()) @@ -195,12 +195,12 @@ fn parse_ami_input( aws: &AwsConfig, ) -> Result> { info!("Using AMI data from path: {}", ssm_args.ami_input.display()); - let file = File::open(&ssm_args.ami_input).context(error::File { + let file = File::open(&ssm_args.ami_input).context(error::FileSnafu { op: "open", path: &ssm_args.ami_input, })?; let mut ami_input: HashMap = - serde_json::from_reader(file).context(error::Deserialize { + serde_json::from_reader(file).context(error::DeserializeSnafu { path: &ssm_args.ami_input, })?; trace!("Parsed AMI input: {:#?}", ami_input); @@ -209,7 +209,7 @@ fn parse_ami_input( // file if a user created one manually, and they shouldn't be creating an empty file. ensure!( !ami_input.is_empty(), - error::Input { + error::InputSnafu { path: &ssm_args.ami_input } ); @@ -220,7 +220,7 @@ fn parse_ami_input( let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); ensure!( requested_regions.is_subset(&known_regions), - error::UnknownRegions { + error::UnknownRegionsSnafu { regions: requested_regions .difference(&known_regions) .map(|s| s.to_string()) @@ -234,10 +234,10 @@ fn parse_ami_input( let image = ami_input .remove(name) // This could only happen if someone removes the check above... 
- .with_context(|| error::UnknownRegions { + .with_context(|| error::UnknownRegionsSnafu { regions: vec![name.clone()], })?; - let region = region_from_string(&name, &aws).context(error::ParseRegion)?; + let region = region_from_string(&name, &aws).context(error::ParseRegionSnafu)?; amis.insert(region, image); } @@ -301,7 +301,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] Client { diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 3f2dc472..91676e2c 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -80,7 +80,7 @@ where new_regions.insert(region.name().to_string()); continue; } else { - return Err(e).context(error::GetParameters { + return Err(e).context(error::GetParametersSnafu { region: region.name(), }); } @@ -95,7 +95,7 @@ where let total_count = valid_count + invalid_count; ensure!( total_count == expected_len, - error::MissingInResponse { + error::MissingInResponseSnafu { region: region.name(), request_type: "GetParameters", missing: format!( @@ -109,12 +109,12 @@ where if let Some(valid_parameters) = response.parameters { if !valid_parameters.is_empty() { for parameter in valid_parameters { - let name = parameter.name.context(error::MissingInResponse { + let name = parameter.name.context(error::MissingInResponseSnafu { region: region.name(), request_type: "GetParameters", missing: "parameter name", })?; - let value = parameter.value.context(error::MissingInResponse { + let value = parameter.value.context(error::MissingInResponseSnafu { region: region.name(), request_type: "GetParameters", missing: format!("value for parameter {}", name), @@ -187,7 +187,7 @@ pub(crate) async fn set_parameters( ensure!( request_interval <= max_interval, - error::Throttled { max_interval } + error::ThrottledSnafu { max_interval } ); // Build requests for parameters. 
We need to group them by region so we can run each @@ -284,7 +284,7 @@ pub(crate) async fn set_parameters( ); } } - return error::SetParameters { + return error::SetParametersSnafu { failure_count: failed_parameters.len(), total_count, } @@ -330,7 +330,7 @@ pub(crate) async fn validate_parameters( success = false; } } - ensure!(success, error::ValidateParameters); + ensure!(success, error::ValidateParametersSnafu); Ok(()) } @@ -342,7 +342,7 @@ mod error { use std::time::Duration; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source))] GetParameters { diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index c869567a..52aec136 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -40,12 +40,12 @@ pub(crate) fn get_parameters( template_path: &Path, build_context: &BuildContext<'_>, ) -> Result { - let templates_str = fs::read_to_string(&template_path).context(error::File { + let templates_str = fs::read_to_string(&template_path).context(error::FileSnafu { op: "read", path: &template_path, })?; let mut template_parameters: TemplateParameters = - toml::from_str(&templates_str).context(error::InvalidToml { + toml::from_str(&templates_str).context(error::InvalidTomlSnafu { path: &template_path, })?; trace!("Parsed templates: {:#?}", template_parameters); @@ -54,7 +54,7 @@ pub(crate) fn get_parameters( // conditionals below, we allow that and just don't set any parameters. ensure!( !template_parameters.parameters.is_empty(), - error::NoTemplates { + error::NoTemplatesSnafu { path: template_path } ); @@ -101,17 +101,17 @@ pub(crate) fn render_parameters( for tp in &template_parameters.parameters { let mut tt = TinyTemplate::new(); tt.add_template("name", &tp.name) - .context(error::AddTemplate { template: &tp.name })?; + .context(error::AddTemplateSnafu { template: &tp.name })?; tt.add_template("value", &tp.value) - .context(error::AddTemplate { + .context(error::AddTemplateSnafu { template: &tp.value, })?; let name_suffix = tt .render("name", &context) - .context(error::RenderTemplate { template: &tp.name })?; + .context(error::RenderTemplateSnafu { template: &tp.name })?; let value = tt .render("value", &context) - .context(error::RenderTemplate { + .context(error::RenderTemplateSnafu { template: &tp.value, })?; @@ -137,10 +137,10 @@ pub(crate) fn render_parameter_names( for tp in &template_parameters.parameters { let mut tt = TinyTemplate::new(); tt.add_template("name", &tp.name) - .context(error::AddTemplate { template: &tp.name })?; + .context(error::AddTemplateSnafu { template: &tp.name })?; let name_suffix = tt .render("name", &build_context) - .context(error::RenderTemplate { template: &tp.name })?; + .context(error::RenderTemplateSnafu { template: &tp.name })?; new_parameters.insert(tp.name.clone(), join_name(ssm_prefix, &name_suffix)); } @@ -164,7 +164,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error building template from '{}': {}", template, source))] AddTemplate { diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 48cce8c4..29d5f34f 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -40,46 +40,54 @@ fn run() -> Result<()> { let args = Args::from_args(); // SimpleLogger will send errors to 
stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::Logger)?; + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; match args.subcommand { - SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::Repo), + SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::RepoSnafu), SubCommand::ValidateRepo(ref validate_repo_args) => { - repo::validate_repo::run(&args, &validate_repo_args).context(error::ValidateRepo) + repo::validate_repo::run(&args, &validate_repo_args).context(error::ValidateRepoSnafu) } SubCommand::CheckRepoExpirations(ref check_expirations_args) => { repo::check_expirations::run(&args, &check_expirations_args) - .context(error::CheckExpirations) + .context(error::CheckExpirationsSnafu) } SubCommand::RefreshRepo(ref refresh_repo_args) => { - repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepo) + repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepoSnafu) } SubCommand::Ami(ref ami_args) => { - let rt = Runtime::new().context(error::Runtime)?; - rt.block_on(async { aws::ami::run(&args, &ami_args).await.context(error::Ami) }) + let rt = Runtime::new().context(error::RuntimeSnafu)?; + rt.block_on(async { + aws::ami::run(&args, &ami_args) + .await + .context(error::AmiSnafu) + }) } SubCommand::PublishAmi(ref publish_args) => { - let rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::publish_ami::run(&args, &publish_args) .await - .context(error::PublishAmi) + .context(error::PublishAmiSnafu) }) } SubCommand::Ssm(ref ssm_args) => { - let rt = Runtime::new().context(error::Runtime)?; - rt.block_on(async { aws::ssm::run(&args, &ssm_args).await.context(error::Ssm) }) + let rt = Runtime::new().context(error::RuntimeSnafu)?; + rt.block_on(async { + aws::ssm::run(&args, &ssm_args) + .await + .context(error::SsmSnafu) + }) } SubCommand::PromoteSsm(ref promote_args) => { - let rt = Runtime::new().context(error::Runtime)?; + let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::promote_ssm::run(&args, &promote_args) .await - .context(error::PromoteSsm) + .context(error::PromoteSsmSnafu) }) } SubCommand::UploadOva(ref upload_args) => { - vmware::upload_ova::run(&args, &upload_args).context(error::UploadOva) + vmware::upload_ova::run(&args, &upload_args).context(error::UploadOvaSnafu) } } } @@ -138,7 +146,7 @@ mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(super) enum Error { #[snafu(display("Failed to build AMI: {}", source))] Ami { source: crate::aws::ami::Error }, diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index b08b1a3b..3892e999 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -111,9 +111,9 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> let filename = |path: &PathBuf| -> Result { Ok(path .file_name() - .context(error::InvalidImagePath { path })? + .context(error::InvalidImagePathSnafu { path })? .to_str() - .context(error::NonUtf8Path { path })? + .context(error::NonUtf8PathSnafu { path })? 
.to_string()) }; @@ -135,7 +135,7 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> repo_args.variant.clone(), images, ) - .context(error::AddUpdate)?; + .context(error::AddUpdateSnafu)?; // Add migrations =^..^= =^..^= =^..^= =^..^= @@ -143,10 +143,11 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> "Using release config from path: {}", repo_args.release_config_path.display() ); - let release = - Release::from_path(&repo_args.release_config_path).context(error::UpdateMetadataRead { + let release = Release::from_path(&repo_args.release_config_path).context( + error::UpdateMetadataReadSnafu { path: &repo_args.release_config_path, - })?; + }, + )?; trace!( "Adding migrations to manifest for versions: {:#?}", release @@ -169,10 +170,11 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> "Offsets from that file will be added to the release start time of: {}", wave_start_time ); - let waves = - UpdateWaves::from_path(&repo_args.wave_policy_path).context(error::UpdateMetadataRead { + let waves = UpdateWaves::from_path(&repo_args.wave_policy_path).context( + error::UpdateMetadataReadSnafu { path: &repo_args.wave_policy_path, - })?; + }, + )?; manifest .set_waves( repo_args.variant.clone(), @@ -181,7 +183,7 @@ fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> wave_start_time, &waves, ) - .context(error::SetWaves { + .context(error::SetWavesSnafu { wave_policy_path: &repo_args.wave_policy_path, })?; @@ -205,7 +207,7 @@ fn set_expirations( editor .snapshot_expires(snapshot_expiration) .targets_expires(targets_expiration) - .context(error::SetTargetsExpiration { + .context(error::SetTargetsExpirationSnafu { expiration: targets_expiration, })? .timestamp_expires(timestamp_expiration); @@ -222,7 +224,7 @@ fn set_versions(editor: &mut RepositoryEditor) -> Result<()> { editor .snapshot_version(version) .targets_version(version) - .context(error::SetTargetsVersion { version })? + .context(error::SetTargetsVersionSnafu { version })? .timestamp_version(version); Ok(()) @@ -244,16 +246,16 @@ where debug!("Adding target from path: {}", target_path.display()); editor .add_target_path(&target_path) - .context(error::AddTarget { path: &target_path })?; + .context(error::AddTargetSnafu { path: &target_path })?; } - let manifest_target = Target::from_path(&manifest_path).context(error::BuildTarget { + let manifest_target = Target::from_path(&manifest_path).context(error::BuildTargetSnafu { path: manifest_path.as_ref(), })?; debug!("Adding target for manifest.json"); editor .add_target("manifest.json", manifest_target) - .context(error::AddTarget { + .context(error::AddTargetSnafu { path: "manifest.json", })?; @@ -264,7 +266,7 @@ where repo_args.repo_expiration_policy_path.display() ); let expiration = RepoExpirationPolicy::from_path(&repo_args.repo_expiration_policy_path) - .context(error::Config)?; + .context(error::ConfigSnafu)?; let expiration_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME); let snapshot_expiration = expiration_start_time + expiration.snapshot_expiration; @@ -277,7 +279,7 @@ where editor .snapshot_expires(snapshot_expiration) .targets_expires(targets_expiration) - .context(error::SetTargetsExpiration { + .context(error::SetTargetsExpirationSnafu { expiration: targets_expiration, })? 
.timestamp_expires(timestamp_expiration); @@ -291,7 +293,7 @@ where editor .snapshot_version(version) .targets_version(version) - .context(error::SetTargetsVersion { version })? + .context(error::SetTargetsVersionSnafu { version })? .timestamp_version(version); Ok(()) @@ -314,7 +316,7 @@ fn repo_urls<'a>( }; let metadata_url_str = format!("{}{}{}/{}", metadata_base_url, base_slash, variant, arch); - let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrl { + let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrlSnafu { input: &metadata_url_str, })?; @@ -341,7 +343,7 @@ where // Try to load the repo... let repo_load_result = RepositoryLoader::new( - File::open(root_role_path).context(error::File { + File::open(root_role_path).context(error::FileSnafu { path: root_role_path, })?, metadata_url.clone(), @@ -355,21 +357,21 @@ where let target = "manifest.json"; let target = target .try_into() - .context(error::ParseTargetName { target })?; + .context(error::ParseTargetNameSnafu { target })?; let reader = repo .read_target(&target) - .context(error::ReadTarget { + .context(error::ReadTargetSnafu { target: target.raw(), })? - .with_context(|| error::NoManifest { + .with_context(|| error::NoManifestSnafu { metadata_url: metadata_url.clone(), })?; - let manifest = serde_json::from_reader(reader).context(error::InvalidJson { + let manifest = serde_json::from_reader(reader).context(error::InvalidJsonSnafu { path: "manifest.json", })?; - let editor = - RepositoryEditor::from_repo(root_role_path, repo).context(error::EditorFromRepo)?; + let editor = RepositoryEditor::from_repo(root_role_path, repo) + .context(error::EditorFromRepoSnafu)?; Ok(Some((editor, manifest))) } @@ -379,7 +381,7 @@ where if is_file_not_found_error(&e) { Ok(None) } else { - Err(e).with_context(|| error::RepoLoad { + Err(e).with_context(|_| error::RepoLoadSnafu { metadata_base_url: metadata_url.clone(), }) } @@ -404,11 +406,11 @@ fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result Result Result> { if let Some(region) = config.available_keys.get(key_id) { Ok(Some(KmsClient::new( - Region::from_str(region).context(error::ParseRegion { what: region })?, + Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, ))) } else { Ok(None) @@ -446,7 +448,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // existing repository. (The targets directory is shared, so it's fine if that exists.) 
ensure!( !Path::exists(&metadata_out_dir), - error::RepoExists { + error::RepoExistsSnafu { path: metadata_out_dir } ); @@ -454,8 +456,8 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { // Build repo =^..^= =^..^= =^..^= =^..^= // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = - InfraConfig::from_path_or_lock(&args.infra_config_path, true).context(error::Config)?; + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) + .context(error::ConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); // If the user has the requested (or "default") repo defined in their Infra.toml, use it, @@ -491,7 +493,8 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { metadata_url ); ( - RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditor)?, + RepositoryEditor::new(&repo_args.root_role_path) + .context(error::NewEditorSnafu)?, Manifest::default(), ) } @@ -499,7 +502,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { } else { info!("Did not find metadata and target URLs in infra config, creating a new repository"); ( - RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditor)?, + RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditorSnafu)?, Manifest::default(), ) }; @@ -508,9 +511,9 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { update_manifest(&repo_args, &mut manifest)?; // Write manifest to tempfile so it can be copied in as target later let manifest_path = NamedTempFile::new() - .context(error::TempFile)? + .context(error::TempFileSnafu)? .into_temp_path(); - update_metadata::write_file(&manifest_path, &manifest).context(error::ManifestWrite { + update_metadata::write_file(&manifest_path, &manifest).context(error::ManifestWriteSnafu { path: &manifest_path, })?; @@ -536,7 +539,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { } else { ensure!( repo_args.default_key_path.exists(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "signing_keys in repo config, and we found no local key", } ); @@ -545,13 +548,13 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { }) }; - let signed_repo = editor.sign(&[key_source]).context(error::RepoSign)?; + let signed_repo = editor.sign(&[key_source]).context(error::RepoSignSnafu)?; // Write repo =^..^= =^..^= =^..^= =^..^= // Write targets first so we don't have invalid metadata if targets fail info!("Writing repo targets to: {}", targets_out_dir.display()); - fs::create_dir_all(&targets_out_dir).context(error::CreateDir { + fs::create_dir_all(&targets_out_dir).context(error::CreateDirSnafu { path: &targets_out_dir, })?; @@ -560,7 +563,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let target = "manifest.json"; let target = target .try_into() - .context(error::ParseTargetName { target })?; + .context(error::ParseTargetNameSnafu { target })?; signed_repo .copy_target( &manifest_path, @@ -569,7 +572,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { PathExists::Fail, Some(&target), ) - .context(error::CopyTarget { + .context(error::CopyTargetSnafu { target: &manifest_path, path: &targets_out_dir, })?; @@ -583,7 +586,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { ); signed_repo .copy_target(copy_target, &targets_out_dir, PathExists::Skip, None) - .context(error::CopyTarget { + .context(error::CopyTargetSnafu { target: 
copy_target, path: &targets_out_dir, })?; @@ -596,19 +599,19 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { ); signed_repo .link_target(link_target, &targets_out_dir, PathExists::Skip, None) - .context(error::LinkTarget { + .context(error::LinkTargetSnafu { target: link_target, path: &targets_out_dir, })?; } info!("Writing repo metadata to: {}", metadata_out_dir.display()); - fs::create_dir_all(&metadata_out_dir).context(error::CreateDir { + fs::create_dir_all(&metadata_out_dir).context(error::CreateDirSnafu { path: &metadata_out_dir, })?; signed_repo .write(&metadata_out_dir) - .context(error::RepoWrite { + .context(error::RepoWriteSnafu { path: &repo_args.outdir, })?; @@ -623,7 +626,7 @@ mod error { use url::Url; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to add new update to manifest: {}", source))] AddUpdate { diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index 11ad4aaf..8af39b9d 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -82,7 +82,7 @@ fn check_expirations( ) -> Result<()> { // Load the repository let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::File { + File::open(root_role_path).context(repo_error::FileSnafu { path: root_role_path, })?, metadata_url.clone(), @@ -91,7 +91,7 @@ fn check_expirations( // We're gonna check the expiration ourselves .expiration_enforcement(ExpirationEnforcement::Unsafe) .load() - .context(repo_error::RepoLoad { + .context(repo_error::RepoLoadSnafu { metadata_base_url: metadata_url.clone(), })?; info!("Loaded TUF repo:\t{}", metadata_url); @@ -132,16 +132,16 @@ fn check_expirations( pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> Result<()> { // If a lock file exists, use that, otherwise use Infra.toml let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::Config)?; + .context(repo_error::ConfigSnafu)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config .repo .as_ref() - .context(repo_error::MissingConfig { + .context(repo_error::MissingConfigSnafu { missing: "repo section", })? .get(&check_expirations_args.repo) - .with_context(|| repo_error::MissingConfig { + .with_context(|| repo_error::MissingConfigSnafu { missing: format!("definition for repo {}", &check_expirations_args.repo), })?; @@ -150,7 +150,7 @@ pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> &check_expirations_args.variant, &check_expirations_args.arch, )? - .context(repo_error::MissingRepoUrls { + .context(repo_error::MissingRepoUrlsSnafu { repo: &check_expirations_args.repo, })?; check_expirations( @@ -168,7 +168,7 @@ mod error { use url::Url; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(context(false), display("{}", source))] Repo { source: crate::repo::Error }, diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 5bdea815..c8d243cb 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -73,7 +73,7 @@ fn refresh_repo( // existing repository. 
ensure!( !Path::exists(&metadata_out_dir), - repo_error::RepoExists { + repo_error::RepoExistsSnafu { path: metadata_out_dir } ); @@ -86,7 +86,7 @@ fn refresh_repo( // Load the repository and get the repo editor for it let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::File { + File::open(root_role_path).context(repo_error::FileSnafu { path: root_role_path, })?, metadata_url.clone(), @@ -94,11 +94,11 @@ fn refresh_repo( ) .expiration_enforcement(expiration_enforcement) .load() - .context(repo_error::RepoLoad { + .context(repo_error::RepoLoadSnafu { metadata_base_url: metadata_url.clone(), })?; - let mut repo_editor = - RepositoryEditor::from_repo(&root_role_path, repo).context(repo_error::EditorFromRepo)?; + let mut repo_editor = RepositoryEditor::from_repo(&root_role_path, repo) + .context(repo_error::EditorFromRepoSnafu)?; info!("Loaded TUF repo: {}", metadata_url); // Refresh the expiration dates of all non-root metadata files @@ -110,16 +110,16 @@ fn refresh_repo( // Sign the repository let signed_repo = repo_editor .sign(&[key_source]) - .context(repo_error::RepoSign)?; + .context(repo_error::RepoSignSnafu)?; // Write out the metadata files for the repository info!("Writing repo metadata to: {}", metadata_out_dir.display()); - fs::create_dir_all(&metadata_out_dir).context(repo_error::CreateDir { + fs::create_dir_all(&metadata_out_dir).context(repo_error::CreateDirSnafu { path: &metadata_out_dir, })?; signed_repo .write(&metadata_out_dir) - .context(repo_error::RepoWrite { + .context(repo_error::RepoWriteSnafu { path: &metadata_out_dir, })?; @@ -130,17 +130,17 @@ fn refresh_repo( pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<(), Error> { // If a lock file exists, use that, otherwise use Infra.toml let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::Config)?; + .context(repo_error::ConfigSnafu)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config .repo .as_ref() - .context(repo_error::MissingConfig { + .context(repo_error::MissingConfigSnafu { missing: "repo section", })? .get(&refresh_repo_args.repo) - .context(repo_error::MissingConfig { + .context(repo_error::MissingConfigSnafu { missing: format!("definition for repo {}", &refresh_repo_args.repo), })?; @@ -153,7 +153,7 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() } else { ensure!( refresh_repo_args.default_key_path.exists(), - repo_error::MissingConfig { + repo_error::MissingConfigSnafu { missing: "signing_keys in repo config, and we found no local key", } ); @@ -169,14 +169,14 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() ); let expiration = RepoExpirationPolicy::from_path(&refresh_repo_args.repo_expiration_policy_path) - .context(repo_error::Config)?; + .context(repo_error::ConfigSnafu)?; let repo_urls = repo_urls( &repo_config, &refresh_repo_args.variant, &refresh_repo_args.arch, )? 
- .context(repo_error::MissingRepoUrls { + .context(repo_error::MissingRepoUrlsSnafu { repo: &refresh_repo_args.repo, })?; refresh_repo( @@ -200,7 +200,7 @@ mod error { use url::Url; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(context(false), display("{}", source))] Repo { source: crate::repo::Error }, diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 9a3ddff4..1feeea2e 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -51,7 +51,7 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let thread_pool = rayon::ThreadPoolBuilder::new() .num_threads(min(num_cpus::get(), MAX_DOWNLOAD_THREADS)) .build() - .context(error::ThreadPool)?; + .context(error::ThreadPoolSnafu)?; // create the channels through which our download results will be passed let (tx, rx) = mpsc::channel(); @@ -60,17 +60,17 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { let tx = tx.clone(); let mut reader = repo .read_target(&target) - .with_context(|| repo_error::ReadTarget { + .with_context(|_| repo_error::ReadTargetSnafu { target: target.raw(), })? - .with_context(|| error::TargetMissing { + .with_context(|| error::TargetMissingSnafu { target: target.raw(), })?; info!("Downloading target: {}", target.raw()); thread_pool.spawn(move || { tx.send({ // tough's `Read` implementation validates the target as it's being downloaded - io::copy(&mut reader, &mut io::sink()).context(error::TargetDownload { + io::copy(&mut reader, &mut io::sink()).context(error::TargetDownloadSnafu { target: target.raw(), }) }) @@ -101,14 +101,14 @@ fn validate_repo( ) -> Result<(), Error> { // Load the repository let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::File { + File::open(root_role_path).context(repo_error::FileSnafu { path: root_role_path, })?, metadata_url.clone(), targets_url.clone(), ) .load() - .context(repo_error::RepoLoad { + .context(repo_error::RepoLoadSnafu { metadata_base_url: metadata_url.clone(), })?; info!("Loaded TUF repo: {}", metadata_url); @@ -124,16 +124,16 @@ fn validate_repo( pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { // If a lock file exists, use that, otherwise use Infra.toml let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::Config)?; + .context(repo_error::ConfigSnafu)?; trace!("Parsed infra config: {:?}", infra_config); let repo_config = infra_config .repo .as_ref() - .context(repo_error::MissingConfig { + .context(repo_error::MissingConfigSnafu { missing: "repo section", })? .get(&validate_repo_args.repo) - .context(repo_error::MissingConfig { + .context(repo_error::MissingConfigSnafu { missing: format!("definition for repo {}", &validate_repo_args.repo), })?; @@ -142,7 +142,7 @@ pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result< &validate_repo_args.variant, &validate_repo_args.arch, )? 
- .context(repo_error::MissingRepoUrls { + .context(repo_error::MissingRepoUrlsSnafu { repo: &validate_repo_args.repo, })?; validate_repo( @@ -158,7 +158,7 @@ mod error { use std::io; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Invalid percentage specified: {} is greater than 100", percentage))] InvalidPercentage { percentage: u8 }, diff --git a/tools/pubsys/src/vmware/govc.rs b/tools/pubsys/src/vmware/govc.rs index 8248f2c8..f576fd6c 100644 --- a/tools/pubsys/src/vmware/govc.rs +++ b/tools/pubsys/src/vmware/govc.rs @@ -99,7 +99,7 @@ impl Govc { /// to run in the container. // The arguments are `&[&str]` in an attempt to be as flexible as possible for the caller fn docker_run(docker_env: &[&str], mount: Option<&[&str]>, command: &[&str]) -> Result { - let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::Environment { + let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu { var: "BUILDSYS_SDK_IMAGE", })?; trace!("SDK image: {}", sdk); @@ -120,14 +120,14 @@ fn docker_run(docker_env: &[&str], mount: Option<&[&str]>, command: &[&str]) -> .stdout_capture() .unchecked() .run() - .context(error::CommandStart)?; + .context(error::CommandStartSnafu)?; let stdout = String::from_utf8_lossy(&output.stdout); trace!("{}", stdout); if output.status.success() { Ok(output) } else { - error::Docker { output: stdout }.fail() + error::DockerSnafu { output: stdout }.fail() } } @@ -158,7 +158,7 @@ pub(crate) mod error { use snafu::Snafu; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Failed to start command: {}", source))] CommandStart { source: std::io::Error }, diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs index 3634327e..4a00b396 100644 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -47,12 +47,12 @@ pub(crate) struct UploadArgs { pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::InfraConfig)?; + .context(error::InfraConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); let vmware = infra_config .vmware - .context(error::MissingConfig { missing: "vmware" })?; + .context(error::MissingConfigSnafu { missing: "vmware" })?; // If the user gave an override list of datacenters, use it, otherwise use what's in the config let upload_datacenters = if !upload_args.datacenters.is_empty() { @@ -62,7 +62,7 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { }; ensure!( !upload_datacenters.is_empty(), - error::MissingConfig { + error::MissingConfigSnafu { missing: "vmware.datacenters" } ); @@ -74,7 +74,7 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH { if creds_file.exists() { info!("Using vSphere credentials file at {}", creds_file.display()); - DatacenterCredsConfig::from_path(creds_file).context(error::VmwareConfig)? + DatacenterCredsConfig::from_path(creds_file).context(error::VmwareConfigSnafu)? 
} else { info!("vSphere credentials file not found, will attempt to use environment"); DatacenterCredsConfig::default() @@ -90,13 +90,13 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { let dc_common = vmware.common.as_ref(); // Read the import spec as a template - let import_spec_str = fs::read_to_string(&upload_args.spec).context(error::File { + let import_spec_str = fs::read_to_string(&upload_args.spec).context(error::FileSnafu { action: "read", path: &upload_args.spec, })?; let mut tt = TinyTemplate::new(); tt.add_template(SPEC_TEMPLATE_NAME, &import_spec_str) - .context(error::AddTemplate { + .context(error::AddTemplateSnafu { path: &upload_args.spec, })?; @@ -117,7 +117,7 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { .take_missing_from(dc_config) .take_missing_from(dc_common) .build() - .context(error::DatacenterBuild)?; + .context(error::DatacenterBuildSnafu)?; // Use a similar pattern here for credentials; start with environment variables and fill in // any missing items with the datacenter-specific credentials from file. @@ -125,12 +125,12 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { let creds: DatacenterCreds = creds_env .take_missing_from(dc_creds) .build() - .context(error::CredsBuild)?; + .context(error::CredsBuildSnafu)?; // Render the import spec with this datacenter's details and write to temp file let rendered_spec = render_spec(&tt, &datacenter.network, upload_args.mark_as_template)?; - let import_spec = NamedTempFile::new().context(error::TempFile)?; - fs::write(import_spec.path(), &rendered_spec).context(error::File { + let import_spec = NamedTempFile::new().context(error::TempFileSnafu)?; + fs::write(import_spec.path(), &rendered_spec).context(error::FileSnafu { action: "write", path: import_spec.path(), })?; @@ -150,7 +150,7 @@ pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { Govc::new(datacenter, creds) .upload_ova(&upload_args.name, &upload_args.ova, import_spec) - .context(error::UploadOva)?; + .context(error::UploadOvaSnafu)?; } Ok(()) @@ -176,7 +176,7 @@ where Ok(tt .render(SPEC_TEMPLATE_NAME, &context) - .context(error::RenderTemplate)?) + .context(error::RenderTemplateSnafu)?) 
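Both the SSM parameter templates and the vSphere import spec touched in these files are rendered with the tinytemplate crate: a template is registered under a name with `add_template`, and `render` then substitutes `{field}` placeholders from a `Serialize`-able context. A minimal sketch of that flow follows; the template string, context fields, and values are invented for illustration and assume tinytemplate 1.x plus serde with the derive feature:

    use serde::Serialize;
    use tinytemplate::TinyTemplate;

    // Hypothetical build context; the field names only mirror the kind of data fed to these templates.
    #[derive(Serialize)]
    struct BuildContext<'a> {
        variant: &'a str,
        arch: &'a str,
        image_version: &'a str,
    }

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Made-up parameter name template, not one of the project's real SSM templates.
        let name_template = "/example/{variant}/{arch}/{image_version}/image_id";

        let mut tt = TinyTemplate::new();
        tt.add_template("name", name_template)?;

        let context = BuildContext {
            variant: "aws-k8s-1.21",
            arch: "x86_64",
            image_version: "1.6.0",
        };

        // render() substitutes {variant}, {arch}, and {image_version} from the serialized context.
        println!("{}", tt.render("name", &context)?);
        Ok(())
    }

In the patch itself, each `add_template`/`render` call is wrapped in the `AddTemplateSnafu` and `RenderTemplateSnafu` contexts shown above, so a failing template reports which template string could not be built or rendered.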
} mod error { @@ -185,7 +185,7 @@ mod error { use std::path::PathBuf; #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] + #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(display("Error building template from '{}': {}", path.display(), source))] AddTemplate { From 5c9117b22f04410149f0d59960e75fbf23c8525e Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 2 Feb 2022 19:06:26 -0800 Subject: [PATCH 0594/1356] tools: update rust dependencies --- tools/Cargo.lock | 516 ++++++++++++++++++++-------------- tools/buildsys/Cargo.toml | 2 +- tools/deny.toml | 13 + tools/infrasys/Cargo.toml | 4 +- tools/pubsys-setup/Cargo.toml | 4 +- tools/pubsys/Cargo.toml | 2 +- 6 files changed, 325 insertions(+), 216 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 627d9a1b..08734391 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -28,18 +28,18 @@ dependencies = [ [[package]] name = "ansi_term" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ "winapi", ] [[package]] name = "argh" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f023c76cd7975f9969f8e29f0e461decbdc7f51048ce43427107a3d192f1c9bf" +checksum = "dbb41d85d92dfab96cb95ab023c265c5e4261bb956c0fb49ca06d90c570f1958" dependencies = [ "argh_derive", "argh_shared", @@ -47,9 +47,9 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ad219abc0c06ca788aface2e3a1970587e3413ab70acd20e54b6ec524c1f8f" +checksum = "be69f70ef5497dd6ab331a50bd95c6ac6b8f7f17a7967838332743fbd58dc3b5" dependencies = [ "argh_shared", "heck", @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "argh_shared" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38de00daab4eac7d753e97697066238d67ce9d7e2d823ab4f72fe14af29f3f33" +checksum = "e6f8c380fa28aa1b36107cd97f0196474bb7241bb95a453c5c01a15ac74b2eac" [[package]] name = "assert-json-diff" @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -104,9 +104,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" +checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", "cc", @@ -138,6 +138,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +dependencies = [ + "generic-array", +] + [[package]] name = "bstr" version = "0.2.17" @@ -160,7 +169,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2", + "sha2 0.10.1", 
"snafu 0.7.0", "toml", "url", @@ -169,9 +178,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "bytes" @@ -196,9 +205,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cfg-if" @@ -222,9 +231,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.3" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", @@ -251,7 +260,7 @@ dependencies = [ "rusoto_ebs", "rusoto_ec2", "rusoto_signature", - "sha2", + "sha2 0.9.9", "snafu 0.6.10", "tempfile", "tokio", @@ -297,18 +306,18 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if", "crossbeam-utils", @@ -327,9 +336,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if", "crossbeam-utils", @@ -340,14 +349,23 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if", "lazy_static", ] +[[package]] +name = "crypto-common" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +dependencies = [ + "generic-array", +] + [[package]] name = "crypto-mac" version = "0.11.1" @@ -364,7 +382,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" dependencies = [ - "sct", + "sct 0.6.1", ] [[package]] @@ -376,6 +394,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +dependencies = [ + "block-buffer 0.10.0", + 
"crypto-common", + "generic-array", +] + [[package]] name = "dirs-next" version = "2.0.0" @@ -403,12 +432,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "duct" version = "0.13.5" @@ -441,13 +464,22 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.29" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if", ] +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "fnv" version = "1.0.7" @@ -466,9 +498,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", "futures-core", @@ -481,9 +513,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -491,15 +523,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -508,18 +540,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -527,23 +557,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -553,16 +582,14 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -570,9 +597,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if", "libc", @@ -600,9 +627,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.7" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" dependencies = [ "bytes", "fnv", @@ -654,7 +681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", - "digest", + "digest 0.9.0", ] [[package]] @@ -668,13 +695,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.1", ] [[package]] @@ -696,15 +723,15 @@ checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.14" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -715,7 +742,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 0.4.8", 
"pin-project-lite", "socket2", "tokio", @@ -734,11 +761,24 @@ dependencies = [ "futures-util", "hyper", "log", - "rustls", + "rustls 0.19.1", "rustls-native-certs", "tokio", - "tokio-rustls", - "webpki", + "tokio-rustls 0.22.0", + "webpki 0.21.4", +] + +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "rustls 0.20.2", + "tokio", + "tokio-rustls 0.23.2", ] [[package]] @@ -754,9 +794,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", @@ -789,7 +829,7 @@ dependencies = [ "rusoto_s3", "serde_json", "serde_yaml", - "sha2", + "sha2 0.10.1", "shell-words", "simplelog", "snafu 0.7.0", @@ -820,11 +860,17 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" dependencies = [ "wasm-bindgen", ] @@ -837,9 +883,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.107" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" +checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" [[package]] name = "linked-hash-map" @@ -849,9 +895,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" dependencies = [ "scopeguard", ] @@ -877,8 +923,8 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.9.0", + "digest 0.9.0", "opaque-debug", ] @@ -890,9 +936,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -971,9 +1017,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1007,9 +1053,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "opaque-debug" @@ -1019,9 +1065,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_pipe" @@ -1087,13 +1133,11 @@ dependencies = [ [[package]] name = "pem" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06673860db84d02a63942fa69cd9543f2624a5df3aea7f33173048fa7ad5cf1a" +checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" dependencies = [ "base64", - "once_cell", - "regex", ] [[package]] @@ -1104,9 +1148,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1116,9 +1160,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-error" @@ -1144,23 +1188,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -1233,7 +1265,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2", + "sha2 0.10.1", "shell-words", "simplelog", "snafu 0.7.0", @@ -1245,9 +1277,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" dependencies = [ "proc-macro2", ] @@ -1364,19 
+1396,20 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.0", "ipnet", "js-sys", "lazy_static", @@ -1384,12 +1417,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.20.2", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.2", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1440,7 +1474,7 @@ dependencies = [ "futures", "http", "hyper", - "hyper-rustls", + "hyper-rustls 0.22.1", "lazy_static", "log", "rusoto_credential", @@ -1535,7 +1569,7 @@ dependencies = [ "base64", "bytes", "chrono", - "digest", + "digest 0.9.0", "futures", "hex", "hmac", @@ -1548,7 +1582,7 @@ dependencies = [ "rusoto_credential", "rustc_version", "serde", - "sha2", + "sha2 0.9.9", "tokio", ] @@ -1605,8 +1639,20 @@ dependencies = [ "base64", "log", "ring", - "sct", - "webpki", + "sct 0.6.1", + "webpki 0.21.4", +] + +[[package]] +name = "rustls" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", ] [[package]] @@ -1616,16 +1662,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.19.1", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "same-file" @@ -1662,11 +1717,21 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" -version = "2.4.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" +checksum = "3fed7948b6c68acbb6e20c334f55ad635dc0f75506963de4464289fbd3b051ac" dependencies = [ "bitflags", "core-foundation", @@ -1677,9 +1742,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "a57321bf8bc2362081b2599912d2961fe899c0efadf1b4b2f8d48b3e253bb96c" dependencies = [ "core-foundation-sys", "libc", @@ -1696,18 +1761,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" 
+version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -1716,11 +1781,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.69" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e466864e431129c7e0d3476b92f20458e5879919a0596c6472738d9fa2d342f8" +checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -1736,41 +1801,52 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa", + "itoa 1.0.1", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.1", +] + [[package]] name = "shared_child" version = "0.3.5" @@ -1804,9 +1880,9 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.10.2" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85d04ae642154220ef00ee82c36fb07853c10a4f2a0ca6719f9991211d2eb959" +checksum = "c1348164456f72ca0116e4538bdaabb0ddb622c7d9f16387c725af3e96d6001c" dependencies = [ "chrono", "log", @@ -1821,9 +1897,9 @@ checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "snafu" @@ -1831,7 +1907,6 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" dependencies = [ - "backtrace", "doc-comment", 
"snafu-derive 0.6.10", ] @@ -1872,9 +1947,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", @@ -1894,9 +1969,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" +checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" dependencies = [ "clap", "lazy_static", @@ -1924,9 +1999,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.81" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", @@ -1935,13 +2010,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", + "fastrand", "libc", - "rand", "redox_syscall", "remove_dir_all", "winapi", @@ -1997,9 +2072,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2012,9 +2087,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.8.3" +version = "1.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00a287ce596d527f273dea7638a044739234740dbad141e7ed0c62c7d0c9c55a" +checksum = "cdc46ca74dd45faeaaf96a8fbe2406f425829705ee62100ccaa9b34a2145cff8" dependencies = [ "autocfg", "bytes", @@ -2032,9 +2107,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -2047,9 +2122,20 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "rustls", + "rustls 0.19.1", + "tokio", + "webpki 0.21.4", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +dependencies = [ + "rustls 0.20.2", "tokio", - "webpki", + "webpki 0.22.0", ] [[package]] @@ -2088,9 +2174,9 @@ dependencies = [ [[package]] name = "tough" -version = "0.12.0" +version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99488309ba53ee931b6ccda1cde07feaab95f214d328e3a7244c0f7563b5909f" +checksum = "708125a84e70820bccc5fc11d7196664415be2b02b81ba6946e70e10803aa4da" dependencies = [ "chrono", "dyn-clone", @@ -2106,7 +2192,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "snafu 0.6.10", + "snafu 0.7.0", "tempfile", "untrusted", "url", @@ -2115,32 +2201,32 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1ece7cb6917b7d503e85d9285e1a7616d2e5ae96c1362087771401559f47d2" +checksum = "a910dad24be252ff379d79a49c44ed36b3e8b0f5d34b79a8967df24e685bae2d" dependencies = [ "pem", "ring", "rusoto_core", "rusoto_credential", "rusoto_kms", - "snafu 0.6.10", + "snafu 0.7.0", "tokio", "tough", ] [[package]] name = "tough-ssm" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303c67d70bcf2352668c42984715a71ccbf1558fcbf64064987caadba07fc771" +checksum = "cf1c96981e5a2302abc1ea54f076ac47c2ffe2abcdcc147f7668ee8b3212c094" dependencies = [ "rusoto_core", "rusoto_credential", "rusoto_ssm", "serde", "serde_json", - "snafu 0.6.10", + "snafu 0.7.0", "tokio", "tough", ] @@ -2179,9 +2265,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicode-bidi" @@ -2258,9 +2344,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" @@ -2291,9 +2377,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2301,9 +2387,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", @@ -2316,9 +2402,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" +checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" dependencies = [ "cfg-if", "js-sys", @@ -2328,9 +2414,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2338,9 +2424,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" dependencies = [ "proc-macro2", "quote", @@ -2351,15 +2437,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" dependencies = [ "js-sys", "wasm-bindgen", @@ -2375,13 +2461,23 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "webpki-roots" -version = "0.21.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ - "webpki", + "webpki 0.22.0", ] [[package]] @@ -2441,6 +2537,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index 3fd048ec..d9971fda 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -17,7 +17,7 @@ regex = "1" reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_plain = "1.0" -sha2 = "0.9" +sha2 = "0.10" snafu = "0.7" toml = "0.5" url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/deny.toml b/tools/deny.toml index eca08746..7068b6b2 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -27,6 +27,16 @@ exceptions = [ { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, ] +# https://github.com/hsivonen/encoding_rs The non-test code that isn't generated from the WHATWG data in this crate is +# under Apache-2.0 OR MIT. Test code is under CC0. 
+[[licenses.clarify]] +name = "encoding_rs" +version = "0.8.30" +expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause" +license-files = [ + { path = "COPYRIGHT", hash = 0x39f8ad31 } +] + [[licenses.clarify]] name = "ring" expression = "MIT AND ISC AND OpenSSL" @@ -49,6 +59,9 @@ wildcards = "deny" skip-tree = [ # temporarily using a different version of snafu { name = "parse-datetime", version = "0.1.0" }, + + # rusoto is using a different version of reqwest. + { name = "rusoto_core", version = "0.47.0" }, ] [sources] diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index fc78cc32..21c56660 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -17,9 +17,9 @@ rusoto_core = { version = "0.47", default-features = false, features = ["rustls" rusoto_s3 = { version = "0.47", default-features = false, features = ["rustls"] } serde_json = "1.0.66" serde_yaml = "0.8.17" -sha2 = "0.9" +sha2 = "0.10" shell-words = "1.0.0" -simplelog = "0.10.0" +simplelog = "0.11" snafu = "0.7" structopt = { version = "0.3", default-features = false } tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 7b186e6d..7bfee00f 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -11,9 +11,9 @@ hex = "0.4.0" log = "0.4" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } -sha2 = "0.9" +sha2 = "0.10" shell-words = "1.0" -simplelog = "0.10" +simplelog = "0.11" snafu = "0.7" structopt = { version = "0.3", default-features = false } tempfile = "3.1" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index eafa77ce..dab33fea 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -30,7 +30,7 @@ rusoto_kms = { version = "0.47.0", default-features = false, features = ["rustls rusoto_signature = "0.47.0" rusoto_ssm = { version = "0.47.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.47.0", default-features = false, features = ["rustls"] } -simplelog = "0.10.0" +simplelog = "0.11" snafu = "0.7" semver = "1.0" serde = { version = "1.0", features = ["derive"] } From 92b6f382d486335503d99ba48471bb53f422329e Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 24 Jan 2022 20:22:09 +0000 Subject: [PATCH 0595/1356] kmod-5.10-nvidia: add releases url --- packages/kmod-5.10-nvidia/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 32b506d5..c7db0428 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -10,6 +10,7 @@ path = "pkg.rs" [package.metadata.build-package] package-name = "kmod-5.10-nvidia" +releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/470.82.01/NVIDIA-Linux-x86_64-470.82.01.run" From 87e0953c7bc462e4d60a587cf6e681fc4f57ac86 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 24 Jan 2022 19:48:48 +0000 Subject: [PATCH 0596/1356] kmod-5.10-nvidia: add remaining libraries The NVIDIA sources provide user-space libraries that will be mounted into the containers, depending on the set of driver capabilities configured for the workload. 
Signed-off-by: Arnaldo Garcia Rincon --- .../kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 251 ++++++++++++------ 1 file changed, 176 insertions(+), 75 deletions(-) diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 79d16869..df60a9d0 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,4 +1,6 @@ -%global nvidia_tesla_470_version 470.82.01 +%global tesla_470 470.82.01 +%global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} +%global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -12,8 +14,8 @@ License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ # NVIDIA .run scripts from 0 to 199 -Source0: https://us.download.nvidia.com/tesla/%{nvidia_tesla_470_version}/NVIDIA-Linux-x86_64-%{nvidia_tesla_470_version}.run -Source1: https://us.download.nvidia.com/tesla/%{nvidia_tesla_470_version}/NVIDIA-Linux-aarch64-%{nvidia_tesla_470_version}.run +Source0: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-x86_64-%{tesla_470}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-aarch64-%{tesla_470}.run # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -33,7 +35,7 @@ BuildRequires: %{_cross_os}kernel-5.10-archive %package tesla-470 Summary: NVIDIA 470 Tesla driver -Version: %{nvidia_tesla_470_version} +Version: %{tesla_470} License: %{spdx_id} Requires: %{name} @@ -43,13 +45,13 @@ Requires: %{name} %prep # Extract nvidia sources with `-x`, otherwise the script will try to install # the driver in the current run -sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version}.run -x +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_470}.run -x %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz %build -pushd NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version}/kernel +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_470}/kernel # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod @@ -90,66 +92,76 @@ install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf # Begin NVIDIA tesla 470 -pushd NVIDIA-Linux-%{_cross_arch}-%{nvidia_tesla_470_version} +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_470} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions -install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} -install -d %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/ -install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} - -sed -e 's|__NVIDIA_VERSION__|%{nvidia_tesla_470_version}|' %{S:300} > nvidia-tesla-%{nvidia_tesla_470_version}.conf -install -m 0644 nvidia-tesla-%{nvidia_tesla_470_version}.conf %{buildroot}%{_cross_tmpfilesdir}/ -sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/|' %{S:301} > \ - nvidia-tesla-%{nvidia_tesla_470_version}.toml -install -m 0644 
nvidia-tesla-%{nvidia_tesla_470_version}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -d %{buildroot}%{tesla_470_libdir} +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_470} + +sed -e 's|__NVIDIA_VERSION__|%{tesla_470}|' %{S:300} > nvidia-tesla-%{tesla_470}.conf +install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_tmpfilesdir}/ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/|' %{S:301} > \ + nvidia-tesla-%{tesla_470}.toml +install -m 0644 nvidia-tesla-%{tesla_470}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}|' %{S:302} > nvidia-path.env -install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} -# We need to add `_cross_libdir/nvidia_tesla_470_version` to the paths loaded by the ldconfig service +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_470} +# We need to add `_cross_libdir/tesla_470` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{nvidia_tesla_470_version}|' \ - > nvidia-tesla-%{nvidia_tesla_470_version}.conf -install -m 0644 nvidia-tesla-%{nvidia_tesla_470_version}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_470}|' \ + > nvidia-tesla-%{tesla_470}.conf +install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # driver -install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-kernel.o +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-kernel.o # uvm -install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d # modeset -install kernel/nvidia-modeset.mod.o 
%{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d # peermem -install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d # drm -install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} -install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} -install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} -install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +# Binaries +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %if "%{_cross_arch}" == "x86_64" -install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %endif -# TODO: add remaining libraries -# misc -# Add libnvidia-ml.so for testing purposes -install -m755 libnvidia-ml.so.%{nvidia_tesla_470_version} %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version} +# We install all the libraries, and filter them out in the 'files' section, so we can catch +# when new libraries are added +install -m 755 *.so* %{buildroot}/%{tesla_470_libdir}/ + +# This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while +# the symlinks are created. For now, we only symlink libEGL.so.1.1.0. +EXCLUDED_LIBS="libEGL.so.%{tesla_470}" -ln -s libnvidia-ml.so.%{nvidia_tesla_470_version} %{buildroot}%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.1 +for lib in $(find . 
-maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do + [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue + soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" + [ -n "${soname}" ] || continue + [ "${lib}" == "${soname}" ] && continue + ln -s "${lib}" %{buildroot}/%{tesla_470_libdir}/"${soname}" +done popd @@ -166,50 +178,139 @@ popd %files tesla-470 %license %{license_file} -%dir %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version} -%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version} -%dir %{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version} -%dir %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d -%dir %{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_470} +%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +%dir %{tesla_470_libdir} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_470} # Binaries -%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-debugdump -%{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-smi - -# Libraries -%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.1 -%{_cross_libdir}/nvidia/tesla/%{nvidia_tesla_470_version}/libnvidia-ml.so.%{nvidia_tesla_470_version} +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-smi # Configuration files -%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{nvidia_tesla_470_version}.toml -%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{nvidia_tesla_470_version}.conf -%{_cross_factorydir}/nvidia/tesla/%{nvidia_tesla_470_version}/nvidia-path.env +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_470}.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_470}.conf +%{_cross_factorydir}/nvidia/tesla/%{tesla_470}/nvidia-path.env # driver -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia.mod.o -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-interface.o -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-kernel.o # uvm -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-uvm.mod.o -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-uvm.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-uvm.o # modeset -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-modeset-interface.o -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nv-modeset-kernel.o -%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-modeset.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-modeset.mod.o # tmpfiles -%{_cross_tmpfilesdir}/nvidia-tesla-%{nvidia_tesla_470_version}.conf 
+%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_470}.conf + +# We only install the libraries required by all the DRIVER_CAPABILITIES, described here: +# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities + +# Utility libs +%{tesla_470_libdir}/libnvidia-ml.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-ml.so.1 +%{tesla_470_libdir}/libnvidia-cfg.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-cfg.so.1 +%{tesla_470_libdir}/libnvidia-nvvm.so.4.0.0 +%{tesla_470_libdir}/libnvidia-nvvm.so.4 + +# Compute libs +%{tesla_470_libdir}/libcuda.so.%{tesla_470} +%{tesla_470_libdir}/libcuda.so.1 +%{tesla_470_libdir}/libnvidia-opencl.so.470.82.01 +%{tesla_470_libdir}/libnvidia-opencl.so.1 +%{tesla_470_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-ptxjitcompiler.so.1 +%{tesla_470_libdir}/libnvidia-allocator.so.470.82.01 +%{tesla_470_libdir}/libnvidia-allocator.so.1 +%{tesla_470_libdir}/libOpenCL.so.1.0.0 +%{tesla_470_libdir}/libOpenCL.so.1 +%if "%{_cross_arch}" == "x86_64" +%{tesla_470_libdir}/libnvidia-compiler.so.%{tesla_470} +%endif + +# Video libs +%{tesla_470_libdir}/libvdpau_nvidia.so.%{tesla_470} +%{tesla_470_libdir}/libvdpau_nvidia.so.1 +%{tesla_470_libdir}/libnvidia-encode.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-encode.so.1 +%{tesla_470_libdir}/libnvidia-opticalflow.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-opticalflow.so.1 +%{tesla_470_libdir}/libnvcuvid.so.%{tesla_470} +%{tesla_470_libdir}/libnvcuvid.so.1 + +# Graphics libs +%{tesla_470_libdir}/libnvidia-eglcore.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-glcore.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-tls.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-glsi.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-rtcore.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-fbc.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-fbc.so.1 +%{tesla_470_libdir}/libnvoptix.so.%{tesla_470} +%{tesla_470_libdir}/libnvoptix.so.1 +%{tesla_470_libdir}/libnvidia-vulkan-producer.so.%{tesla_470} +%if "%{_cross_arch}" == "x86_64" +%{tesla_470_libdir}/libnvidia-ifr.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-ifr.so.1 +%endif + +# Graphics GLVND libs +%{tesla_470_libdir}/libnvidia-glvkspirv.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-cbl.so.%{tesla_470} +%{tesla_470_libdir}/libGLX_nvidia.so.%{tesla_470} +%{tesla_470_libdir}/libGLX_nvidia.so.0 +%{tesla_470_libdir}/libEGL_nvidia.so.%{tesla_470} +%{tesla_470_libdir}/libEGL_nvidia.so.0 +%{tesla_470_libdir}/libGLESv2_nvidia.so.%{tesla_470} +%{tesla_470_libdir}/libGLESv2_nvidia.so.2 +%{tesla_470_libdir}/libGLESv1_CM_nvidia.so.%{tesla_470} +%{tesla_470_libdir}/libGLESv1_CM_nvidia.so.1 + +# Graphics compat +%{tesla_470_libdir}/libEGL.so.1.1.0 +%{tesla_470_libdir}/libEGL.so.1 +%{tesla_470_libdir}/libEGL.so.%{tesla_470} +%{tesla_470_libdir}/libGL.so.1.7.0 +%{tesla_470_libdir}/libGL.so.1 +%{tesla_470_libdir}/libGLESv1_CM.so.1.2.0 +%{tesla_470_libdir}/libGLESv1_CM.so.1 +%{tesla_470_libdir}/libGLESv2.so.2.1.0 +%{tesla_470_libdir}/libGLESv2.so.2 + +# NGX +%if "%{_cross_arch}" == "x86_64" +%{tesla_470_libdir}/libnvidia-ngx.so.%{tesla_470} +%{tesla_470_libdir}/libnvidia-ngx.so.1 +%endif # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested -%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-peermem.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-peermem.o -%exclude 
%{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-drm.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{nvidia_tesla_470_version}/module-objects.d/nvidia-drm.o -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-cuda-mps-control -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-cuda-mps-server +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-cuda-mps-server %if "%{_cross_arch}" == "x86_64" -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{nvidia_tesla_470_version}/nvidia-ngx-updater +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-ngx-updater %endif + +# None of these libraries are required by libnvidia-container, so they +# won't be used by a containerized workload +%exclude %{tesla_470_libdir}/libGLX.so.0 +%exclude %{tesla_470_libdir}/libGLdispatch.so.0 +%exclude %{tesla_470_libdir}/libOpenGL.so.0 +%exclude %{tesla_470_libdir}/libglxserver_nvidia.so.470.82.01 +%exclude %{tesla_470_libdir}/libnvidia-egl-wayland.so.1.1.7 +%exclude %{tesla_470_libdir}/libnvidia-gtk2.so.470.82.01 +%exclude %{tesla_470_libdir}/libnvidia-gtk3.so.470.82.01 +%exclude %{tesla_470_libdir}/nvidia_drv.so +%exclude %{tesla_470_libdir}/libnvidia-egl-wayland.so.1 From 8e3104db1d021bc5753f65a42b3919a7132c10e8 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 4 Feb 2022 01:14:19 +0000 Subject: [PATCH 0597/1356] kernel: add fix for cgroup v1 release agent Signed-off-by: Ben Cressey --- ...re-capabilities-to-set-release_agent.patch | 54 ++++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 4 ++ ...re-capabilities-to-set-release_agent.patch | 55 +++++++++++++++++++ ...sx-Disable-Werror-stringop-overflow.patch} | 2 +- packages/kernel-5.4/kernel-5.4.spec | 7 ++- 5 files changed, 119 insertions(+), 3 deletions(-) create mode 100644 packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch create mode 100644 packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch rename packages/kernel-5.4/{0001-lustrefsx-Disable-Werror-stringop-overflow.patch => 3001-lustrefsx-Disable-Werror-stringop-overflow.patch} (93%) diff --git a/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch b/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch new file mode 100644 index 00000000..fbd7a04b --- /dev/null +++ b/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch @@ -0,0 +1,54 @@ +From ea044e9e1c65fe83d2e50580e43c4e9775414af1 Mon Sep 17 00:00:00 2001 +From: "Eric W. Biederman" +Date: Thu, 20 Jan 2022 11:04:01 -0600 +Subject: [PATCH] cgroup-v1: Require capabilities to set release_agent + +The cgroup release_agent is called with call_usermodehelper. The function +call_usermodehelper starts the release_agent with a full set fo capabilities. +Therefore require capabilities when setting the release_agaent. 
+ +Reported-by: Tabitha Sable +Tested-by: Tabitha Sable +Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") +Cc: stable@vger.kernel.org # v2.6.24+ +Signed-off-by: "Eric W. Biederman" +Signed-off-by: Tejun Heo +--- + kernel/cgroup/cgroup-v1.c | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 7f71b54c06c5..69fba563c810 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -545,6 +545,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, + + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + ++ /* ++ * Release agent gets called with all capabilities, ++ * require capabilities to set release agent. ++ */ ++ if ((of->file->f_cred->user_ns != &init_user_ns) || ++ !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENODEV; +@@ -958,6 +966,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) + /* Specifying two release agents is forbidden */ + if (ctx->release_agent) + return invalfc(fc, "release_agent respecified"); ++ /* ++ * Release agent gets called with all capabilities, ++ * require capabilities to set release agent. ++ */ ++ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) ++ return invalfc(fc, "Setting release_agent not allowed"); + ctx->release_agent = param->string; + param->string = NULL; + break; +-- +2.32.0 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 530cfb9f..d0df34ae 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -10,8 +10,12 @@ URL: https://www.kernel.org/ Source0: https://cdn.amazonlinux.com/blobstore/c80d649c51b68fdb2bc126c326f83fed93ed242d675f978a9a0da4012e9789a5/kernel-5.10.93-87.444.amzn2.src.rpm Source100: config-bottlerocket +# cgroup v1 release agent fix +Patch0001: 0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch + # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch + # Add zstd support for compressed kernel modules Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch diff --git a/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch b/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch new file mode 100644 index 00000000..51714169 --- /dev/null +++ b/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch @@ -0,0 +1,55 @@ +From da405f0c70e6daba64d112c9036fedc28906a937 Mon Sep 17 00:00:00 2001 +From: "Eric W. Biederman" +Date: Thu, 20 Jan 2022 11:04:01 -0600 +Subject: [PATCH] cgroup-v1: Require capabilities to set release_agent + +The cgroup release_agent is called with call_usermodehelper. The function +call_usermodehelper starts the release_agent with a full set fo capabilities. +Therefore require capabilities when setting the release_agaent. + +Reported-by: Tabitha Sable +Tested-by: Tabitha Sable +Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") +Cc: stable@vger.kernel.org # v2.6.24+ +Signed-off-by: "Eric W. 
Biederman" +Signed-off-by: Tejun Heo +--- + kernel/cgroup/cgroup-v1.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 2d0ef613ca07..07917ac2c303 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -549,6 +549,15 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, + + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + ++ /* ++ * Release agent gets called with all capabilities, ++ * require capabilities to set release agent. ++ */ ++ ++ if ((of->file->f_cred->user_ns != &init_user_ns) || ++ !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENODEV; +@@ -961,6 +970,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) + /* Specifying two release agents is forbidden */ + if (ctx->release_agent) + return cg_invalf(fc, "cgroup1: release_agent respecified"); ++ /* ++ * Release agent gets called with all capabilities, ++ * require capabilities to set release agent. ++ */ ++ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) ++ return cg_invalf(fc, "cgroup1: Setting release_agent not allowed"); + ctx->release_agent = param->string; + param->string = NULL; + break; +-- +2.32.0 + diff --git a/packages/kernel-5.4/0001-lustrefsx-Disable-Werror-stringop-overflow.patch b/packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch similarity index 93% rename from packages/kernel-5.4/0001-lustrefsx-Disable-Werror-stringop-overflow.patch rename to packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch index 8c0c23b5..b5273f98 100644 --- a/packages/kernel-5.4/0001-lustrefsx-Disable-Werror-stringop-overflow.patch +++ b/packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch @@ -1,7 +1,7 @@ From a5f6b26082e0022d3c3e70e0718e4787939778d8 Mon Sep 17 00:00:00 2001 From: iliana destroyer of worlds Date: Tue, 30 Jul 2019 12:59:09 -0700 -Subject: [PATCH 1/2] lustrefsx: Disable -Werror=stringop-overflow= +Subject: [PATCH] lustrefsx: Disable -Werror=stringop-overflow= Signed-off-by: iliana destroyer of worlds --- diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 7ff6d066..228f795d 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -10,8 +10,8 @@ URL: https://www.kernel.org/ Source0: https://cdn.amazonlinux.com/blobstore/9d3d2fc3caf5bc68bcc257a426b1a3177f60f1acd62e27d772b58156c1b76e57/kernel-5.4.172-90.336.amzn2.src.rpm Source100: config-bottlerocket -# Make Lustre FSx work with a newer GCC. -Patch0001: 0001-lustrefsx-Disable-Werror-stringop-overflow.patch +# cgroup v1 release agent fix +Patch0001: 0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -27,6 +27,9 @@ Patch2005: 2005-.gitignore-Add-ZSTD-compressed-files.patch Patch2006: 2006-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2007: 2007-kbuild-add-support-for-zstd-compressed-modules.patch +# Make Lustre FSx work with a newer GCC. 
+Patch3001: 3001-lustrefsx-Disable-Werror-stringop-overflow.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 8c72d84c7f6d696bfdd644c39200064ce61163e6 Mon Sep 17 00:00:00 2001 From: Calum Lacroix Date: Mon, 14 Feb 2022 11:55:25 +0000 Subject: [PATCH 0598/1356] kernel-5.10: Add TCMU See https://www.kernel.org/doc/Documentation/target/tcmu-design.txt for details --- packages/kernel-5.10/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index dcba4f8f..835980ce 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -49,6 +49,9 @@ CONFIG_DAX=y CONFIG_DM_INIT=y CONFIG_DM_VERITY=y +# TCMU/LIO +CONFIG_TCM_USER2=m + # EFI CONFIG_EFI=y CONFIG_EFI_STUB=y From 7fa28898b468b6fd6e8b5ff69638925f0ad7cdcc Mon Sep 17 00:00:00 2001 From: Calum Lacroix Date: Mon, 14 Feb 2022 11:56:16 +0000 Subject: [PATCH 0599/1356] kernel-5.4: Add TCMU See https://www.kernel.org/doc/Documentation/target/tcmu-design.txt for details --- packages/kernel-5.4/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index b5f021eb..3739e023 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -22,6 +22,9 @@ CONFIG_DAX=y CONFIG_DM_INIT=y CONFIG_DM_VERITY=y +# TCMU/LIO +CONFIG_TCM_USER2=m + # Enable EFI. CONFIG_EFI=y CONFIG_EFI_STUB=y From 5eb022df66848d1fd0d6b68c294e067a8d8df37a Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 25 Feb 2022 10:02:57 +0000 Subject: [PATCH 0600/1356] kernel-5.10: Update to 5.10.96 Removes cgroup-v1 patch which has been included upstream. --- ...re-capabilities-to-set-release_agent.patch | 54 ------------------- packages/kernel-5.10/Cargo.toml | 4 +- packages/kernel-5.10/kernel-5.10.spec | 7 +-- 3 files changed, 4 insertions(+), 61 deletions(-) delete mode 100644 packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch diff --git a/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch b/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch deleted file mode 100644 index fbd7a04b..00000000 --- a/packages/kernel-5.10/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch +++ /dev/null @@ -1,54 +0,0 @@ -From ea044e9e1c65fe83d2e50580e43c4e9775414af1 Mon Sep 17 00:00:00 2001 -From: "Eric W. Biederman" -Date: Thu, 20 Jan 2022 11:04:01 -0600 -Subject: [PATCH] cgroup-v1: Require capabilities to set release_agent - -The cgroup release_agent is called with call_usermodehelper. The function -call_usermodehelper starts the release_agent with a full set fo capabilities. -Therefore require capabilities when setting the release_agaent. - -Reported-by: Tabitha Sable -Tested-by: Tabitha Sable -Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") -Cc: stable@vger.kernel.org # v2.6.24+ -Signed-off-by: "Eric W. 
Biederman" -Signed-off-by: Tejun Heo ---- - kernel/cgroup/cgroup-v1.c | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c -index 7f71b54c06c5..69fba563c810 100644 ---- a/kernel/cgroup/cgroup-v1.c -+++ b/kernel/cgroup/cgroup-v1.c -@@ -545,6 +545,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, - - BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); - -+ /* -+ * Release agent gets called with all capabilities, -+ * require capabilities to set release agent. -+ */ -+ if ((of->file->f_cred->user_ns != &init_user_ns) || -+ !capable(CAP_SYS_ADMIN)) -+ return -EPERM; -+ - cgrp = cgroup_kn_lock_live(of->kn, false); - if (!cgrp) - return -ENODEV; -@@ -958,6 +966,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) - /* Specifying two release agents is forbidden */ - if (ctx->release_agent) - return invalfc(fc, "release_agent respecified"); -+ /* -+ * Release agent gets called with all capabilities, -+ * require capabilities to set release agent. -+ */ -+ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) -+ return invalfc(fc, "Setting release_agent not allowed"); - ctx->release_agent = param->string; - param->string = NULL; - break; --- -2.32.0 - diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 53442753..ed9e3f77 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/c80d649c51b68fdb2bc126c326f83fed93ed242d675f978a9a0da4012e9789a5/kernel-5.10.93-87.444.amzn2.src.rpm" -sha512 = "1e5442b0da15123e6a3c6c6b32f8f3b2ff53565fb9f2a76b778b315ea484a87423fef05bb1aed501c1a1f61507d5edac23bf1b1694bab3a73610ac6af22b190e" +url = "https://cdn.amazonlinux.com/blobstore/3d651c178d1a236b1ea38ddbd548d5402442c60003960ea61daf53e52d360d36/kernel-5.10.96-90.460.amzn2.src.rpm" +sha512 = "d95699f9a039f91899a5772081e17d523c6752faf0fd2edfc2f1394b2b0ba6cee05b05af234f029e3d0ef705b4ee05d86be92413215265db379d8c8ad21bdbbd" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index d0df34ae..3a1bb6b3 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,18 +1,15 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.93 +Version: 5.10.96 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/c80d649c51b68fdb2bc126c326f83fed93ed242d675f978a9a0da4012e9789a5/kernel-5.10.93-87.444.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/3d651c178d1a236b1ea38ddbd548d5402442c60003960ea61daf53e52d360d36/kernel-5.10.96-90.460.amzn2.src.rpm Source100: config-bottlerocket -# cgroup v1 release agent fix -Patch0001: 0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch - # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch From 47916cb1b22762905a4972178f6d0707512e1a77 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 25 Feb 2022 10:19:56 +0000 Subject: [PATCH 0601/1356] kernel-5.4: Update to 5.4.176 Removes cgroup-v1 patch which has been included upstream. 
--- ...re-capabilities-to-set-release_agent.patch | 55 ------------------- packages/kernel-5.4/Cargo.toml | 4 +- packages/kernel-5.4/kernel-5.4.spec | 7 +-- 3 files changed, 4 insertions(+), 62 deletions(-) delete mode 100644 packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch diff --git a/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch b/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch deleted file mode 100644 index 51714169..00000000 --- a/packages/kernel-5.4/0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch +++ /dev/null @@ -1,55 +0,0 @@ -From da405f0c70e6daba64d112c9036fedc28906a937 Mon Sep 17 00:00:00 2001 -From: "Eric W. Biederman" -Date: Thu, 20 Jan 2022 11:04:01 -0600 -Subject: [PATCH] cgroup-v1: Require capabilities to set release_agent - -The cgroup release_agent is called with call_usermodehelper. The function -call_usermodehelper starts the release_agent with a full set fo capabilities. -Therefore require capabilities when setting the release_agaent. - -Reported-by: Tabitha Sable -Tested-by: Tabitha Sable -Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") -Cc: stable@vger.kernel.org # v2.6.24+ -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Tejun Heo ---- - kernel/cgroup/cgroup-v1.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - -diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c -index 2d0ef613ca07..07917ac2c303 100644 ---- a/kernel/cgroup/cgroup-v1.c -+++ b/kernel/cgroup/cgroup-v1.c -@@ -549,6 +549,15 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, - - BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); - -+ /* -+ * Release agent gets called with all capabilities, -+ * require capabilities to set release agent. -+ */ -+ -+ if ((of->file->f_cred->user_ns != &init_user_ns) || -+ !capable(CAP_SYS_ADMIN)) -+ return -EPERM; -+ - cgrp = cgroup_kn_lock_live(of->kn, false); - if (!cgrp) - return -ENODEV; -@@ -961,6 +970,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) - /* Specifying two release agents is forbidden */ - if (ctx->release_agent) - return cg_invalf(fc, "cgroup1: release_agent respecified"); -+ /* -+ * Release agent gets called with all capabilities, -+ * require capabilities to set release agent. -+ */ -+ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) -+ return cg_invalf(fc, "cgroup1: Setting release_agent not allowed"); - ctx->release_agent = param->string; - param->string = NULL; - break; --- -2.32.0 - diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index ecd7ae97..c3c82f2c 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/9d3d2fc3caf5bc68bcc257a426b1a3177f60f1acd62e27d772b58156c1b76e57/kernel-5.4.172-90.336.amzn2.src.rpm" -sha512 = "a99575479a7aa0f5aaf264d105435af48d1201ecb133e9b4842e21b1b6d73220aa41ba44f5b400fd6662d4325208567c4defc31f4fdbc53b46be560031e835ef" +url = "https://cdn.amazonlinux.com/blobstore/32434003a841c9b7972e98167c4819504db24bb87f1a757a3d4bfc16f3c9e4e1/kernel-5.4.176-91.338.amzn2.src.rpm" +sha512 = "0a82d9b33367962da076d36dedaa6dc94968dbbd4581b0ead6a565fe04f0d3364b0d10ae57150d989748a379036d95464c17b89273a56a814fd176e229706000" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 228f795d..440d6947 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,18 +1,15 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.172 +Version: 5.4.176 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/9d3d2fc3caf5bc68bcc257a426b1a3177f60f1acd62e27d772b58156c1b76e57/kernel-5.4.172-90.336.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/32434003a841c9b7972e98167c4819504db24bb87f1a757a3d4bfc16f3c9e4e1/kernel-5.4.176-91.338.amzn2.src.rpm Source100: config-bottlerocket -# cgroup v1 release agent fix -Patch0001: 0001-cgroup-v1-Require-capabilities-to-set-release_agent.patch - # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch From 76d98604e25815176176d4fef887196dc24bd660 Mon Sep 17 00:00:00 2001 From: mello7tre Date: Wed, 1 Sep 2021 14:48:44 +0200 Subject: [PATCH 0602/1356] models, cfsignal: CloudFormation signal program Created a new rust program, cfsignal to send signal to CloudFormation Stack. Program is a sort of cfn-signal https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-signal.html but as cfn-signal need python cannot be used by bottlerocket. cfsignal read configuration from a cfsignal.toml file configured reading user-data, so it depends on settings-applier.service. It cannot send a signal for a failure happening before settings-applier.service and network-online.target are started. It is able to send a failure signal for any other service starting from (included): activate-multi-user.service It use systemctl action is-system-running with --wait option. This way we can know if any service, after systemd boot process finished, is in a failure status. 
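For readers unfamiliar with the CloudFormation side, the flow described above is roughly equivalent to the shell sketch below. This is not the cfsignal implementation (cfsignal is a Rust program driven by cfsignal.toml); the stack name, logical resource ID, and the use of IMDSv1 here are placeholder assumptions:

```bash
#!/usr/bin/env bash
set -euo pipefail

STACK_NAME="my-stack"                # placeholders; cfsignal reads these from
LOGICAL_RESOURCE_ID="NodeGroupASG"   # cfsignal.toml, rendered from settings
INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"

# Wait for systemd to finish starting units; a non-zero exit means some unit failed.
if systemctl is-system-running --wait; then
  STATUS="SUCCESS"
else
  STATUS="FAILURE"
fi

aws cloudformation signal-resource \
  --stack-name "${STACK_NAME}" \
  --logical-resource-id "${LOGICAL_RESOURCE_ID}" \
  --unique-id "${INSTANCE_ID}" \
  --status "${STATUS}"
```
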
Requested changes: * removed author * signal parameter renamed to should_signal (is more specific that should_send) * added README.md * removed commented out lines * use imdsclient in place of ec2_instance_metadata * refactor service_check.rs and renamed to system_check.rs use weak dependency (WantedBy)for cfsignal.service use tokio LTS, only with needed features restart command some code refactor * use directly signal_resource as function * code simplification in system_check.rs * use standard boilerplate for main function semaphore file and migration * Use semaphore file to only run on first boot * Add migration file for downgrading * client.signal_resource collapsed * Fix to packages/os/os.spec: toml file is not copyed (introduced during rebase) Readme changes --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 6f94e5f2..fc315fec 100644 --- a/README.md +++ b/README.md @@ -419,6 +419,13 @@ These settings can be changed at any time. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. +#### CloudFormation signal helper settings + +For AWS variants, these settings allow you to set up CloudFormation signaling to indicate whether Bottlerocket hosts running in EC2 have been successfully created or updated: +* `settings.cloudformation.should-signal`: Whether to check status and send signal. Defaults to `false`. If set to `true`, both `stack-name` and `logical-resource-id` need to be specified. +* `settings.cloudformation.stack-name`: Name of the CloudFormation Stack to signal. +* `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. + #### OCI Hooks settings Bottlerocket allows you to opt-in to use additional [OCI hooks](https://github.com/opencontainers/runtime-spec/blob/main/runtime.md#lifecycle) for your orchestrated containers. From d4a23d31a69fd3743e4ed3416a51b46fa7c5cb19 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 28 Feb 2022 18:19:17 -0800 Subject: [PATCH 0603/1356] README: add info about "settings.container-registry.credentials" --- README.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/README.md b/README.md index fc315fec..13d48957 100644 --- a/README.md +++ b/README.md @@ -453,6 +453,30 @@ When pulling an image from a registry, the container runtime will try the endpoi For [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) images from Amazon ECR private repositories, registry mirrors are currently unsupported. +The following setting is optional and allows you to configure image registry credentials. +* `settings.container-registry.credentials`: An array of container images registry credential settings. Each element specifies the registry and the credential information for said registry. +The credential fields map to [containerd's registry credential fields](https://github.com/containerd/containerd/blob/v1.6.0/docs/cri/registry.md#configure-registry-credentials), which in turn map to the fields in `.docker/config.json`. +It is recommended to programmatically set these settings via `apiclient` through the Bottlerocket control container and/or custom host-containers. 
+ * An example `apiclient` call to set registry credentials for `gcr.io` and `docker.io` looks like this: + ```bash + apiclient set --json '{ + "container-registry": { + "credentials": [ + { + "registry": "gcr.io", + "username": "example_username", + "password": "example_password" + }, + { + "registry": "docker.io", + "auth": "example_base64_encoded_auth_string" + } + ] + } + }' + ``` +In addition to the container runtime daemons, these credential settings will also apply to [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) image pulls as well. + #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. From 3d6394812b1ffaf92861a4a9a56551cbd6472f86 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 16 Feb 2022 11:30:13 -0800 Subject: [PATCH 0604/1356] Add aws-k8s-1.22 variant Adds aws-k8s-1.22 variant, relinks symlinks in models --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c8f0d304..e88a4a84 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-ecs-1] + variant: [aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] From 9145a12c1584919d810f913f192fccb6f0022772 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 16 Feb 2022 11:38:49 -0800 Subject: [PATCH 0605/1356] Add vmware-k8s-1.22 variant Adds vmware-k8s-1.22 variant, relinks symlinks in models Update example vmware k8s version in quickstart to v1.21 --- .github/workflows/build.yml | 4 ++++ README.md | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e88a4a84..1db90f5e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -53,6 +53,10 @@ jobs: arch: x86_64 supported: true fetch-upstream: "false" + - variant: vmware-k8s-1.22 + arch: x86_64 + supported: true + fetch-upstream: "false" - variant: aws-k8s-1.21-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index 13d48957..f4e44f97 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,7 @@ We also have variants in preview status that are designed to be Kubernetes worke - `vmware-k8s-1.20` - `vmware-k8s-1.21` +- `vmware-k8s-1.22` The `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17` variants are no longer supported. We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). @@ -378,7 +379,7 @@ Static pods can be particularly useful when running in standalone mode. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. -In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.21/defaults.d/). +In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.22/defaults.d/). * `settings.kubernetes.node-ip`: The IP address of this node. 
* `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. From f853b3b770636795f55eda3a44c7925bfcc5e31c Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 17 Feb 2022 16:56:35 -0800 Subject: [PATCH 0606/1356] Add aws-k8s-1.22-nvidia variant Adds aws-k8s-1.22-nvidia variant, create symlinks for previous k8s version nvidia variant in models. --- .github/workflows/build.yml | 8 ++++++++ BUILDING.md | 5 +++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1db90f5e..fd2b10e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -65,6 +65,14 @@ jobs: arch: aarch64 supported: true fetch-upstream: "true" + - variant: aws-k8s-1.22-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-k8s-1.22-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" fail-fast: false steps: - uses: actions/checkout@v2 diff --git a/BUILDING.md b/BUILDING.md index c227bdd4..0a0f65d3 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -123,7 +123,7 @@ licenses = [ #### NVIDIA variants -If you want to build the `aws-k8s-1.21-nvidia` variant, you can follow these steps to prepare a `Licenses.toml` file using the [License for customer use of NVIDIA software](https://www.nvidia.com/en-us/drivers/nvidia-license/): +If you want to build any of the NVIDIA variants, you can follow these steps to prepare a `Licenses.toml` file using the [License for customer use of NVIDIA software](https://www.nvidia.com/en-us/drivers/nvidia-license/): 1. Create a `Licenses.toml` file in your Bottlerocket root directory, with the following content: @@ -144,8 +144,9 @@ cargo make fetch-licenses -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true 3. Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: ```shell +K8S_VERSION=1.22 cargo make \ - -e BUILDSYS_VARIANT=aws-k8s-1.21-nvidia \ + -e BUILDSYS_VARIANT=aws-k8s-${K8S_VERSION}-nvidia \ -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="true" ``` From 4f08246ea73444d80ff9c7e554bea45c027c96bc Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 18 Feb 2022 10:41:59 -0800 Subject: [PATCH 0607/1356] Add metal-k8s-1.22 variant This change adds an additional variant `metal-k8s-1.22`, which includes necessary Kubernetes packages and settings for running Bottlerocket on metal in a Kubernetes v1.22 cluster. --- .github/workflows/build.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fd2b10e7..c0277a7a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -45,6 +45,10 @@ jobs: arch: x86_64 supported: false fetch-upstream: "false" + - variant: metal-k8s-1.22 + arch: x86_64 + supported: false + fetch-upstream: "false" - variant: vmware-k8s-1.20 arch: x86_64 supported: true From 0f7ac31fe4c54a5b4de4b3039da3dd2dc5ed1312 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 7 Mar 2022 12:57:26 -0800 Subject: [PATCH 0608/1356] QUICKSTART, README: remove notes about vmware variant's preview status vmware-k8s variants have graduated from preview status. 
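If you want to try one of the variants added above, the build follows the same cargo make pattern documented in BUILDING.md; the variant and architecture values below are only examples:

```bash
# Example: build the new VMware Kubernetes 1.22 variant for x86_64.
cargo make \
  -e BUILDSYS_VARIANT=vmware-k8s-1.22 \
  -e BUILDSYS_ARCH=x86_64
```
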
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f4e44f97..1482d649 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ The following variant supports ECS: - `aws-ecs-1` -We also have variants in preview status that are designed to be Kubernetes worker nodes in VMware: +We also have variants that are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.20` - `vmware-k8s-1.21` From b88731853da5c7ca491832afcab74086e151b3db Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Mon, 7 Mar 2022 18:55:10 +0000 Subject: [PATCH 0609/1356] kernel-5.4: Update to 5.4.181 --- ...fsx-Disable-Werror-stringop-overflow.patch | 20 ------------------- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 7 ++----- 3 files changed, 4 insertions(+), 27 deletions(-) delete mode 100644 packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch diff --git a/packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch b/packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch deleted file mode 100644 index b5273f98..00000000 --- a/packages/kernel-5.4/3001-lustrefsx-Disable-Werror-stringop-overflow.patch +++ /dev/null @@ -1,20 +0,0 @@ -From a5f6b26082e0022d3c3e70e0718e4787939778d8 Mon Sep 17 00:00:00 2001 -From: iliana destroyer of worlds -Date: Tue, 30 Jul 2019 12:59:09 -0700 -Subject: [PATCH] lustrefsx: Disable -Werror=stringop-overflow= - -Signed-off-by: iliana destroyer of worlds ---- - drivers/staging/lustrefsx/Makefile.rules | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/staging/lustrefsx/Makefile.rules b/drivers/staging/lustrefsx/Makefile.rules -index a0d56e80f2ce..62390580a3b4 100644 ---- a/drivers/staging/lustrefsx/Makefile.rules -+++ b/drivers/staging/lustrefsx/Makefile.rules -@@ -3,4 +3,4 @@ ccflags-y += -include $(srctree)/drivers/staging/lustrefsx/config.h - ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/libcfs/include - ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/lnet/include - ccflags-y += -I$(srctree)/drivers/staging/lustrefsx/lustre/include --ccflags-y += -Wno-format-truncation -Werror -+ccflags-y += -Wno-format-truncation -Werror -Wno-error=stringop-overflow= diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index c3c82f2c..817a2d91 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/32434003a841c9b7972e98167c4819504db24bb87f1a757a3d4bfc16f3c9e4e1/kernel-5.4.176-91.338.amzn2.src.rpm" -sha512 = "0a82d9b33367962da076d36dedaa6dc94968dbbd4581b0ead6a565fe04f0d3364b0d10ae57150d989748a379036d95464c17b89273a56a814fd176e229706000" +url = "https://cdn.amazonlinux.com/blobstore/d8a7e800750161a038954b2685ca8c5fb0a0dac22057530c4c0233d60f06c2d3/kernel-5.4.181-99.354.amzn2.src.rpm" +sha512 = "39903e5164ea966b62ddfa70ffd9a73ba50af363cf87d20011ad8d2f1e471857b79503da75770a1e812058c9cd2a17a88000e6e9a4c44580d3c4210144aa3993" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 440d6947..9896881a 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.176 +Version: 5.4.181 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/32434003a841c9b7972e98167c4819504db24bb87f1a757a3d4bfc16f3c9e4e1/kernel-5.4.176-91.338.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/d8a7e800750161a038954b2685ca8c5fb0a0dac22057530c4c0233d60f06c2d3/kernel-5.4.181-99.354.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. @@ -24,9 +24,6 @@ Patch2005: 2005-.gitignore-Add-ZSTD-compressed-files.patch Patch2006: 2006-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2007: 2007-kbuild-add-support-for-zstd-compressed-modules.patch -# Make Lustre FSx work with a newer GCC. -Patch3001: 3001-lustrefsx-Disable-Werror-stringop-overflow.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From e58dcc41f92261b4e9d1cfbecfc05fce0f9bec05 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Mon, 7 Mar 2022 19:06:37 +0000 Subject: [PATCH 0610/1356] kernel-5.10: Update to 5.10.102 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index ed9e3f77..b0127392 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/3d651c178d1a236b1ea38ddbd548d5402442c60003960ea61daf53e52d360d36/kernel-5.10.96-90.460.amzn2.src.rpm" -sha512 = "d95699f9a039f91899a5772081e17d523c6752faf0fd2edfc2f1394b2b0ba6cee05b05af234f029e3d0ef705b4ee05d86be92413215265db379d8c8ad21bdbbd" +url = "https://cdn.amazonlinux.com/blobstore/abd0b3e08ff7d32abb916b2664e8de68bd7d16dbbfdcfe8e574d832aa19a3b1e/kernel-5.10.102-99.473.amzn2.src.rpm" +sha512 = "ed17395fed0480d87e59f80899953641169fae7ef2f34eb74bad66ff92b2eec5c72dbff4a08af49de516cde8fe96218a102857e048073dd6d48fb73be4ef19e0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 3a1bb6b3..e61f5e07 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.96 +Version: 5.10.102 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/3d651c178d1a236b1ea38ddbd548d5402442c60003960ea61daf53e52d360d36/kernel-5.10.96-90.460.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/abd0b3e08ff7d32abb916b2664e8de68bd7d16dbbfdcfe8e574d832aa19a3b1e/kernel-5.10.102-99.473.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 2a65342e3e4448f311dd5b71a9243af8f83a9238 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 7 Mar 2022 22:17:53 +0000 Subject: [PATCH 0611/1356] kernel: fix merge_config.sh invocation ARCH and CROSS_COMPILE must be set in the environment to prevent the build host's architecture from influencing the generated config. 
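To make the fix concrete, here is the same invocation with the spec macros expanded by hand for an aarch64 build; the toolchain prefix and config file names are assumptions for illustration. Without the exported variables, kbuild falls back to the build host's architecture when resolving Kconfig defaults:

```bash
export ARCH="arm64"
export CROSS_COMPILE="aarch64-bottlerocket-linux-gnu-"

KCONFIG_CONFIG="arch/arm64/configs/bottlerocket_defconfig" \
  scripts/kconfig/merge_config.sh ../config-aarch64
```
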
Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 4 +++- packages/kernel-5.4/kernel-5.4.spec | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index e61f5e07..7f8de017 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -84,8 +84,10 @@ CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" EOF %endif +export ARCH="%{_cross_karch}" +export CROSS_COMPILE="%{_cross_target}-" + KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ -ARCH="%{_cross_karch}" \ scripts/kconfig/merge_config.sh \ ../config-%{_cross_arch} \ %if "%{_cross_arch}" == "x86_64" diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 9896881a..b7ce83d7 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -91,8 +91,10 @@ CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" EOF %endif +export ARCH="%{_cross_karch}" +export CROSS_COMPILE="%{_cross_target}-" + KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ -ARCH="%{_cross_karch}" \ scripts/kconfig/merge_config.sh \ ../config-%{_cross_arch} \ %if "%{_cross_arch}" == "x86_64" From 0d0c16cbc30342906f6ad5b0cf2b0c935e188b5f Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Tue, 22 Mar 2022 13:46:42 -0700 Subject: [PATCH 0612/1356] documentation: explain data partition extension --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1482d649..7fda462e 100644 --- a/README.md +++ b/README.md @@ -811,6 +811,10 @@ For more details, see the [API system documentation](sources/api/). ### Default Volumes Bottlerocket operates with two default storage volumes. -* The root device, `/dev/xvda`, holds the active and passive [partition sets](#updates-1). +* The root device, holds the active and passive [partition sets](#updates-1). It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. -* The data device, `/dev/xvdb`, is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). +* The data device is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). + +On boot Bottlerocket will increase the data partition size to use all of the data device. +If you increase the size of the device, you can reboot Bottlerocket to extend the data partition. +If you need to extend the data partition without rebooting, have a look at this [discussion](https://github.com/bottlerocket-os/bottlerocket/discussions/2011). From e4cbabc88a969b069bd43b52646a5151391c174b Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Thu, 10 Mar 2022 18:23:47 +0000 Subject: [PATCH 0613/1356] build: inventory installed packages This adds logic to `rpm2img` to generate an inventory of the installed rpms to `/usr/share/bottlerocket/application-inventory.json`. 
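Once an image is built, the inventory can be inspected directly; the path comes from the commit message above, and the field names follow the rpm query format in the script below. The jq filters are illustrative:

```bash
# List package name and version/release pairs from the inventory.
jq -r '.Content[] | "\(.Name) \(.Version)-\(.Release)"' \
  /usr/share/bottlerocket/application-inventory.json | head

# Count how many packages were inventoried.
jq '.Content | length' /usr/share/bottlerocket/application-inventory.json
```
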
--- tools/rpm2img | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tools/rpm2img b/tools/rpm2img index 956d44a3..f5a0713c 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -8,6 +8,7 @@ shopt -qs failglob . "${0%/*}/partyplanner" OUTPUT_FMT="raw" +BUILDER_ARCH="$(uname -m)" for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" @@ -135,7 +136,38 @@ if [ "${PARTITION_PLAN}" == "split" ] ; then --sort --print "${DATA_IMAGE}" fi +INSTALL_TIME="$(date -u +%Y-%m-%dT%H:%M:%SZ)" rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm + +# inventory installed packages +INVENTORY_QUERY="\{\"Name\":\"%{NAME}\"\ +,\"Publisher\":\"Bottlerocket\"\ +,\"Version\":\"${VERSION_ID}\"\ +,\"Release\":\"${BUILD_ID}\"\ +,\"InstalledTime\":\"${INSTALL_TIME}\"\ +,\"ApplicationType\":\"%{GROUP}\"\ +,\"Architecture\":\"%{ARCH}\"\ +,\"Url\":\"%{URL}\"\ +,\"Summary\":\"%{Summary}\"\}\n" + +mapfile -t installed_rpms <<< "$(rpm -qa --root "${ROOT_MOUNT}" \ + --queryformat "${INVENTORY_QUERY}")" + +# wrap installed_rpms mapfile into json +INVENTORY_DATA="$(jq --raw-output . <<< "${installed_rpms[@]}")" +# replace the package architecture with the target architecture (for cross-compiled builds) +if [[ "${BUILDER_ARCH}" != "${ARCH}" ]]; then + INVENTORY_DATA="$(jq --arg BUILDER_ARCH "${BUILDER_ARCH}" --arg TARGET_ARCH "${ARCH}" \ + '(.Architecture) |= sub($BUILDER_ARCH; $TARGET_ARCH)' <<< "${INVENTORY_DATA}")" +fi +# remove the 'bottlerocket--' prefix from package names +INVENTORY_DATA="$(jq --arg PKG_PREFIX "bottlerocket-${ARCH}-" \ + '(.Name) |= sub($PKG_PREFIX; "")' <<< "${INVENTORY_DATA}")" +# sort by package name and add 'Content' as top-level +INVENTORY_DATA="$(jq --slurp 'sort_by(.Name)' <<< "${INVENTORY_DATA}" | jq '{"Content": .}')" +printf "%s\n" "${INVENTORY_DATA}" > "${ROOT_MOUNT}/usr/share/bottlerocket/application-inventory.json" + +# install licenses install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ mksquashfs \ "${ROOT_MOUNT}"/usr/share/licenses \ From bfc9eacf759d30146f80d2d14368a1922705d988 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 23 Mar 2022 10:37:25 -0700 Subject: [PATCH 0614/1356] tools: update cargo dependencies --- tools/Cargo.lock | 284 ++++++++++++++++++++++++----------------------- 1 file changed, 145 insertions(+), 139 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 08734391..f0d79115 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ "generic-array", ] @@ -169,7 +169,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2 0.10.1", + "sha2 0.10.2", "snafu 0.7.0", "toml", "url", @@ -205,9 +205,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cfg-if" @@ -281,9 +281,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -297,27 +297,27 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if", "crossbeam-utils", @@ -336,10 +336,11 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ + "autocfg", "cfg-if", "crossbeam-utils", "lazy_static", @@ -349,9 +350,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ "cfg-if", "lazy_static", @@ -359,11 +360,12 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" dependencies = [ "generic-array", + "typenum", ] [[package]] @@ -396,13 +398,12 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" dependencies = [ - "block-buffer 0.10.0", + "block-buffer 0.10.2", "crypto-common", - "generic-array", ] [[package]] @@ -446,9 +447,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" +checksum = 
"21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" [[package]] name = "either" @@ -498,9 +499,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", @@ -513,9 +514,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -523,15 +524,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-executor" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" dependencies = [ "futures-core", "futures-task", @@ -540,15 +541,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ "proc-macro2", "quote", @@ -557,21 +558,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-channel", "futures-core", @@ -597,9 +598,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" dependencies = [ 
"cfg-if", "libc", @@ -627,9 +628,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" +checksum = "62eeb471aa3e3c9197aa4bfeabfe02982f6dc96f750486c0bb0009ac58b26d2b" dependencies = [ "bytes", "fnv", @@ -701,7 +702,7 @@ checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -717,9 +718,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" [[package]] name = "httpdate" @@ -729,9 +730,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes", "futures-channel", @@ -742,7 +743,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", "socket2", "tokio", @@ -776,9 +777,9 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.2", + "rustls 0.20.4", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls 0.23.3", ] [[package]] @@ -829,7 +830,7 @@ dependencies = [ "rusoto_s3", "serde_json", "serde_yaml", - "sha2 0.10.1", + "sha2 0.10.2", "shell-words", "simplelog", "snafu 0.7.0", @@ -850,15 +851,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" - -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" [[package]] name = "itoa" @@ -883,9 +878,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.117" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" +checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" [[package]] name = "linked-hash-map" @@ -904,9 +899,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.14" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" dependencies = [ "cfg-if", ] @@ -989,9 +984,9 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = 
"c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] @@ -1053,9 +1048,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" [[package]] name = "opaque-debug" @@ -1115,18 +1110,18 @@ dependencies = [ [[package]] name = "path-absolutize" -version = "3.0.11" +version = "3.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b288298a7a3a7b42539e3181ba590d32f2d91237b0691ed5f103875c754b3bf5" +checksum = "0a2a79d7c1c4eab523515c4561459b10516d6e7014aa76edc3ea05680d5c5d2d" dependencies = [ "path-dedot", ] [[package]] name = "path-dedot" -version = "3.0.14" +version = "3.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bfa72956f6be8524f7f7e2b07972dda393cb0008a6df4451f658b7e1bd1af80" +checksum = "f326e2a3331685a5e3d4633bb9836bd92126e08037cb512252f3612f616a0b28" dependencies = [ "once_cell", ] @@ -1265,7 +1260,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2 0.10.1", + "sha2 0.10.2", "shell-words", "simplelog", "snafu 0.7.0", @@ -1277,23 +1272,22 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +checksum = "b4af2ec4714533fcdf07e886f17025ace8b997b9ce51204ee69b6da831c3da57" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", - "rand_hc", ] [[package]] @@ -1315,15 +1309,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core", -] - [[package]] name = "rayon" version = "1.5.1" @@ -1351,28 +1336,29 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "7776223e2696f1aa4c6b0170e83212f47296a00424305117d013dfe86fb0fe55" dependencies = [ "getrandom", "redox_syscall", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -1396,9 +1382,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.9" +version = "0.11.10" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" +checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" dependencies = [ "base64", "bytes", @@ -1417,13 +1403,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls 0.20.2", + "rustls 0.20.4", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls 0.23.3", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1645,9 +1631,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ "log", "ring", @@ -1669,9 +1655,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" dependencies = [ "base64", ] @@ -1729,9 +1715,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fed7948b6c68acbb6e20c334f55ad635dc0f75506963de4464289fbd3b051ac" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags", "core-foundation", @@ -1742,9 +1728,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57321bf8bc2362081b2599912d2961fe899c0efadf1b4b2f8d48b3e253bb96c" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", @@ -1752,9 +1738,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d" dependencies = [ "serde", ] @@ -1781,11 +1767,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -1806,7 +1792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -1838,13 +1824,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" +checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.1", + "digest 0.10.3", ] [[package]] @@ -1859,9 +1845,9 @@ dependencies = [ [[package]] name = "shell-words" 
-version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fa3938c99da4914afedd13bf3d79bcb6c277d1b2c398d23257a304d9e1b074" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shlex" @@ -1999,9 +1985,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "ea297be220d52398dcc07ce15a209fce436d361735ac1db700cab3b6cdfb9f54" dependencies = [ "proc-macro2", "quote", @@ -2024,9 +2010,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ "winapi-util", ] @@ -2050,6 +2036,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "time" version = "0.1.43" @@ -2129,11 +2135,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" dependencies = [ - "rustls 0.20.2", + "rustls 0.20.4", "tokio", "webpki 0.22.0", ] @@ -2239,9 +2245,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" dependencies = [ "cfg-if", "pin-project-lite", @@ -2250,9 +2256,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "aa31669fa42c09c34d94d8165dd2012e8ff3c66aca50f3bb226b68f216f2706c" dependencies = [ "lazy_static", ] @@ -2286,9 +2292,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" [[package]] name = "unicode-width" @@ -2513,9 +2519,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] @@ -2537,6 +2543,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" +checksum = "7eb5728b8afd3f280a869ce1d4c554ffaed35f45c231fc41bfbd0381bef50317" From 2f1fa05803a86398899ac7d2d31e7cb984d5c659 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 23 Mar 2022 23:11:07 +0000 Subject: [PATCH 0615/1356] kmod-5.10-nvidia: fix tmpfilesd configurations Now the directory to store the NVIDIA kernel module is created using the full path with `PREFIX`, instead of the symliked directory `/lib/modules` Signed-off-by: Arnaldo Garcia Rincon --- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 4 +++- packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index df60a9d0..235f8ae6 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -84,7 +84,9 @@ install -d %{buildroot}%{_cross_unitdir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) -sed -e "s|__KERNEL_VERSION__|${KERNEL_VERSION}|" %{S:200} > nvidia.conf +sed \ + -e "s|__KERNEL_VERSION__|${KERNEL_VERSION}|" \ + -e "s|__PREFIX__|%{_cross_prefix}|" %{S:200} > nvidia.conf install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} # Install modules-load.d drop-in to autoload required kernel modules diff --git a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in index d95dbad9..d4763f28 100644 --- a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in @@ -1 +1,2 @@ -D /lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - +d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - From 83cdd66756b9e5d309b065aa9319f81b787e6288 Mon Sep 17 00:00:00 2001 From: Matt Briggs Date: Sun, 27 Mar 2022 21:02:37 -0700 Subject: [PATCH 0616/1356] enable dependabot --- .github/dependabot.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..0d902da0 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,22 @@ +version: 2 +updates: + + - package-ecosystem: "cargo" + directory: "/sources" + schedule: + interval: "daily" + + - package-ecosystem: "cargo" + directory: "/tools" + schedule: + interval: "daily" + + - package-ecosystem: "gomod" + directory: "/sources/host-ctr" + schedule: + interval: "daily" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" From e964303da5f51efea2ac66144cddaf44b1aefac8 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 29 Mar 2022 00:20:51 +0000 Subject: [PATCH 0617/1356] README: mention 'enter-admin-container' where appropriate --- README.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 7fda462e..2aa69591 100644 --- a/README.md +++ b/README.md @@ -134,10 +134,10 @@ aws ssm start-session --target INSTANCE_ID With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make [API calls](#api) to configure and manage your Bottlerocket host. To do even more, read the next section about the [admin container](#admin-container). -If you've enabled the admin container, you can access it from the control container like this: +You can access the admin container from the control container like this: ``` -apiclient exec admin bash +enter-admin-container ``` ### Admin container @@ -160,13 +160,19 @@ If Bottlerocket is already running, you can enable the admin container from the enable-admin-container ``` +Or you can start an interactive session immediately like this: + +``` +enter-admin-container +``` + If you're using a custom control container, or want to make the API calls directly, you can enable the admin container like this instead: ``` apiclient set host-containers.admin.enabled=true ``` -Once you've enabled the admin container, you can either access it through SSH or from the control container like this: +Once you've enabled the admin container, you can either access it through SSH or execute commands from the control container like this: ``` apiclient exec admin bash @@ -508,8 +514,8 @@ These settings will configure the proxying behavior of the following services: * [ecs.service](packages/ecs-agent/ecs.service) * `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. -* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. - Example: +* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. + Example: ``` [settings.network] https-proxy = "1.2.3.4:8080" From 21114f5da2832ef293926baa356acb28f8512c9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Mar 2022 18:27:02 +0000 Subject: [PATCH 0618/1356] build(deps): bump actions/checkout from 2 to 3 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c0277a7a..e63035dd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -79,7 +79,7 @@ jobs: fetch-upstream: "true" fail-fast: false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - run: rustup toolchain install 1.58.1 && rustup default 1.58.1 - run: cargo install --version 0.35.8 cargo-make - if: contains(matrix.variant, 'nvidia') From 3f1acbab89c96907d109550abe6a8c5d968e09d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Mar 2022 18:27:50 +0000 Subject: [PATCH 0619/1356] build(deps): bump semver from 1.0.6 to 1.0.7 in /tools Bumps [semver](https://github.com/dtolnay/semver) from 1.0.6 to 1.0.7. - [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.6...1.0.7) --- updated-dependencies: - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index f0d79115..a37722a9 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1738,9 +1738,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d" +checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" dependencies = [ "serde", ] From db68742f5d9cc60cdbb9e7fba873cfc6fba5bc07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Mar 2022 18:27:38 +0000 Subject: [PATCH 0620/1356] build(deps): bump async-trait from 0.1.52 to 0.1.53 in /tools Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.52 to 0.1.53. - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.52...0.1.53) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- tools/infrasys/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a37722a9..8d9d3235 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 21c56660..d997baed 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" publish = false [dependencies] -async-trait = "0.1.51" +async-trait = "0.1.53" clap = "2.33" hex = "0.4.0" log = "0.4.14" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index dab33fea..e9b4d992 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" publish = false [dependencies] -async-trait = "0.1.36" +async-trait = "0.1.53" chrono = "0.4" clap = "2.33" coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} From edbb8e702072e3e6bd380e241cde393c5416464b Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 1 Mar 2022 23:05:47 +0000 Subject: [PATCH 0621/1356] grub: Set `private` variable via search We currently carry a few patches to enable `search` by partition label and uuid, but don't include the `search` module. This commit adds the module to enable this functionality in our grub config. This commit also adds an additional command to our grub configs. The command uses the `search` module to set the `private` variable to the location of the `BOTTLEROCKET-PRIVATE` partition. The `private` variable will be used to populate the grub menuentry that points to the bootconfig file. --- packages/grub/bios.cfg | 1 + packages/grub/efi.cfg | 1 + packages/grub/grub.spec | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/grub/bios.cfg b/packages/grub/bios.cfg index 06e7f532..ab8b5e14 100644 --- a/packages/grub/bios.cfg +++ b/packages/grub/bios.cfg @@ -5,6 +5,7 @@ gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub export boot_uuid +search --no-floppy --set private --part-label BOTTLEROCKET-PRIVATE configfile /grub/grub.cfg echo "boot failed (device ($boot_dev), uuid $boot_uuid)" echo "rebooting in 30 seconds..." diff --git a/packages/grub/efi.cfg b/packages/grub/efi.cfg index cde1542b..5d2fd3e6 100644 --- a/packages/grub/efi.cfg +++ b/packages/grub/efi.cfg @@ -4,6 +4,7 @@ gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub export boot_uuid +search --no-floppy --set private --part-label BOTTLEROCKET-PRIVATE configfile /grub/grub.cfg echo "boot failed (device ($boot_dev), uuid $boot_uuid)" echo "rebooting in 30 seconds..." 
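The partition that the new `search --part-label BOTTLEROCKET-PRIVATE` command locates at boot can also be identified from a running host; these commands are illustrative and not part of the change above:

```bash
# Show the partition carrying the BOTTLEROCKET-PRIVATE label.
lsblk -o NAME,PARTLABEL,SIZE | grep -i 'BOTTLEROCKET-PRIVATE'
ls -l /dev/disk/by-partlabel/BOTTLEROCKET-PRIVATE
```
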
diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 145c3f06..b453f6ae 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -171,7 +171,7 @@ pushd efi-build popd %install -MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd" +MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd search" %if "%{_cross_arch}" == "x86_64" pushd bios-build From 275bcd2727cf89acee7a36902fe05498eb40fb29 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 1 Mar 2022 23:42:22 +0000 Subject: [PATCH 0622/1356] kernel-5.10: Add CONFIG_BOOT_CONFIG Adds `CONFIG_BOOT_CONFIG` to the kernel 5.10 config which enables the use of the "Boot config" mechanism. This allows a user to pass a configuration file attached to the end of the initrd which is used to extend the kernel command line at runtime. --- packages/kernel-5.10/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 835980ce..33d45b5c 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -117,3 +117,6 @@ CONFIG_MOUSE_PS2=m # Add support for IPMI drivers CONFIG_IPMI_HANDLER=m + +# Add support for bootconfig +CONFIG_BOOT_CONFIG=y From fd580a1469c4b4206a4fc9cea1d27074892831c3 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 29 Mar 2022 21:02:31 +0000 Subject: [PATCH 0623/1356] buildsys: Add `grub-features` property to variant config This change adds a `grub-features` property to the supported configuration that can be set in a variant's `Cargo.toml`. This property is meant to gate certain features of grub and it's config to variants that support it. Since grub isn't updated during OS update, it's important to ensure that variants which are known to support a certain grub feature receive the proper grub config. Currently, the only supported parameter is `set-private-var`. This parameter means the variant has a grub config that understands how to search for and find the BOTTLEROCKET_PRIVATE partition, and set the corresponding `$private` variable. 
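For example, a variant that supports this feature opts in through its `Cargo.toml` manifest, as documented in the `manifest.rs` changes below:

```toml
# Example variant manifest entry (mirrors the documentation added to
# tools/buildsys/src/manifest.rs in this patch).
[package.metadata.build-variant]
grub-features = [
    "set-private-var",
]
```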
--- tools/buildsys/src/builder.rs | 15 ++++++++++++++- tools/buildsys/src/main.rs | 11 +++++++++-- tools/buildsys/src/manifest.rs | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 28da087a..eb960eca 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -21,7 +21,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use crate::manifest::{ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; +use crate::manifest::{GrubFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -118,6 +118,7 @@ impl VariantBuilder { image_format: Option<&ImageFormat>, image_layout: Option<&ImageLayout>, kernel_parameters: Option<&Vec>, + grub_features: Option<&Vec>, ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); @@ -167,6 +168,18 @@ impl VariantBuilder { .unwrap_or_else(|| "".to_string()), ); + args.build_arg( + "GRUB_FEATURES", + grub_features + .map(|v| { + v.iter() + .map(|f| f.to_string()) + .collect::>() + .join(" ") + }) + .unwrap_or_else(|| "".to_string()), + ); + // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. getenv("BUILDSYS_TIMESTAMP")?; diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index e8ab2427..7b6cad11 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -194,8 +194,15 @@ fn build_variant() -> Result<()> { let image_format = manifest.image_format(); let image_layout = manifest.image_layout(); let kernel_parameters = manifest.kernel_parameters(); - VariantBuilder::build(&packages, image_format, image_layout, kernel_parameters) - .context(error::BuildAttemptSnafu)?; + let grub_features = manifest.grub_features(); + VariantBuilder::build( + &packages, + image_format, + image_layout, + kernel_parameters, + grub_features, + ) + .context(error::BuildAttemptSnafu)?; } else { println!("cargo:warning=No included packages in manifest. Skipping variant build."); } diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 82bb7bae..a3442826 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -112,6 +112,19 @@ The given parameters are inserted at the start of the command line. kernel-parameters = [ "console=ttyS42", ] + +`grub-features` is a list of supported grub features. +This list allows us to conditionally use or exclude certain grub features in specific variants. +The only supported value at this time is `set-private-var`. +This value means that the grub config for the current variant includes the command to find the +BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub to +consume. +Adding this value to `grub-features` enables the use of Boot Config. +``` +[package.metadata.build-variant] +grub-features = [ + "set-private-var", +] ``` */ @@ -190,6 +203,11 @@ impl ManifestInfo { .and_then(|b| b.kernel_parameters.as_ref()) } + /// Convenience method to return the GRUB features for this variant. + pub(crate) fn grub_features(&self) -> Option<&Vec> { + self.build_variant().and_then(|b| b.grub_features.as_ref()) + } + /// Helper methods to navigate the series of optional struct fields. 
fn build_package(&self) -> Option<&BuildPackage> { self.package @@ -238,6 +256,7 @@ pub(crate) struct BuildVariant { pub(crate) image_layout: Option, pub(crate) supported_arches: Option>, pub(crate) kernel_parameters: Option>, + pub(crate) grub_features: Option>, } #[derive(Deserialize, Debug)] @@ -313,6 +332,20 @@ impl SupportedArch { } } +#[derive(Deserialize, Debug, PartialEq, Eq, Hash)] +#[serde(rename_all = "kebab-case")] +pub(crate) enum GrubFeature { + SetPrivateVar, +} + +impl fmt::Display for GrubFeature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + GrubFeature::SetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"), + } + } +} + #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] pub(crate) struct ExternalFile { From e5203faff56e106eecff90ab67f781c67c7c70e1 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 2 Mar 2022 18:19:44 +0000 Subject: [PATCH 0624/1356] rpm2img: Conditionally add bootconfig parameters to grub menuentry This change conditionally adds the `bootconfig` parameter to the kernel command line if `set-private-var` is added to a variants `grub-features`. The `bootconfig` parameter signals the kernel to look for the Boot Configuration at the end of the specified initrd. This change also adds the corresponding initrd entry to the grub menuentry which points at the bootconfig file on the `BOTTLEROCKET-PRIVATE` partition. Bottlerocket technically doesn't use an initrd, so this initrd will only contain Boot Configuration data. If the file doesn't contain Boot Config data, the kernel logs a message but does not fail to boot. An empty `bootconfig.data` file is created and added to the `BOTTLEROCKET_PRIVATE` partition to keep grub from pausing and printing an error that the file doesn't exist. --- tools/empty-bootconfig.data | Bin 0 -> 40 bytes tools/rpm2img | 18 +++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 tools/empty-bootconfig.data diff --git a/tools/empty-bootconfig.data b/tools/empty-bootconfig.data new file mode 100644 index 0000000000000000000000000000000000000000..c184eba1a6c1b48ca5c5714544edfc47768da0f1 GIT binary patch literal 40 scmc~!Ey_#HQK+uv%FN3w0nrQ$A`A=+m23 "${BOOT_MOUNT}/grub/grub.cfg" set default="0" set timeout="0" @@ -261,6 +272,7 @@ set timeout="0" menuentry "${PRETTY_NAME} ${VERSION_ID}" { linux (\$root)/vmlinuz root=/dev/dm-0 \\ ${KERNEL_PARAMETERS} \\ + ${BOOTCONFIG} \\ rootwait ro \\ raid=noautodetect \\ random.trust_cpu=on selinux=1 enforcing=1 \\ @@ -268,6 +280,7 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" + ${INITRD} } EOF @@ -283,12 +296,15 @@ dd if="${BOOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A # BOTTLEROCKET-PRIVATE +# Copy the empty bootconfig file into the image so grub doesn't pause and print +# an error that the file doesn't exist +cp /host/tools/empty-bootconfig.data "${PRIVATE_MOUNT}/bootconfig.data" # Targeted toward the current API server implementation. 
# Relative to the ext4 defaults, we: # - adjust the inode ratio since we expect lots of small files # - retain the inode size to allow most settings to be stored inline # - retain the block size to handle worse-case alignment for hardware -mkfs.ext4 -b 4096 -i 4096 -I 256 "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" +mkfs.ext4 -b 4096 -i 4096 -I 256 -d "${PRIVATE_MOUNT}" "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" dd if="${PRIVATE_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" # BOTTLEROCKET-DATA From bf7b4b8794e8726013da88a06f8f4d8fd1a719a6 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Wed, 2 Mar 2022 18:33:13 +0000 Subject: [PATCH 0625/1356] rpm2img: Reorganize kernel/init command line parameters This change organizes the kernel and init command line parameters and moves the init parameters to the section after the "--" where they belong. --- tools/rpm2img | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 2a9eb34e..b8af8e04 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -270,16 +270,18 @@ set default="0" set timeout="0" menuentry "${PRETTY_NAME} ${VERSION_ID}" { - linux (\$root)/vmlinuz root=/dev/dm-0 \\ + linux (\$root)/vmlinuz \\ ${KERNEL_PARAMETERS} \\ ${BOOTCONFIG} \\ - rootwait ro \\ + root=/dev/dm-0 rootwait ro \\ raid=noautodetect \\ random.trust_cpu=on selinux=1 enforcing=1 \\ - systemd.log_target=journal-or-kmsg systemd.log_color=0 net.ifnames=0 \\ - biosdevname=0 dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ + dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ - $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" + $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" \\ + -- \\ + systemd.log_target=journal-or-kmsg systemd.log_color=0 \\ + net.ifnames=0 biosdevname=0 ${INITRD} } EOF From 37ed931433051aeea5cce2ab88fa36fe64cbf37f Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Mon, 11 Apr 2022 17:24:04 +0000 Subject: [PATCH 0626/1356] tools: update clap to 3.1.8 --- tools/Cargo.lock | 48 ++++++++++++++++--- tools/infrasys/Cargo.toml | 10 ++-- tools/infrasys/src/main.rs | 2 +- tools/pubsys/Cargo.toml | 2 +- tools/pubsys/src/aws/ami/mod.rs | 2 +- tools/pubsys/src/aws/promote_ssm/mod.rs | 2 +- tools/pubsys/src/aws/publish_ami/mod.rs | 2 +- tools/pubsys/src/aws/ssm/mod.rs | 2 +- tools/pubsys/src/main.rs | 2 +- tools/pubsys/src/repo.rs | 2 +- .../pubsys/src/repo/check_expirations/mod.rs | 2 +- tools/pubsys/src/repo/refresh_repo/mod.rs | 2 +- tools/pubsys/src/repo/validate_repo/mod.rs | 2 +- tools/pubsys/src/vmware/upload_ova/mod.rs | 2 +- 14 files changed, 59 insertions(+), 23 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 8d9d3235..3b45b30d 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -194,7 +194,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66dbfc9307f5b2429656e07533613cd3f26803fd2857fc33be22aa2711181d58" dependencies = [ - "clap", + "clap 2.34.0", "lazy_static", "percent-encoding", "regex", @@ -238,12 +238,27 @@ dependencies = [ "ansi_term", "atty", "bitflags", - "strsim", - "textwrap", + "strsim 0.8.0", + "textwrap 0.11.0", "unicode-width", "vec_map", ] +[[package]] +name = "clap" +version = "3.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +dependencies = [ + "atty", + "bitflags", + "indexmap", + "os_str_bytes", + "strsim 0.10.0", + "termcolor", + "textwrap 0.15.0", +] + [[package]] name = "coldsnap" version = "0.3.2" @@ -821,7 +836,7 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", - "clap", + "clap 3.1.8", "hex", "log", "pubsys-config", @@ -1074,6 +1089,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "os_str_bytes" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +dependencies = [ + "memchr", +] + [[package]] name = "parking_lot" version = "0.11.2" @@ -1198,7 +1222,7 @@ version = "0.1.0" dependencies = [ "async-trait", "chrono", - "clap", + "clap 3.1.8", "coldsnap", "duct", "futures", @@ -1953,13 +1977,19 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "structopt" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" dependencies = [ - "clap", + "clap 2.34.0", "lazy_static", "structopt-derive", ] @@ -2036,6 +2066,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "textwrap" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" + [[package]] name = "thiserror" version = "1.0.30" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index d997baed..6e0e73a8 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -7,10 +7,10 @@ edition = "2018" publish = false [dependencies] -async-trait = "0.1.53" -clap = "2.33" +async-trait = "0.1.53" 
+clap = "3.1" hex = "0.4.0" -log = "0.4.14" +log = "0.4.14" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } rusoto_cloudformation = { version = "0.47", default-features = false, features = ["rustls"] } rusoto_core = { version = "0.47", default-features = false, features = ["rustls"] } @@ -18,10 +18,10 @@ rusoto_s3 = { version = "0.47", default-features = false, features = ["rustls"] serde_json = "1.0.66" serde_yaml = "0.8.17" sha2 = "0.10" -shell-words = "1.0.0" +shell-words = "1.0.0" simplelog = "0.11" snafu = "0.7" -structopt = { version = "0.3", default-features = false } +structopt = { version = "0.3", default-features = false } tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS toml = "0.5" url = "2.2.2" diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 94bd5437..4550cc32 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -15,7 +15,7 @@ use std::collections::HashMap; use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use std::{fs, process}; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tokio::runtime::Runtime; use url::Url; diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index e9b4d992..d4abfe2a 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -9,7 +9,7 @@ publish = false [dependencies] async-trait = "0.1.53" chrono = "0.4" -clap = "2.33" +clap = "3.1" coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} duct = "0.13.0" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index e7343abe..5407dbb0 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -24,7 +24,7 @@ use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use wait::wait_for_ami; /// Builds Bottlerocket AMIs using latest build artifacts diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 2b7a2fb1..e2c29e65 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -12,7 +12,7 @@ use rusoto_ssm::SsmClient; use snafu::{ensure, ResultExt}; use std::collections::HashMap; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; /// Copies sets of SSM parameters #[derive(Debug, StructOpt)] diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 84b9c25f..9035dd79 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet}; use std::fs::File; use std::iter::FromIterator; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; /// Grants or revokes permissions to Bottlerocket AMIs #[derive(Debug, StructOpt)] diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 7659de30..38786bf4 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -16,7 +16,7 @@ use std::collections::{HashMap, HashSet}; use std::fs::File; use std::iter::FromIterator; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; /// Sets SSM parameters based on current build information #[derive(Debug, StructOpt)] diff 
--git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 29d5f34f..8f47f7fd 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -32,7 +32,7 @@ use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; use snafu::ResultExt; use std::path::PathBuf; use std::process; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tokio::runtime::Runtime; fn run() -> Result<()> { diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 3892e999..771bd5ee 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -21,7 +21,7 @@ use std::fs::{self, File}; use std::num::NonZeroU64; use std::path::{Path, PathBuf}; use std::str::FromStr; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tempfile::NamedTempFile; use tough::{ editor::signed::PathExists, diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index 8af39b9d..af2940fd 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -11,7 +11,7 @@ use snafu::{OptionExt, ResultExt}; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tough::{ExpirationEnforcement, Repository, RepositoryLoader}; use url::Url; diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index c8d243cb..35b3d99a 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -13,7 +13,7 @@ use snafu::{ensure, OptionExt, ResultExt}; use std::fs; use std::fs::File; use std::path::{Path, PathBuf}; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tough::editor::RepositoryEditor; use tough::key_source::{KeySource, LocalKeySource}; use tough::{ExpirationEnforcement, RepositoryLoader}; diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index 1feeea2e..edec8e18 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -11,7 +11,7 @@ use std::fs::File; use std::io; use std::path::PathBuf; use std::sync::mpsc; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tough::{Repository, RepositoryLoader}; use url::Url; diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs index 4a00b396..8226005c 100644 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -12,7 +12,7 @@ use serde::Serialize; use snafu::{ensure, OptionExt, ResultExt}; use std::fs; use std::path::PathBuf; -use structopt::StructOpt; +use structopt::{clap, StructOpt}; use tempfile::NamedTempFile; use tinytemplate::TinyTemplate; From 5ec78b3101e48651916e72dba82bfe9b7626a7a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Apr 2022 21:46:05 +0000 Subject: [PATCH 0627/1356] build(deps): bump clap from 3.1.8 to 3.1.9 in /tools Bumps [clap](https://github.com/clap-rs/clap) from 3.1.8 to 3.1.9. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v3.1.8...v3.1.9) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 3b45b30d..f7589608 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -246,19 +246,28 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.8" +version = "3.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "6aad2534fad53df1cc12519c5cda696dd3e20e6118a027e24054aea14a0bdcbe" dependencies = [ "atty", "bitflags", + "clap_lex", "indexmap", - "os_str_bytes", "strsim 0.10.0", "termcolor", "textwrap 0.15.0", ] +[[package]] +name = "clap_lex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189ddd3b5d32a70b35e7686054371742a937b0d99128e76dde6340210e966669" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "coldsnap" version = "0.3.2" @@ -836,7 +845,7 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", - "clap 3.1.8", + "clap 3.1.9", "hex", "log", "pubsys-config", @@ -1094,9 +1103,6 @@ name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] [[package]] name = "parking_lot" @@ -1222,7 +1228,7 @@ version = "0.1.0" dependencies = [ "async-trait", "chrono", - "clap 3.1.8", + "clap 3.1.9", "coldsnap", "duct", "futures", From cf4761674e5cfc70f782d5483d1cc41e8b0d36ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Apr 2022 21:46:20 +0000 Subject: [PATCH 0628/1356] build(deps): bump toml from 0.5.8 to 0.5.9 in /tools Bumps [toml](https://github.com/alexcrichton/toml-rs) from 0.5.8 to 0.5.9. - [Release notes](https://github.com/alexcrichton/toml-rs/releases) - [Commits](https://github.com/alexcrichton/toml-rs/compare/0.5.8...0.5.9) --- updated-dependencies: - dependency-name: toml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index f7589608..8e0c8f5f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2213,9 +2213,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] From 922d3f30dd02dc284ffd304ed441992a1f1a9951 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 22 Mar 2022 21:21:04 +0000 Subject: [PATCH 0629/1356] kernel-5.10, kernel-5.4: add CHECKPOINT_RESTORE config The `CHECKPOINT_RESTORE` config is required to be able to read the seccomp filter of a process using ptrace(2) Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/config-bottlerocket | 3 +++ packages/kernel-5.4/config-bottlerocket | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 33d45b5c..8d20f859 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -120,3 +120,6 @@ CONFIG_IPMI_HANDLER=m # Add support for bootconfig CONFIG_BOOT_CONFIG=y + +# Enables support for checkpoint/restore +CONFIG_CHECKPOINT_RESTORE=y diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 3739e023..34a422d9 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -82,3 +82,6 @@ CONFIG_DECOMPRESS_ZSTD=y CONFIG_SERIO_I8042=m CONFIG_KEYBOARD_ATKBD=m CONFIG_MOUSE_PS2=m + +# Enables support for checkpoint/restore +CONFIG_CHECKPOINT_RESTORE=y From 57481e121946a1d308a33813bf27ade3df3a093c Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 31 Mar 2022 10:00:54 -0700 Subject: [PATCH 0630/1356] Remove aws-k8s-1.18 variant EKS ended 1.18 support on March 31st 2022. --- .github/workflows/build.yml | 2 +- README.md | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e63035dd..b89eae9a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.18, aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-ecs-1] + variant: [aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] diff --git a/README.md b/README.md index 2aa69591..627dff10 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,6 @@ For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an ima The following variants support EKS, as described above: -- `aws-k8s-1.18` - `aws-k8s-1.19` - `aws-k8s-1.20` - `aws-k8s-1.21` @@ -66,7 +65,13 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.21` - `vmware-k8s-1.22` -The `aws-k8s-1.15`, `aws-k8s-1.16`, and `aws-k8s-1.17` variants are no longer supported. +The following variants are no longer supported.: + +- `aws-k8s-1.15` +- `aws-k8s-1.16` +- `aws-k8s-1.17` +- `aws-k8s-1.18` + We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). 
## Architectures From 9576e058891831a3315fabd7e50aec233d2d3846 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 31 Mar 2022 10:01:49 -0700 Subject: [PATCH 0631/1356] README: update example commands for fetching kmod kit The old example no longer worked due to tuftool download changes --- BUILDING.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 0a0f65d3..f2e6484e 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -188,7 +188,7 @@ The specifics of building an out-of-tree module will vary by project, but the fi ### Downloading the kmod kit kmod kits are included in the official Bottlerocket repos starting with Bottlerocket v1.0.6. -Let's say you want to download the kit for building x86_64 modules for v1.0.6 and variant aws-k8s-1.18. +Let's say you want to download the kit for building x86_64 modules for v1.7.0 and variant aws-k8s-1.21. First, you need tuftool: ```bash @@ -205,10 +205,11 @@ sha512sum -c <<<"e9b1ea5f9b4f95c9b55edada4238bf00b12845aa98bdd2d3edb63ff82a03ada Next, set your desired parameters, and download the kmod kit: ```bash ARCH=x86_64 -VERSION=v1.0.6 -VARIANT=aws-k8s-1.18 +VERSION=v1.7.0 +VARIANT=aws-k8s-1.21 +OUTDIR="${VARIANT}-${VERSION}" -tuftool download . --target-name ${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz \ +tuftool download "${OUTDIR}" --target-name ${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz \ --root ./root.json \ --metadata-url "https://updates.bottlerocket.aws/2020-07-07/${VARIANT}/${ARCH}/" \ --targets-url "https://updates.bottlerocket.aws/targets/" From bb52305a3e043d4c7158aed4e2192e1d3aac4941 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 18 Apr 2022 22:18:24 +0000 Subject: [PATCH 0632/1356] kernel-5.4: update to 5.4.188 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 817a2d91..0dd8e405 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/d8a7e800750161a038954b2685ca8c5fb0a0dac22057530c4c0233d60f06c2d3/kernel-5.4.181-99.354.amzn2.src.rpm" -sha512 = "39903e5164ea966b62ddfa70ffd9a73ba50af363cf87d20011ad8d2f1e471857b79503da75770a1e812058c9cd2a17a88000e6e9a4c44580d3c4210144aa3993" +url = "https://cdn.amazonlinux.com/blobstore/a120999c2cd538adae1c97c87e6d60f3bcf6f761064204638a5647e06aea1aad/kernel-5.4.188-104.359.amzn2.src.rpm" +sha512 = "ebb6f8460ddfccc50e89b499563dfa64f1c3228e9fe3cabd20ec1561ca8bf3764a50853b35085742dde3a219ad9314033d8c12cbc2d615f463aab0e062d9a229" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index b7ce83d7..e1145c69 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.181 +Version: 5.4.188 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/d8a7e800750161a038954b2685ca8c5fb0a0dac22057530c4c0233d60f06c2d3/kernel-5.4.181-99.354.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/a120999c2cd538adae1c97c87e6d60f3bcf6f761064204638a5647e06aea1aad/kernel-5.4.188-104.359.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From c8f51f48e44dfac9722b97ca687000e96daefb65 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 18 Apr 2022 22:20:03 +0000 Subject: [PATCH 0633/1356] kernel-5.10: update to 5.10.109 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index b0127392..9e0bf10e 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/abd0b3e08ff7d32abb916b2664e8de68bd7d16dbbfdcfe8e574d832aa19a3b1e/kernel-5.10.102-99.473.amzn2.src.rpm" -sha512 = "ed17395fed0480d87e59f80899953641169fae7ef2f34eb74bad66ff92b2eec5c72dbff4a08af49de516cde8fe96218a102857e048073dd6d48fb73be4ef19e0" +url = "https://cdn.amazonlinux.com/blobstore/3479900579a0dbe61cbe7e6d76620774513369246def8bae42ec791865d68df9/kernel-5.10.109-104.500.amzn2.src.rpm" +sha512 = "66c840eee5333bb77f8661b14ec07b33ea7b6d9db82c89370c8109c0a315c6ad532364d0c879efd45fff0bfe3855876bbf53b11b5107b0dc55f9d2ac1a59cc6d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 7f8de017..64769bd8 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.102 +Version: 5.10.109 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/abd0b3e08ff7d32abb916b2664e8de68bd7d16dbbfdcfe8e574d832aa19a3b1e/kernel-5.10.102-99.473.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/3479900579a0dbe61cbe7e6d76620774513369246def8bae42ec791865d68df9/kernel-5.10.109-104.500.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 321aae02b18a69ba953306dd7fe77d63f559e24f Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Fri, 22 Apr 2022 00:16:46 +0000 Subject: [PATCH 0634/1356] Add support for ap-southeast-3: Jakarta --- tools/Cargo.lock | 30 ++++++++++-------------------- tools/Cargo.toml | 13 +++++++++++++ tools/deny.toml | 5 +++++ 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 8e0c8f5f..eed6c92d 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1466,8 +1466,7 @@ dependencies = [ [[package]] name = "rusoto_cloudformation" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00db4cfcfc14725c720d881443f2c17607bd80aa20fecd1382a5936cc2db05d" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1480,8 +1479,7 @@ dependencies = [ [[package]] name = "rusoto_core" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4f000e8934c1b4f70adde180056812e7ea6b1a247952db8ee98c94cd3116cc" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "base64", @@ -1505,8 +1503,7 @@ dependencies = [ [[package]] name = "rusoto_credential" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a46b67db7bb66f5541e44db22b0a02fed59c9603e146db3a9e633272d3bac2f" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "chrono", @@ -1523,8 +1520,7 @@ dependencies = [ [[package]] name = "rusoto_ebs" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618ce34e8ec52dfd0f597608ec21049e4d7379d737b5f2cc339c92b61d096e0d" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1538,8 +1534,7 @@ dependencies = [ [[package]] name = "rusoto_ec2" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92315363c2f2acda29029ce0ce0e58e1c32caf10c9719068a1ec102add3d4878" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1552,8 +1547,7 @@ dependencies = [ [[package]] name = "rusoto_kms" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7892cd2cca7644d33bd6fafdb2236efd3659162fd7b73ca68d3877f0528399c" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1566,8 +1560,7 @@ dependencies = [ [[package]] name = "rusoto_s3" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048c2fe811a823ad5a9acc976e8bf4f1d910df719dcf44b15c3e96c5b7a51027" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1579,8 +1572,7 @@ dependencies = [ [[package]] name = "rusoto_signature" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6264e93384b90a747758bcc82079711eacf2e755c3a8b5091687b5349d870bcc" +source 
= "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "base64", "bytes", @@ -1605,8 +1597,7 @@ dependencies = [ [[package]] name = "rusoto_ssm" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "050304a18997ab01994d4a452472199088dc0376e61d1f50e2d9675227a0fe0c" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", @@ -1619,8 +1610,7 @@ dependencies = [ [[package]] name = "rusoto_sts" version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7edd42473ac006fd54105f619e480b0a94136e7f53cf3fb73541363678fd92" +source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" dependencies = [ "async-trait", "bytes", diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 58b7f54b..1439d6eb 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -6,3 +6,16 @@ members = [ "pubsys-config", "pubsys-setup", ] + +# This patches rusoto with an upstream commit to support ap-southeast-3 +[patch.crates-io] +rusoto_cloudformation = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_core = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_credential = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_ebs = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_ec2 = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_kms = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_s3 = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_signature = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_ssm = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } +rusoto_sts = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } diff --git a/tools/deny.toml b/tools/deny.toml index 7068b6b2..e0da1ce4 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -68,3 +68,8 @@ skip-tree = [ # Deny crates from unknown registries or git repositories. unknown-registry = "deny" unknown-git = "deny" + +allow-git = [ + # rusoto is patched with an upstream commit to support ap-southeast-3 + "https://github.com/rusoto/rusoto.git", +] From 2531512fe6e92313cd2e23a07b95564d2b0c721f Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 18 Apr 2022 17:42:20 +0000 Subject: [PATCH 0635/1356] README: add NVIDIA GPUs section This adds a new section for NVIDIA GPUs and lists what EC2 instance types are supported by the official Bottlerocket `nvidia` k8s AMIs. 
Signed-off-by: Arnaldo Garcia Rincon --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 627dff10..e84cb0ee 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ The following variants support EKS, as described above: - `aws-k8s-1.20` - `aws-k8s-1.21` - `aws-k8s-1.21-nvidia` +- `aws-k8s-1.22-nvidia` The following variant supports ECS: @@ -745,6 +746,11 @@ There are a few important caveats about the provided kdump support: * The system kernel will reserve 256MB for the crash kernel, only when the host has at least 2GB of memory; the reserved space won't be available for processes running in the host * The crash kernel will only be loaded when the `crashkernel` parameter is present in the kernel's cmdline and if there is memory reserved for it +### NVIDIA GPUs Support +Bottlerocket's `nvidia` variants include the required packages and configurations to leverage NVIDIA GPUs. +The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g4dn`, `g5` and `g5g`. +Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about kubernetes variants. + ## Details ### Security From 4cdefd3721254d031b12f62430f2e196c3d98aee Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 18 Apr 2022 17:49:25 +0000 Subject: [PATCH 0636/1356] QUICKSTART-EKS: add NVIDIA GPUs sample configuration Now the documentation explicitly says that it is possible to use a GPU per orchestrated container, and references the official kubernetes documentation to schedule NVIDIA GPUs. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e84cb0ee..3644ac77 100644 --- a/README.md +++ b/README.md @@ -749,7 +749,7 @@ There are a few important caveats about the provided kdump support: ### NVIDIA GPUs Support Bottlerocket's `nvidia` variants include the required packages and configurations to leverage NVIDIA GPUs. The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g4dn`, `g5` and `g5g`. -Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about kubernetes variants. +Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants. ## Details From 419cd2bc3bbc183d1aeffd48029496eeaa8739e2 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Wed, 27 Apr 2022 23:37:54 +0000 Subject: [PATCH 0637/1356] tools: update rusoto to 0.48.0 This brings in tough and coldsnap updates to use a consistent rusoto version. It also downgrades rustls to 0.20.2 to avoid spurious logging. 
--- tools/Cargo.lock | 242 +++++++++++++++----------------------- tools/Cargo.toml | 13 -- tools/deny.toml | 12 +- tools/infrasys/Cargo.toml | 6 +- tools/pubsys/Cargo.toml | 16 +-- 5 files changed, 109 insertions(+), 180 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index eed6c92d..4133b168 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -170,7 +170,7 @@ dependencies = [ "serde", "serde_plain", "sha2 0.10.2", - "snafu 0.7.0", + "snafu", "toml", "url", "walkdir", @@ -270,22 +270,24 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0abab1b9dde1257595c242aca970d5b3af6f569d979f4a9a571a392942db87e" +checksum = "415be2bbdb84dd0d33de3099c74b9f96bd78157fe54c2073683c2b4c4811463d" dependencies = [ "argh", + "async-trait", "base64", "bytes", "futures", "indicatif", + "nix", "rusoto_core", "rusoto_credential", "rusoto_ebs", "rusoto_ec2", "rusoto_signature", - "sha2 0.9.9", - "snafu 0.6.10", + "sha2 0.10.2", + "snafu", "tempfile", "tokio", ] @@ -402,15 +404,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ct-logs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" -dependencies = [ - "sct 0.6.1", -] - [[package]] name = "digest" version = "0.9.0" @@ -776,23 +769,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" -dependencies = [ - "ct-logs", - "futures-util", - "hyper", - "log", - "rustls 0.19.1", - "rustls-native-certs", - "tokio", - "tokio-rustls 0.22.0", - "webpki 0.21.4", -] - [[package]] name = "hyper-rustls" version = "0.23.0" @@ -801,9 +777,11 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.4", + "log", + "rustls", + "rustls-native-certs", "tokio", - "tokio-rustls 0.23.3", + "tokio-rustls", ] [[package]] @@ -857,7 +835,7 @@ dependencies = [ "sha2 0.10.2", "shell-words", "simplelog", - "snafu 0.7.0", + "snafu", "structopt", "tokio", "toml", @@ -1000,6 +978,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "nix" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +dependencies = [ + "bitflags", + "cfg-if", + "libc", + "memoffset", +] + [[package]] name = "nonzero_ext" version = "0.3.0" @@ -1135,7 +1125,7 @@ version = "0.1.0" dependencies = [ "cargo-readme", "chrono", - "snafu 0.7.0", + "snafu", ] [[package]] @@ -1252,7 +1242,7 @@ dependencies = [ "serde", "serde_json", "simplelog", - "snafu 0.7.0", + "snafu", "structopt", "tempfile", "tinytemplate", @@ -1277,7 +1267,7 @@ dependencies = [ "parse-datetime", "serde", "serde_yaml", - "snafu 0.7.0", + "snafu", "toml", "url", ] @@ -1293,7 +1283,7 @@ dependencies = [ "sha2 0.10.2", "shell-words", "simplelog", - "snafu 0.7.0", + "snafu", "structopt", "tempfile", "toml", @@ -1425,7 +1415,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls 0.23.0", + "hyper-rustls", "ipnet", "js-sys", "lazy_static", @@ -1433,13 +1423,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls 0.20.4", - "rustls-pemfile", + "rustls", + "rustls-pemfile 0.3.0", "serde", "serde_json", "serde_urlencoded", 
"tokio", - "tokio-rustls 0.23.3", + "tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1465,8 +1455,9 @@ dependencies = [ [[package]] name = "rusoto_cloudformation" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd30fadf72299e6d385ed4e32b1b765cb1c20e359b05ff14fa35dd2d7dd6a229" dependencies = [ "async-trait", "bytes", @@ -1478,8 +1469,9 @@ dependencies = [ [[package]] name = "rusoto_core" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db30db44ea73551326269adcf7a2169428a054f14faf9e1768f2163494f2fa2" dependencies = [ "async-trait", "base64", @@ -1488,7 +1480,7 @@ dependencies = [ "futures", "http", "hyper", - "hyper-rustls 0.22.1", + "hyper-rustls", "lazy_static", "log", "rusoto_credential", @@ -1502,8 +1494,9 @@ dependencies = [ [[package]] name = "rusoto_credential" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee0a6c13db5aad6047b6a44ef023dbbc21a056b6dab5be3b79ce4283d5c02d05" dependencies = [ "async-trait", "chrono", @@ -1519,8 +1512,9 @@ dependencies = [ [[package]] name = "rusoto_ebs" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab3b70d6e2b9e8550bc50a42fd03e5cf43b1146b3a2a4f73fae867c08787b2" dependencies = [ "async-trait", "bytes", @@ -1533,8 +1527,9 @@ dependencies = [ [[package]] name = "rusoto_ec2" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "666c2f36b125e43229892f1a0d81ad28c0d0231d3b8b00ab0e8120975d6138ca" dependencies = [ "async-trait", "bytes", @@ -1546,8 +1541,9 @@ dependencies = [ [[package]] name = "rusoto_kms" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e1fc19cfcfd9f6b2f96e36d5b0dddda9004d2cbfc2d17543e3b9f10cc38fce8" dependencies = [ "async-trait", "bytes", @@ -1559,8 +1555,9 @@ dependencies = [ [[package]] name = "rusoto_s3" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aae4677183411f6b0b412d66194ef5403293917d66e70ab118f07cc24c5b14d" dependencies = [ "async-trait", "bytes", @@ -1571,8 +1568,9 @@ dependencies = [ [[package]] name = "rusoto_signature" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5ae95491c8b4847931e291b151127eccd6ff8ca13f33603eb3d0035ecb05272" dependencies = [ "base64", "bytes", @@ -1596,8 +1594,9 @@ dependencies = [ [[package]] name = "rusoto_ssm" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "166034bb4835e1e6a7ac1cc659c9798e751cd75d7244f37beeaa12f2bbdda30b" dependencies = [ "async-trait", "bytes", @@ -1609,8 +1608,9 @@ dependencies = [ [[package]] name = "rusoto_sts" -version = "0.47.0" -source = "git+https://github.com/rusoto/rusoto?rev=37bac105a0c16d2259f8390c1aeef068db713911#37bac105a0c16d2259f8390c1aeef068db713911" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1643f49aa67cb7cb895ebac5a2ff3f991c6dbdc58ad98b28158cd5706aecd1d" dependencies = [ "async-trait", "bytes", @@ -1638,37 +1638,24 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ - "base64", "log", "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" -dependencies = [ - "log", - "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] name = "rustls-native-certs" -version = "0.5.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls 0.19.1", + "rustls-pemfile 1.0.0", "schannel", "security-framework", ] @@ -1682,6 +1669,15 @@ dependencies = [ "base64", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +dependencies = [ + "base64", +] + [[package]] name = "ryu" version = "1.0.9" @@ -1713,16 +1709,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -1907,16 +1893,6 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" -[[package]] -name = "snafu" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" -dependencies = [ - "doc-comment", - "snafu-derive 0.6.10", -] - [[package]] name = "snafu" version = "0.7.0" @@ -1925,18 +1901,7 @@ checksum = "2eba135d2c579aa65364522eb78590cdf703176ef71ad4c32b00f58f7afb2df5" 
dependencies = [ "backtrace", "doc-comment", - "snafu-derive 0.7.0", -] - -[[package]] -name = "snafu-derive" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "snafu-derive", ] [[package]] @@ -2154,26 +2119,15 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - [[package]] name = "tokio-rustls" version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" dependencies = [ - "rustls 0.20.4", + "rustls", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -2212,9 +2166,9 @@ dependencies = [ [[package]] name = "tough" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708125a84e70820bccc5fc11d7196664415be2b02b81ba6946e70e10803aa4da" +checksum = "8a72582c980b86ac5b86cd8deb4e6841b44efaed2db9efae9b486b98288d9de2" dependencies = [ "chrono", "dyn-clone", @@ -2230,7 +2184,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "snafu 0.7.0", + "snafu", "tempfile", "untrusted", "url", @@ -2239,32 +2193,32 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a910dad24be252ff379d79a49c44ed36b3e8b0f5d34b79a8967df24e685bae2d" +checksum = "baafc5e2a7f5207043f0a7e50f5c5163571c751c6a2d61a642bb8d3acdcc9659" dependencies = [ "pem", "ring", "rusoto_core", "rusoto_credential", "rusoto_kms", - "snafu 0.7.0", + "snafu", "tokio", "tough", ] [[package]] name = "tough-ssm" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1c96981e5a2302abc1ea54f076ac47c2ffe2abcdcc147f7668ee8b3212c094" +checksum = "d7a8be927f383be49de8e032532b72e82e7129f248c742bfa47247435bc7cdfb" dependencies = [ "rusoto_core", "rusoto_credential", "rusoto_ssm", "serde", "serde_json", - "snafu 0.7.0", + "snafu", "tokio", "tough", ] @@ -2357,7 +2311,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "snafu 0.7.0", + "snafu", "toml", ] @@ -2489,16 +2443,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -2515,7 +2459,7 @@ version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 1439d6eb..58b7f54b 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -6,16 +6,3 @@ members = [ "pubsys-config", "pubsys-setup", ] - -# This patches rusoto with an upstream commit to support ap-southeast-3 -[patch.crates-io] -rusoto_cloudformation = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_core = { git = "https://github.com/rusoto/rusoto", 
rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_credential = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_ebs = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_ec2 = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_kms = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_s3 = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_signature = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_ssm = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } -rusoto_sts = { git = "https://github.com/rusoto/rusoto", rev = "37bac105a0c16d2259f8390c1aeef068db713911" } diff --git a/tools/deny.toml b/tools/deny.toml index e0da1ce4..bedc1540 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -60,16 +60,14 @@ skip-tree = [ # temporarily using a different version of snafu { name = "parse-datetime", version = "0.1.0" }, - # rusoto is using a different version of reqwest. - { name = "rusoto_core", version = "0.47.0" }, + # rusoto_signature uses an older version of sha2 + { name = "rusoto_signature" }, + + # reqwest uses an older rustls-pemfile + { name = "reqwest", version = "0.11.10" }, ] [sources] # Deny crates from unknown registries or git repositories. unknown-registry = "deny" unknown-git = "deny" - -allow-git = [ - # rusoto is patched with an upstream commit to support ap-southeast-3 - "https://github.com/rusoto/rusoto.git", -] diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 6e0e73a8..86b034dd 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -12,9 +12,9 @@ clap = "3.1" hex = "0.4.0" log = "0.4.14" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } -rusoto_cloudformation = { version = "0.47", default-features = false, features = ["rustls"] } -rusoto_core = { version = "0.47", default-features = false, features = ["rustls"] } -rusoto_s3 = { version = "0.47", default-features = false, features = ["rustls"] } +rusoto_cloudformation = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_s3 = { version = "0.48.0", default-features = false, features = ["rustls"] } serde_json = "1.0.66" serde_yaml = "0.8.17" sha2 = "0.10" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index d4abfe2a..55c87ee8 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -22,14 +22,14 @@ parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. 
reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } -rusoto_core = { version = "0.47.0", default-features = false, features = ["rustls"] } -rusoto_credential = "0.47.0" -rusoto_ebs = { version = "0.47.0", default-features = false, features = ["rustls"] } -rusoto_ec2 = { version = "0.47.0", default-features = false, features = ["rustls"] } -rusoto_kms = { version = "0.47.0", default-features = false, features = ["rustls"] } -rusoto_signature = "0.47.0" -rusoto_ssm = { version = "0.47.0", default-features = false, features = ["rustls"] } -rusoto_sts = { version = "0.47.0", default-features = false, features = ["rustls"] } +rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_credential = "0.48.0" +rusoto_ebs = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_ec2 = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_kms = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_signature = "0.48.0" +rusoto_ssm = { version = "0.48.0", default-features = false, features = ["rustls"] } +rusoto_sts = { version = "0.48.0", default-features = false, features = ["rustls"] } simplelog = "0.11" snafu = "0.7" semver = "1.0" From 2955bf0acf06bd8facdf5747cf21f73e130aba33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Apr 2022 22:06:27 +0000 Subject: [PATCH 0638/1356] build(deps): bump simplelog from 0.11.2 to 0.12.0 in /tools Bumps [simplelog](https://github.com/drakulix/simplelog.rs) from 0.11.2 to 0.12.0. - [Release notes](https://github.com/drakulix/simplelog.rs/releases) - [Changelog](https://github.com/Drakulix/simplelog.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/drakulix/simplelog.rs/compare/v0.11.2...v0.12.0) --- updated-dependencies: - dependency-name: simplelog dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 35 +++++++++++++++++++++++++++++++---- tools/infrasys/Cargo.toml | 2 +- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 2 +- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 4133b168..eef38e46 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -225,7 +225,7 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time", + "time 0.1.43", "winapi", ] @@ -1034,6 +1034,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aba1801fb138d8e85e11d0fc70baf4fe1cdfffda7c6cd34a854905df588e5ed0" +dependencies = [ + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -1872,13 +1881,13 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1348164456f72ca0116e4538bdaabb0ddb622c7d9f16387c725af3e96d6001c" +checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" dependencies = [ - "chrono", "log", "termcolor", + "time 0.3.9", ] [[package]] @@ -2063,6 +2072,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "time" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +dependencies = [ + "itoa", + "libc", + "num_threads", + "time-macros", +] + +[[package]] +name = "time-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" + [[package]] name = "tinytemplate" version = "1.2.1" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 86b034dd..0daa874e 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -19,7 +19,7 @@ serde_json = "1.0.66" serde_yaml = "0.8.17" sha2 = "0.10" shell-words = "1.0.0" -simplelog = "0.11" +simplelog = "0.12" snafu = "0.7" structopt = { version = "0.3", default-features = false } tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 7bfee00f..bf2d333f 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -13,7 +13,7 @@ pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } sha2 = "0.10" shell-words = "1.0" -simplelog = "0.11" +simplelog = "0.12" snafu = "0.7" structopt = { version = "0.3", default-features = false } tempfile = "3.1" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 55c87ee8..2de0f2e8 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -30,7 +30,7 @@ rusoto_kms = { version = "0.48.0", default-features = false, features = ["rustls rusoto_signature = "0.48.0" rusoto_ssm = { version = "0.48.0", default-features = false, features = ["rustls"] } rusoto_sts = { version = "0.48.0", default-features = false, features = ["rustls"] } -simplelog = "0.11" +simplelog = "0.12" snafu = "0.7" semver = "1.0" serde = { version = "1.0", features = ["derive"] } From 7da13b151d50601ac3b73bccf618c19e10ad2db6 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 18 Apr 2022 18:43:08 +0000 Subject: [PATCH 0639/1356] 
sources,packages: add ecs-gpu-init helper program `ecs-gpu-init` queries the NVIDIA GPU devices available in the host, and writes down the `nvidia-gpu-info.json` file used by the ECS agent to configure GPUs for orchestrated tasks. Signed-off-by: Arnaldo Garcia Rincon --- tools/docker-go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/docker-go b/tools/docker-go index 732429a3..49a75a84 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -84,7 +84,7 @@ docker run --rm \ --security-opt label:disable \ ${DOCKER_RUN_ARGS} \ -v "${GO_MOD_CACHE}":/tmp/go/pkg/mod \ - -v "${GO_MODULE_PATH}":/usr/src/host-ctr \ - -w /usr/src/host-ctr \ + -v "${GO_MODULE_PATH}":/usr/src/module \ + -w /usr/src/module \ "${SDK_IMAGE}" \ bash -c "${COMMAND}" From 92f4b97d74f4ef654dc52c5cccdbf0a39cb4e1e5 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Tue, 3 May 2022 14:45:54 -0700 Subject: [PATCH 0640/1356] silence dependabot for now --- .github/dependabot.yml | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 0d902da0..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: 2 -updates: - - - package-ecosystem: "cargo" - directory: "/sources" - schedule: - interval: "daily" - - - package-ecosystem: "cargo" - directory: "/tools" - schedule: - interval: "daily" - - - package-ecosystem: "gomod" - directory: "/sources/host-ctr" - schedule: - interval: "daily" - - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "daily" From 103b3fc65bd4c165be67ca575b70cd1b4ba8e1ae Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 21 Apr 2022 21:44:30 +0000 Subject: [PATCH 0641/1356] variants: add aws-ecs-1-nvidia Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 8 ++++++++ README.md | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b89eae9a..af6a993e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -77,6 +77,14 @@ jobs: arch: aarch64 supported: true fetch-upstream: "true" + - variant: aws-ecs-1-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-ecs-1-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" fail-fast: false steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index 3644ac77..543a47b8 100644 --- a/README.md +++ b/README.md @@ -749,7 +749,7 @@ There are a few important caveats about the provided kdump support: ### NVIDIA GPUs Support Bottlerocket's `nvidia` variants include the required packages and configurations to leverage NVIDIA GPUs. The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g4dn`, `g5` and `g5g`. -Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants. +Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants, and [QUICKSTART-ECS](QUICKSTART-ECS.md#aws-ecs--nvidia-variants) for ECS variants. ## Details From c02099abe68cb0444d0a0d9064e78157a24ac275 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 11 May 2022 11:13:44 -0700 Subject: [PATCH 0642/1356] kernel: add support for MegaRAID SAS This adds kernel config to enable support for MegaRAID SAS RAID controllers. 
--- packages/kernel-5.10/config-bottlerocket | 3 +++ packages/kernel-5.4/config-bottlerocket | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 8d20f859..7d9806dd 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -123,3 +123,6 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y + +# Enables support for LSI Logic's SAS based RAID controllers +CONFIG_MEGARAID_SAS=y diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 34a422d9..70bda426 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -85,3 +85,6 @@ CONFIG_MOUSE_PS2=m # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y + +# Enables support for LSI Logic's SAS based RAID controllers +CONFIG_MEGARAID_SAS=y From cd2ea3684370173437a5794616915ed36362e3aa Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 29 Mar 2022 18:37:51 -0700 Subject: [PATCH 0643/1356] README: add description and example for 'settings.boot' --- README.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/README.md b/README.md index 543a47b8..c441020e 100644 --- a/README.md +++ b/README.md @@ -561,6 +561,43 @@ Here are the metrics settings: "vm.max_map_count" = "262144" ``` +#### Boot-related settings + +*Please note that boot settings only exist for bare-metal variants at the moment* + +Specifying either of the following settings will generate a kernel boot config file to be loaded on subsequent boots: +* `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. +* `settings.boot.init-parameters`: This allows additional init parameters to be specified on the kernel command line during boot. + +You can learn more about kernel boot configuration [here](https://www.kernel.org/doc/html/latest/admin-guide/bootconfig.html). + +Example user data for specifying boot settings: + +```toml +[settings.boot.kernel-parameters] +"console" = [ + "tty0", + "ttyS1,115200n8", +] +"crashkernel" = [ + "2G-:256M", +] +"slub_debug" = [ + "options,slabs", +] +"usbcore.quirks" = [ + "0781:5580:bk", + "0a5c:5834:gij", +] + +[settings.boot.init-parameters] +"log_level" = ["debug"] +"splash" = [] +``` + +If boot config data exists at `/proc/bootconfig`, it will be used to generate these API settings on first boot. +Please note that Bottlerocket only supports boot configuration for `kernel` and `init`. If any other boot config key is specified, the settings generation will fail. + #### Custom CA certificates settings By default, Bottlerocket ships with the Mozilla CA certificate store, but you can add self-signed certificates through the API using these settings: From e45534902d4a844c881ab0ed930d2edc5feeb5d1 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Tue, 12 Apr 2022 16:31:35 +0000 Subject: [PATCH 0644/1356] Use generated network configuration by default This commit removes the hard-coded `eth0.xml` network configuration file in favor of using `netdog` to generate the network configuration for all variants. It adds a new systemd unit file to run `netdog generate-net-config` early in boot, before the network is up. 
To generate the network configuration for AWS/VMware variants, we pass `netdog.default-interface=eth0:dhcp4,dhcp6?` which `netdog` interprets and generates network config similar to the hardcoded file previously used for these variants. For metal variants, we decided to use systemd-udevd's predictable device naming so we can count on network devices being named identically every boot. We currently pass `net.ifnames=0` on the kernel command line which disables predictable naming, which is fine for AWS and VMware variants as hardware is controlled and instances typically initially come up with a single interface in the same PCIe location. In order to continue using `net.ifnames=0` for AWS/VMware, we move this parameter out of the default kernel command line to the KERNEL_PARAMETERS section of each variants `Cargo.toml`. Aside: We previously passed `biosdevname=0` on the kernel command line as well. `biosdevname` is a udev helper utility written by Dell for consistent device naming based on SMBIOS info. We don't currently package the helper or include the udev rule that uses it, so we have removed the `biosdevname` parameter entirely. --- tools/rpm2img | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index b8af8e04..8eb4d69f 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -280,8 +280,7 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" \\ -- \\ - systemd.log_target=journal-or-kmsg systemd.log_color=0 \\ - net.ifnames=0 biosdevname=0 + systemd.log_target=journal-or-kmsg systemd.log_color=0 ${INITRD} } EOF From a4b67ddd3503a717e3c2c7f069628a6366c7838c Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Mon, 16 May 2022 23:00:43 +0000 Subject: [PATCH 0645/1356] kubelet: add setting for configuring PodPidsLimit --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c441020e..7d389086 100644 --- a/README.md +++ b/README.md @@ -383,6 +383,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. * `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. * `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. +* `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. 
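The `settings.kubernetes.pod-pids-limit` entry added above is only a one-line description; as a minimal, hypothetical user-data sketch (the value shown is arbitrary, and it assumes the setting takes a plain integer, mirroring the kubelet's `podPidsLimit`), it would sit alongside the other kubelet settings:

```toml
[settings.kubernetes]
# Cap the number of process IDs available to each pod; tune this to your workloads.
pod-pids-limit = 1048576
```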
From 392a5f55fd2d69948d744e635a21ddd36048b12c Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 18 May 2022 12:21:40 -0700 Subject: [PATCH 0646/1356] kernel-5.10: enable support for broadcom ethernet cards This enables support for Broadcom Tigon3 based gigabit Ethernet cards. --- packages/kernel-5.10/config-bottlerocket | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 7d9806dd..0a877eaf 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -28,6 +28,11 @@ CONFIG_E1000=m CONFIG_E1000e=m CONFIG_E1000e_hwts=y +# Broadcom network support +CONFIG_NET_VENDOR_BROADCOM=m +CONFIG_TIGON3_HWMON=y +CONFIG_TIGON3=m + # Intel 10G network support CONFIG_IXGB=m CONFIG_IXGBE=m From 08ccb0bf71244aec93bfcc9dca33437b8b4223de Mon Sep 17 00:00:00 2001 From: mello7tre Date: Thu, 7 Apr 2022 22:34:55 +0200 Subject: [PATCH 0647/1356] Add support for ECS Agent parameters ImagePullBehavior and WarmPoolsSupport They can be configured using the keys: settings.ecs.image-pull-behavior: (default*|always|once|prefer-cached) settings.autoscaling.should-wait: (true|false*) WarmPoolsSupport uses a more generic settings key, per @bcressey's advice, so that it can be reused for other variants in the future. At the moment `settings.autoscaling.should-wait` is used only for the aws-ecs* variants. --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7d389086..87ef60df 100644 --- a/README.md +++ b/README.md @@ -432,7 +432,8 @@ These settings can be changed at any time. * `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. - +* `settings.ecs.image-pull-behavior`: The behavior used to customize the [pull image process](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html#ecs-agent-availparam) for your container instances. + Supported values are `default`, `always`, `once`, `prefer-cached`, and the default is `default`. #### CloudFormation signal helper settings For AWS variants, these settings allow you to set up CloudFormation signaling to indicate whether Bottlerocket hosts running in EC2 have been successfully created or updated: @@ -440,6 +441,9 @@ For AWS variants, these settings allow you to set up CloudFormation signaling to * `settings.cloudformation.stack-name`: Name of the CloudFormation Stack to signal. * `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. +#### Auto Scaling group settings +* `settings.autoscaling.should-wait`: Whether to wait for the instance to reach the `InService` state before the orchestrator agent joins the cluster. Defaults to `false`. Set this to `true` only if the instance is part of an Auto Scaling group, or will be attached to one later. + +#### OCI Hooks settings Bottlerocket allows you to opt-in to use additional [OCI hooks](https://github.com/opencontainers/runtime-spec/blob/main/runtime.md#lifecycle) for your orchestrated containers. From 9d1e03d4a6e2c10676b8cac9ef9f87791ccbec50 Mon Sep 17 00:00:00 2001 From: "Patrick J.P.
Culp" Date: Wed, 25 May 2022 22:57:28 +0000 Subject: [PATCH 0648/1356] actions-workflow: bump rust to 1.61.0 and cargo-make to 0.35.12 --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index af6a993e..9ef0f3fe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -88,8 +88,8 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v3 - - run: rustup toolchain install 1.58.1 && rustup default 1.58.1 - - run: cargo install --version 0.35.8 cargo-make + - run: rustup default 1.61.0 + - run: cargo install --version 0.35.12 cargo-make - if: contains(matrix.variant, 'nvidia') run: | cat <<-EOF > Licenses.toml From 235653a3d359a1b48a21d42a786787d123a72dc7 Mon Sep 17 00:00:00 2001 From: r Date: Thu, 26 May 2022 19:51:04 -0500 Subject: [PATCH 0649/1356] docs: fix typos --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index f2e6484e..dd08712e 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -96,7 +96,7 @@ cargo make -e BUILDSYS_ARCH=my-arch-here Most packages will include license files extracted from upstream source archives. However, in some rare cases there are multiple licenses that could apply to a package. -Bottlerocket's build system uses the `Licenses.toml` file in conjuction with the `licenses` directory to configure the licenses used for such special packages. +Bottlerocket's build system uses the `Licenses.toml` file in conjunction with the `licenses` directory to configure the licenses used for such special packages. Here is an example of a simple `Licenses.toml` configuration file: ```toml From 7d14123e56cd1df3a70e3a9a15bf9e1ca892dfd4 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 1 Jun 2022 13:07:56 -0700 Subject: [PATCH 0650/1356] generate readme We had readme generating code pasted into most of our build.rs file. This has now been moved to a public function in a library so that the code is not duplicated. --- tools/Cargo.lock | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index eef38e46..e6fb6085 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -603,6 +603,14 @@ dependencies = [ "slab", ] +[[package]] +name = "generate-readme" +version = "0.1.0" +dependencies = [ + "cargo-readme", + "snafu", +] + [[package]] name = "generic-array" version = "0.14.5" @@ -1132,8 +1140,8 @@ dependencies = [ name = "parse-datetime" version = "0.1.0" dependencies = [ - "cargo-readme", "chrono", + "generate-readme", "snafu", ] From 85bf9c1195f1cf40a7c79934efb9e178bed3be51 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Thu, 5 May 2022 23:03:57 +0000 Subject: [PATCH 0651/1356] README: add `settings.network.hosts` --- README.md | 41 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 87ef60df..0a9920d1 100644 --- a/README.md +++ b/README.md @@ -508,9 +508,44 @@ In addition to the container runtime daemons, these credential settings will als * `settings.network.hostname`: The desired hostname of the system. **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. 
-Most users don't need to change this setting as the following defaults work for the majority of use cases. -If this setting isn't set we attempt to use DNS reverse lookup for the hostname. -If the lookup is unsuccessful, the IP of the node is used. + Most users don't need to change this setting as the following defaults work for the majority of use cases. + If this setting isn't set we attempt to use DNS reverse lookup for the hostname. + If the lookup is unsuccessful, the IP of the node is used. + +* `settings.network.hosts`: A mapping of IP addresses to domain names which should resolve to those IP addresses. + This setting results in modifications to the `/etc/hosts` file for Bottlerocket. + Note that this setting does not typically impact name resolution for containers, which usually rely on orchestrator-specific mechanisms for configuring static resolution. + (See [ECS](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HostEntry.html) and [Kubernetes](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) documentation for those mechanisms.) + + Example: + ```toml + [settings.network] + hosts = [ + ["10.0.0.0", ["test.example.com", "test1.example.com"]], + ["10.1.1.1", ["test2.example.com"]] + ] + ``` + This example would result in `/etc/hosts` entries like so: + ``` + 10.0.0.0 test.example.com test1.example.com + 10.1.1.1 test2.example.com + ``` + Repeated entries are merged (including loopback entries), with the first aliases listed taking precedence. For example: + + ```toml + [settings.network] + hosts = [ + ["10.0.0.0", ["test.example.com", "test1.example.com"]], + ["10.1.1.1", ["test2.example.com"]], + ["10.0.0.0", ["test3.example.com"]], + ] + ``` + Would result in `/etc/hosts` entries like so: + ``` + 10.0.0.0 test.example.com test1.example.com test3.example.com + 10.1.1.1 test2.example.com + ``` + ##### Proxy settings From 59fabc895d68760849ba46f6d07ac09a9e48c9b7 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 2 Jun 2022 03:00:28 +0000 Subject: [PATCH 0652/1356] build: ignore zero blocks on dm-verity root We omit 512 KiB blocks of all zeroes when registering EBS snapshots. If an encrypted EBS volume is subsequently created from the snapshot, reads from these uninitialized regions will return the decryption of zero with the volume key. This is essentially random garbage and the hash will not match what dm-verity expects. These large zero blocks are mostly found in the space between the end of the filesystem and the partition boundary, but there are some at the start of the filesystem as well. The regions are not accessed under normal circumstances. However, tools such as `blkid` will scan them to fingerprint the device, at which point dm-verity detects the "corruption" and reboots the instance. To avoid this, pass the dm-verity option to ignore zero blocks so that they are never actually read from the backing storage.
Signed-off-by: Ben Cressey --- tools/rpm2img | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index 8eb4d69f..cd325a47 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -278,7 +278,8 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { random.trust_cpu=on selinux=1 enforcing=1 \\ dm_verity.max_bios=-1 dm_verity.dev_wait=1 \\ dm-mod.create="root,,,ro,0 $VERITY_DATA_512B_BLOCKS verity $VERITY_VERSION PARTUUID=\$boot_uuid/PARTNROFF=1 PARTUUID=\$boot_uuid/PARTNROFF=2 \\ - $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT 1 restart_on_corruption" \\ + $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT \\ + 2 restart_on_corruption ignore_zero_blocks" \\ -- \\ systemd.log_target=journal-or-kmsg systemd.log_color=0 ${INITRD} From e8014a13f89811dcfb5167693606081d62ed0b2f Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 3 Jun 2022 15:15:54 +0000 Subject: [PATCH 0653/1356] Allow cluster-dns-ip to be a string or list While clusterDNS is modeled as a list in the kubelet config, we allow the settings to be given as either a string or list in the API for backwards compatibility. The API will store and return the data as-provided, and the model provides a unified iterator view over both models. --- README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/README.md b/README.md index 0a9920d1..53f6799c 100644 --- a/README.md +++ b/README.md @@ -329,6 +329,19 @@ For Kubernetes variants in AWS, you must also specify: For Kubernetes variants in VMware, you must specify: * `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. + + This value can be set as a string containing a single IP address, or as a list containing multiple IP addresses. + Examples: + ``` + # Valid, single IP + [settings.kubernetes] + "cluster-dns-ip" = "10.0.0.1" + + # Also valid, multiple nameserver IPs + [settings.kubernetes] + "cluster-dns-ip" = ["10.0.0.1", "10.0.0.2"] + ``` + * `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. From 6f7a07992e21e3ca82797b94eda5e5e215b7deb9 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Mon, 6 Jun 2022 13:57:26 -0700 Subject: [PATCH 0654/1356] tools: update tokio We do not need to pin tokio to an LTS version in the tools workspace since we are not publishing these tools. We just need to make sure they are working and keep tokio up-to-date. 
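For context on the two version requirements involved here (standard Cargo semantics rather than anything specific to this change): the tilde requirement used previously pins to a single minor series, while a bare major version is free to float within that major version.

```toml
[dependencies]
# Tilde requirement (the old, LTS-pinned form): >= 1.8.0, < 1.9.0
# tokio = "~1.8"
# Bare/caret requirement (the new form): >= 1.0.0, < 2.0.0
tokio = "1"
```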
--- tools/Cargo.lock | 96 +++++++++++++++++++++++++-------------- tools/infrasys/Cargo.toml | 2 +- tools/pubsys/Cargo.toml | 2 +- 3 files changed, 64 insertions(+), 36 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index e6fb6085..5b729748 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -629,7 +629,7 @@ checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -966,24 +966,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys", ] [[package]] @@ -1004,15 +994,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" -[[package]] -name = "ntapi" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" -dependencies = [ - "winapi", -] - [[package]] name = "num-integer" version = "0.1.44" @@ -1113,27 +1094,25 @@ checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", - "instant", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys", ] [[package]] @@ -2125,11 +2104,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.8.5" +version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdc46ca74dd45faeaaf96a8fbe2406f425829705ee62100ccaa9b34a2145cff8" +checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ - "autocfg", "bytes", "libc", "memchr", @@ -2139,6 +2117,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "winapi", ] @@ -2402,6 +2381,12 @@ version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + 
[[package]] name = "wasm-bindgen" version = "0.2.79" @@ -2528,6 +2513,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.10.1" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 0daa874e..7b982a0a 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -22,7 +22,7 @@ shell-words = "1.0.0" simplelog = "0.12" snafu = "0.7" structopt = { version = "0.3", default-features = false } -tokio = { version = "~1.8", default-features = false, features = ["macros", "rt-multi-thread"] } # LTS +tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } toml = "0.5" url = "2.2.2" diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 2de0f2e8..8e301c44 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -37,7 +37,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", default-features = false } tinytemplate = "1.1" -tokio = { version = "~1.8", features = ["full"] } # LTS +tokio = { version = "1", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" tough = { version = "0.12", features = ["http"] } From 0ace6a973e1fc8f151b68cfbeaee13dca25d4736 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 6 Jun 2022 18:54:32 +0000 Subject: [PATCH 0655/1356] kernel: adjust option for megaraid sas This drops CONFIG_MEGARAID_SAS=y for the 5.4 kernel, which we don't plan to support on bare metal, and relocates the option to the block storage section of the config for the 5.10 kernel. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 6 +++--- packages/kernel-5.4/config-bottlerocket | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 0a877eaf..79cfee5f 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -48,6 +48,9 @@ CONFIG_VIRTIO=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_PCI=y +# LSI Logic's SAS based RAID controllers +CONFIG_MEGARAID_SAS=y + # dm-verity and enabling it on the kernel command line CONFIG_BLK_DEV_DM=y CONFIG_DAX=y @@ -128,6 +131,3 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y - -# Enables support for LSI Logic's SAS based RAID controllers -CONFIG_MEGARAID_SAS=y diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 70bda426..34a422d9 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -85,6 +85,3 @@ CONFIG_MOUSE_PS2=m # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y - -# Enables support for LSI Logic's SAS based RAID controllers -CONFIG_MEGARAID_SAS=y From f25d65492220f988c3acae8c1635a25ac6a2f0cb Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 6 Jun 2022 18:56:18 +0000 Subject: [PATCH 0656/1356] kernel-5.10: add support for Microsemi PQI Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 79cfee5f..883975d4 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -51,6 +51,9 @@ CONFIG_VIRTIO_PCI=y # LSI Logic's SAS based RAID controllers CONFIG_MEGARAID_SAS=y +# Microsemi PQI controllers +CONFIG_SCSI_SMARTPQI=y + # dm-verity and enabling it on the kernel command line CONFIG_BLK_DEV_DM=y CONFIG_DAX=y From d94ca9627c5f06ea3e2283f3d7296d5755d52442 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 16 May 2022 21:11:25 +0000 Subject: [PATCH 0657/1356] kernel: install headers and modules in parallel This provides a significant speed boost for kernel builds - around 50 seconds on a system with many available CPUs. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 64769bd8..dbb4d83a 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -112,8 +112,8 @@ make -s\\\ %kmake %{?_smp_mflags} modules %install -%kmake headers_install -%kmake modules_install +%kmake %{?_smp_mflags} headers_install +%kmake %{?_smp_mflags} modules_install install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index e1145c69..fc07c95e 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -119,8 +119,8 @@ make -s\\\ %kmake %{?_smp_mflags} modules %install -%kmake headers_install -%kmake modules_install +%kmake %{?_smp_mflags} headers_install +%kmake %{?_smp_mflags} modules_install install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz From c811b6909b86b3d26b83c795beef1219b1df2a94 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 3 Jun 2022 21:26:37 +0000 Subject: [PATCH 0658/1356] kernel-5.4: update to 5.4.190 --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 0dd8e405..0ee77825 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/a120999c2cd538adae1c97c87e6d60f3bcf6f761064204638a5647e06aea1aad/kernel-5.4.188-104.359.amzn2.src.rpm" -sha512 = "ebb6f8460ddfccc50e89b499563dfa64f1c3228e9fe3cabd20ec1561ca8bf3764a50853b35085742dde3a219ad9314033d8c12cbc2d615f463aab0e062d9a229" +url = "https://cdn.amazonlinux.com/blobstore/ef7cb8ef41ebe7e5edc8bbf23eebc16600d6a8e21f8b468723b31fb32ef3e583/kernel-5.4.190-107.353.amzn2.src.rpm" +sha512 = "8682ee3ec20558b4e82d688fe83134ee01b6379b7da8cb7a367f1534000891384b5364b8ef991d8bd88ab53ad83d8984014f284f43ea13952b0e36bf3b6f2cd7" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index fc07c95e..26d354f7 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.188 +Version: 5.4.190 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/a120999c2cd538adae1c97c87e6d60f3bcf6f761064204638a5647e06aea1aad/kernel-5.4.188-104.359.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/ef7cb8ef41ebe7e5edc8bbf23eebc16600d6a8e21f8b468723b31fb32ef3e583/kernel-5.4.190-107.353.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 734cd57cd86a47a20fb2ecc32e7a9bcb76d222dd Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Fri, 3 Jun 2022 21:28:13 +0000 Subject: [PATCH 0659/1356] kernel-5.10: update to 5.10.112 --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 9e0bf10e..1c012b20 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/3479900579a0dbe61cbe7e6d76620774513369246def8bae42ec791865d68df9/kernel-5.10.109-104.500.amzn2.src.rpm" -sha512 = "66c840eee5333bb77f8661b14ec07b33ea7b6d9db82c89370c8109c0a315c6ad532364d0c879efd45fff0bfe3855876bbf53b11b5107b0dc55f9d2ac1a59cc6d" +url = "https://cdn.amazonlinux.com/blobstore/70822ee2bc85888532dc1238257e5dd10ba67243f849a6a8d17cac89ae1663b6/kernel-5.10.112-108.499.amzn2.src.rpm" +sha512 = "0ec959cdc92684bbc4a1dc758f84ebd12e085a951c7425d9783daf5932eb27a026e2cbb6c1f7bacd9ac5f47b6c9ecaad020d07e7641294f1cad072163e71385e" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index dbb4d83a..4bfe6547 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.109 +Version: 5.10.112 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/3479900579a0dbe61cbe7e6d76620774513369246def8bae42ec791865d68df9/kernel-5.10.109-104.500.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/70822ee2bc85888532dc1238257e5dd10ba67243f849a6a8d17cac89ae1663b6/kernel-5.10.112-108.499.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. 
From 821d0dd27cb9f4ab9ee0184679d4662cb0b2fea1 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Mon, 6 Jun 2022 14:20:22 -0700 Subject: [PATCH 0660/1356] tools: run cargo update --- tools/Cargo.lock | 315 ++++++++++++++++++++++++----------------------- 1 file changed, 160 insertions(+), 155 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 5b729748..bd559864 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -52,7 +52,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be69f70ef5497dd6ab331a50bd95c6ac6b8f7f17a7967838332743fbd58dc3b5" dependencies = [ "argh_shared", - "heck", + "heck 0.3.3", "proc-macro2", "quote", "syn", @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ "proc-macro2", "quote", @@ -104,9 +104,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" dependencies = [ "addr2line", "cc", @@ -178,9 +178,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" [[package]] name = "bytes" @@ -246,9 +246,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.9" +version = "3.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aad2534fad53df1cc12519c5cda696dd3e20e6118a027e24054aea14a0bdcbe" +checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" dependencies = [ "atty", "bitflags", @@ -261,9 +261,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "189ddd3b5d32a70b35e7686054371742a937b0d99128e76dde6340210e966669" +checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" dependencies = [ "os_str_bytes", ] @@ -482,9 +482,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if", ] @@ -623,9 +623,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ "cfg-if", "libc", @@ -653,9 +653,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"62eeb471aa3e3c9197aa4bfeabfe02982f6dc96f750486c0bb0009ac58b26d2b" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -685,6 +685,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -721,9 +727,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", @@ -732,9 +738,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -743,9 +749,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -755,9 +761,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -805,9 +811,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" dependencies = [ "autocfg", "hashbrown", @@ -831,7 +837,7 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", - "clap 3.1.9", + "clap 3.1.18", "hex", "log", "pubsys-config", @@ -861,21 +867,21 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" +checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" dependencies = [ "wasm-bindgen", ] @@ -888,9 +894,9 @@ checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.121" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "linked-hash-map" @@ -900,18 +906,19 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] @@ -935,9 +942,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -956,12 +963,11 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", - "autocfg", ] [[package]] @@ -996,9 +1002,9 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -1006,9 +1012,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] @@ -1025,9 +1031,9 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba1801fb138d8e85e11d0fc70baf4fe1cdfffda7c6cd34a854905df588e5ed0" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] @@ -1040,9 +1046,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.27.1" +version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" dependencies = [ "memchr", ] @@ -1060,9 +1066,9 
@@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "opaque-debug" @@ -1088,9 +1094,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" [[package]] name = "parking_lot" @@ -1126,18 +1132,18 @@ dependencies = [ [[package]] name = "path-absolutize" -version = "3.0.12" +version = "3.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2a79d7c1c4eab523515c4561459b10516d6e7014aa76edc3ea05680d5c5d2d" +checksum = "d3de4b40bd9736640f14c438304c09538159802388febb02c8abaae0846c1f13" dependencies = [ "path-dedot", ] [[package]] name = "path-dedot" -version = "3.0.16" +version = "3.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f326e2a3331685a5e3d4633bb9836bd92126e08037cb512252f3612f616a0b28" +checksum = "d611d5291372b3738a34ebf0d1f849e58b1dcc1101032f76a346eaa1f8ddbb5b" dependencies = [ "once_cell", ] @@ -1159,9 +1165,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1201,11 +1207,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1214,7 +1220,7 @@ version = "0.1.0" dependencies = [ "async-trait", "chrono", - "clap 3.1.9", + "clap 3.1.18", "coldsnap", "duct", "futures", @@ -1288,9 +1294,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4af2ec4714533fcdf07e886f17025ace8b997b9ce51204ee69b6da831c3da57" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -1327,9 +1333,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg", "crossbeam-deque", @@ -1339,31 +1345,30 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - 
"lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7776223e2696f1aa4c6b0170e83212f47296a00424305117d013dfe86fb0fe55" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", "redox_syscall", @@ -1372,9 +1377,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ "aho-corasick", "memchr", @@ -1383,9 +1388,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "remove_dir_all" @@ -1634,9 +1639,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", @@ -1676,9 +1681,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "same-file" @@ -1691,12 +1696,12 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] [[package]] @@ -1740,27 +1745,27 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" +checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" 
dependencies = [ "proc-macro2", "quote", @@ -1769,9 +1774,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", @@ -1801,9 +1806,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" dependencies = [ "indexmap", "ryu", @@ -1879,9 +1884,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" @@ -1891,9 +1896,9 @@ checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "snafu" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eba135d2c579aa65364522eb78590cdf703176ef71ad4c32b00f58f7afb2df5" +checksum = "5177903bf45656592d9eb5c0e22f408fc023aae51dbe2088889b71633ba451f2" dependencies = [ "backtrace", "doc-comment", @@ -1902,11 +1907,11 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a7fe9b0669ef117c5cabc5549638528f36771f058ff977d7689deb517833a75" +checksum = "410b26ed97440d90ced3e2488c868d56a86e2064f5d7d6f417909b286afe25e5" dependencies = [ - "heck", + "heck 0.4.0", "proc-macro2", "quote", "syn", @@ -1957,7 +1962,7 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro-error", "proc-macro2", "quote", @@ -1972,13 +1977,13 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.89" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea297be220d52398dcc07ce15a209fce436d361735ac1db700cab3b6cdfb9f54" +checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2031,18 +2036,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -2089,9 +2094,9 @@ dependencies = [ [[package]] 
name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2124,9 +2129,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2135,9 +2140,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.3" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -2146,9 +2151,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite", @@ -2157,16 +2162,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] @@ -2245,9 +2250,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if", "pin-project-lite", @@ -2256,9 +2261,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa31669fa42c09c34d94d8165dd2012e8ff3c66aca50f3bb226b68f216f2706c" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ "lazy_static", ] @@ -2277,9 +2282,15 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" [[package]] name = "unicode-normalization" @@ -2302,12 +2313,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" -[[package]] 
-name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - [[package]] name = "untrusted" version = "0.7.1" @@ -2389,9 +2394,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2399,9 +2404,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ "bumpalo", "lazy_static", @@ -2414,9 +2419,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" dependencies = [ "cfg-if", "js-sys", @@ -2426,9 +2431,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2436,9 +2441,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", @@ -2449,15 +2454,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ "js-sys", "wasm-bindgen", @@ -2475,9 +2480,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" dependencies = [ "webpki", ] @@ -2582,6 +2587,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb5728b8afd3f280a869ce1d4c554ffaed35f45c231fc41bfbd0381bef50317" +checksum = 
"94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" From 69c46281ba7f5b6930ee825238233b537727c7e9 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 7 Jun 2022 19:35:57 +0000 Subject: [PATCH 0661/1356] packages: update kernel-5.10 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 1c012b20..a72cc614 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/70822ee2bc85888532dc1238257e5dd10ba67243f849a6a8d17cac89ae1663b6/kernel-5.10.112-108.499.amzn2.src.rpm" -sha512 = "0ec959cdc92684bbc4a1dc758f84ebd12e085a951c7425d9783daf5932eb27a026e2cbb6c1f7bacd9ac5f47b6c9ecaad020d07e7641294f1cad072163e71385e" +url = "https://cdn.amazonlinux.com/blobstore/fd3a270843eca4874b201fd3554b484a79b18edc0d3b845ff3288dd9dd0d69a8/kernel-5.10.118-111.515.amzn2.src.rpm" +sha512 = "f9d8d4f43757a84072e585b20f4bbec188d4d28d12c7183dae65348ff487508eab999048f1796f2f4bb1a8de71412156eae62248343f3a7e579d0babfce9fd64" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 4bfe6547..596b0e9f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.112 +Version: 5.10.118 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/70822ee2bc85888532dc1238257e5dd10ba67243f849a6a8d17cac89ae1663b6/kernel-5.10.112-108.499.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/fd3a270843eca4874b201fd3554b484a79b18edc0d3b845ff3288dd9dd0d69a8/kernel-5.10.118-111.515.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 13be8d48f2402a339b4283d9ea68c4977145eacb Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 7 Jun 2022 19:36:40 +0000 Subject: [PATCH 0662/1356] packages: update kernel-5.4 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 0ee77825..3dcc7f94 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/ef7cb8ef41ebe7e5edc8bbf23eebc16600d6a8e21f8b468723b31fb32ef3e583/kernel-5.4.190-107.353.amzn2.src.rpm" -sha512 = "8682ee3ec20558b4e82d688fe83134ee01b6379b7da8cb7a367f1534000891384b5364b8ef991d8bd88ab53ad83d8984014f284f43ea13952b0e36bf3b6f2cd7" +url = "https://cdn.amazonlinux.com/blobstore/9959b4af12a63755e451619398b6471f3c6a496b854ce73740c786907f67560a/kernel-5.4.196-108.356.amzn2.src.rpm" +sha512 = "4b063d857d05a2796fc607ba425d5f75964e1123b24cb0f0ab8e1cb8334944b9fc5d734c83f1d5ef186b2ac38fb7ece5be62a49579f3b4187ee380cd28bdfaaf" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 26d354f7..a68fc540 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.190 +Version: 5.4.196 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/ef7cb8ef41ebe7e5edc8bbf23eebc16600d6a8e21f8b468723b31fb32ef3e583/kernel-5.4.190-107.353.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9959b4af12a63755e451619398b6471f3c6a496b854ce73740c786907f67560a/kernel-5.4.196-108.356.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 5ec6ed0d0613b57db1f1df10bd28871682bd51cd Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 16 May 2022 16:47:12 -0700 Subject: [PATCH 0663/1356] Add aws-k8s-1.23 variant Adds aws-k8s-1.23 variant, relinks symlinks in models. aws-k8s-1.23 supports settings.boot while older aws-k8s-* variants do not. --- .github/workflows/build.yml | 2 +- README.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9ef0f3fe..9fd78369 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-ecs-1] + variant: [aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] diff --git a/README.md b/README.md index 53f6799c..7067e891 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,8 @@ The following variants support EKS, as described above: - `aws-k8s-1.19` - `aws-k8s-1.20` - `aws-k8s-1.21` +- `aws-k8s-1.22` +- `aws-k8s-1.23` - `aws-k8s-1.21-nvidia` - `aws-k8s-1.22-nvidia` From 3496f63799adcd662b4aa66cee532616d5e18554 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 18 May 2022 06:35:09 -0700 Subject: [PATCH 0664/1356] Add aws-k8s-1.23-nvidia variant Adds aws-k8s-1.23-nvidia variant, create symlinks for previous k8s version nvidia variant in models. aws-k8s-1.23-nvidia supports boot config settings while other nvidia variants do not. 
--- .github/workflows/build.yml | 8 ++++++++ README.md | 1 + 2 files changed, 9 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9fd78369..70843d9f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -77,6 +77,14 @@ jobs: arch: aarch64 supported: true fetch-upstream: "true" + - variant: aws-k8s-1.23-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-k8s-1.23-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" - variant: aws-ecs-1-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index 7067e891..b0384a39 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,7 @@ The following variants support EKS, as described above: - `aws-k8s-1.23` - `aws-k8s-1.21-nvidia` - `aws-k8s-1.22-nvidia` +- `aws-k8s-1.23-nvidia` The following variant supports ECS: From 99cec5a767b49e9f0a4a5cdb784dcbe6777c139a Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 18 May 2022 07:47:00 -0700 Subject: [PATCH 0665/1356] Add vmware-k8s-1.23 variant Adds vmware-k8s-1.23 variant, relinks symlinks in models vmware-k8s-1.23 supports boot config settings in models. Rename symlink for oci-hooks in vmware-k8s-1.22 --- .github/workflows/build.yml | 4 ++++ README.md | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 70843d9f..a479d9d6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -61,6 +61,10 @@ jobs: arch: x86_64 supported: true fetch-upstream: "false" + - variant: vmware-k8s-1.23 + arch: x86_64 + supported: true + fetch-upstream: "false" - variant: aws-k8s-1.21-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index b0384a39..875171d6 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.20` - `vmware-k8s-1.21` - `vmware-k8s-1.22` +- `vmware-k8s-1.23` The following variants are no longer supported.: @@ -408,7 +409,7 @@ Static pods can be particularly useful when running in standalone mode. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. -In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.22/defaults.d/). +In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.23/defaults.d). * `settings.kubernetes.node-ip`: The IP address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. From 0c07fcfa089a02caef4a579276696a4da19e71c7 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Wed, 18 May 2022 08:16:54 -0700 Subject: [PATCH 0666/1356] Add metal-k8s-1.23 variant This change adds an additional variant `metal-k8s-1.23`, which includes necessary Kubernetes packages and settings for running Bottlerocket on metal in a Kubernetes v1.23 cluster. 
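
As a sketch, a local image build for the new variant can follow the usual
cargo-make flow (see BUILDING.md for the authoritative steps; the exact flags
below are assumptions and may differ in your environment):

    # Build the metal-k8s-1.23 variant for x86_64 using the standard
    # cargo-make entry point; adjust BUILDSYS_ARCH for other targets.
    cargo make -e BUILDSYS_VARIANT=metal-k8s-1.23 -e BUILDSYS_ARCH=x86_64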
--- .github/workflows/build.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a479d9d6..3829caf5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,6 +49,10 @@ jobs: arch: x86_64 supported: false fetch-upstream: "false" + - variant: metal-k8s-1.23 + arch: x86_64 + supported: false + fetch-upstream: "false" - variant: vmware-k8s-1.20 arch: x86_64 supported: true From 115ec26e434c085f0b7eaa1ebac9b1d234c0c7e9 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 7 Jun 2022 17:15:35 -0700 Subject: [PATCH 0667/1356] models, kubernetes: add new provider-id setting This adds a new `settings.kubernetes.provider-id` setting for configuring the `providerID` item in kubelet config. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 875171d6..e9a03a4b 100644 --- a/README.md +++ b/README.md @@ -401,6 +401,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. * `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. * `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. +* `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 8576a35017edc1bb6d1ed6e8ec9d4713958a8ac9 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 5 Jun 2022 18:58:25 +0000 Subject: [PATCH 0668/1356] kernel: drop System.map from /boot System.map is available in the kernel development tree on running systems, and in the downloadable kmod kit. The /boot filesystem is more space-constrained and we don't need an extra copy there. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 2 -- packages/kernel-5.4/kernel-5.4.spec | 2 -- 2 files changed, 4 deletions(-) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 596b0e9f..bf1bcc9b 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -118,7 +118,6 @@ make -s\\\ install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz install -m 0644 .config %{buildroot}/boot/config -install -m 0644 System.map %{buildroot}/boot/System.map find %{buildroot}%{_cross_prefix} \ \( -name .install -o -name .check -o \ @@ -226,7 +225,6 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %{_cross_attribution_file} /boot/vmlinuz /boot/config -/boot/System.map %files modules %dir %{_cross_libdir}/modules diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index a68fc540..dfb442d0 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -125,7 +125,6 @@ make -s\\\ install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz install -m 0644 .config %{buildroot}/boot/config -install -m 0644 System.map %{buildroot}/boot/System.map find %{buildroot}%{_cross_prefix} \ \( -name .install -o -name .check -o \ @@ -231,7 +230,6 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %{_cross_attribution_file} /boot/vmlinuz /boot/config -/boot/System.map %files modules %dir %{_cross_libdir}/modules From 9c1dd808b2f16b84caee41ee1ed41f35de3fdc81 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 5 Jun 2022 18:59:57 +0000 Subject: [PATCH 0669/1356] kernel: restrict permissions on System.map This is good practice although the security benefit is limited, since unprivileged containers would need a volume mount to access the file, and could be running as root. Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 3 +++ packages/kernel-5.4/kernel-5.4.spec | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index bf1bcc9b..34d7119a 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -146,6 +146,9 @@ sed -i \ -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ scripts/Makefile +# Restrict permissions on System.map. +chmod 600 System.map + ( find * \ -type f \ diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index dfb442d0..635c3140 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -153,6 +153,9 @@ sed -i \ -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ scripts/Makefile +# Restrict permissions on System.map. +chmod 600 System.map + ( find * \ -type f \ From fbe571b1428b19da92c64ca5fe265f8bc8f662d3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 27 May 2022 03:21:01 +0000 Subject: [PATCH 0670/1356] build: set permissions for /boot Restrict these files to align with standard practice, even though all the contents are publicly available through the "boot" images in the updates repository. 
Signed-off-by: Ben Cressey --- tools/rpm2img | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/rpm2img b/tools/rpm2img index cd325a47..b6872b92 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -288,6 +288,7 @@ EOF # BOTTLEROCKET-BOOT-A mkdir -p "${BOOT_MOUNT}/lost+found" +chmod -R go-rwx "${BOOT_MOUNT}" BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ "${SELINUX_FILE_CONTEXTS}" "${BOOT_MOUNT}" \ | awk -v root="${BOOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') From 4d8a28ab2e4abae299b1dd0e512092a0e3c307fe Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 22 Jun 2022 23:49:19 +0000 Subject: [PATCH 0671/1356] sources: remove unused crate growpart Signed-off-by: Arnaldo Garcia Rincon --- GLOSSARY.md | 1 - 1 file changed, 1 deletion(-) diff --git a/GLOSSARY.md b/GLOSSARY.md index 6476d601..86b0cf7d 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -9,7 +9,6 @@ * **gptprio:** A structure of bits in GPT partition headers that specifies priority, tries remaining, and whether the partition booted successfully before. signpost sets these and GRUB uses them to determine which partition set to boot. * [**ghostdog**](sources/ghostdog): A program used to manage ephemeral disks. -* [**growpart**](sources/growpart): A program used to expand disk partitions upon boot. * **host containers**: Containers that run in a separate instance of containerd than "user" containers spawned by an orchestrator (e.g. Kubernetes). Used for system maintenance and connectivity. * [**host-ctr**](sources/host-ctr): The program started by `host-containers@.service` for each host container. From 8f0ca9af8477566978b87490007e22bf5ec72a7c Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 24 Jun 2022 12:58:16 -0700 Subject: [PATCH 0672/1356] kernel-5.10: enable mellanox modules This adds the kernel configs necessary for Mellanox NICs. --- packages/kernel-5.10/config-bottlerocket | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 883975d4..a0b16a2b 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -40,6 +40,14 @@ CONFIG_IXGBE_DCB=y CONFIG_IXGBE_HWMON=y CONFIG_IXGBEVF=m +# Mellanox network support +CONFIG_MLXFW=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE_EN=y +CONFIG_NET_SWITCHDEV=y + # Xen blkfront for Xen-based EC2 platforms CONFIG_XEN_BLKDEV_FRONTEND=y From f8fced40a172838fab602768bd81a901ea78ac80 Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Fri, 17 Jun 2022 21:36:24 +0000 Subject: [PATCH 0673/1356] kernel 5.10: Add `bnxt` module for Broadcom 10/25Gb network --- packages/kernel-5.10/config-bottlerocket | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index a0b16a2b..f2fd5581 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -32,6 +32,7 @@ CONFIG_E1000e_hwts=y CONFIG_NET_VENDOR_BROADCOM=m CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m +CONFIG_BNXT=m # Intel 10G network support CONFIG_IXGB=m From 4636fbb1036392b6db2932be54662788071bcc14 Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Tue, 28 Jun 2022 10:07:44 -0600 Subject: [PATCH 0674/1356] Adds nvidia ecs variant to README Signed-off-by: Kyle J. 
Davis --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e9a03a4b..e9e55f90 100644 --- a/README.md +++ b/README.md @@ -59,9 +59,10 @@ The following variants support EKS, as described above: - `aws-k8s-1.22-nvidia` - `aws-k8s-1.23-nvidia` -The following variant supports ECS: +The following variants support ECS: - `aws-ecs-1` +- `aws-ecs-1-nvidia` We also have variants that are designed to be Kubernetes worker nodes in VMware: From a7b8aa9deea8a884d5e940fa99f26f5ae7dc2f2f Mon Sep 17 00:00:00 2001 From: Zac Mrowicki Date: Thu, 9 Jun 2022 21:36:33 +0000 Subject: [PATCH 0675/1356] README.md: Update for metal Updates README.md to add some additional information for metal variants. --- README.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e9e55f90..0c82df45 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. If you’re ready to jump right in, read one of our setup guides for running Bottlerocket in [Amazon EKS](QUICKSTART-EKS.md), [Amazon ECS](QUICKSTART-ECS.md), or [VMware](QUICKSTART-VMWARE.md). +If you're interested in running Bottlerocket on bare metal servers, please refer to the [provisioning guide](PROVISIONING-METAL.md) to get started. Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. This is a reflection of what we've learned building operating systems and services at Amazon. @@ -71,7 +72,13 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: - `vmware-k8s-1.22` - `vmware-k8s-1.23` -The following variants are no longer supported.: +The following variants are designed to be Kubernetes worker nodes on bare metal: + +- `metal-k8s-1.21` +- `metal-k8s-1.22` +- `metal-k8s-1.23` + +The following variants are no longer supported: - `aws-k8s-1.15` - `aws-k8s-1.16` @@ -96,6 +103,8 @@ These guides describe: * how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers * how to launch a Bottlerocket instance in EC2 or VMware +To see how to provision Bottlerocket on bare metal, see [PROVISIONING-METAL](PROVISIONING-METAL.md). + To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). It describes: * how to build an image @@ -411,7 +420,8 @@ Static pods can be particularly useful when running in standalone mode. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. -In VMware, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on [default values](sources/models/src/vmware-k8s-1.23/defaults.d). +In VMware and on bare metal, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on default values. +(See the [VMware defaults](sources/models/src/vmware-k8s-1.23/defaults.d) or [bare metal defaults](sources/models/src/metal-k8s-1.23/defaults.d)). * `settings.kubernetes.node-ip`: The IP address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. 
From c9ca0bf5e7a8371947c65645624a282e59d64267 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 7 Jun 2022 20:29:52 +0000 Subject: [PATCH 0676/1356] tools: Move empty-bootconfig.data into a dedicated directory empty-bootconfig.data will soon get company. Create a dedicated directory for pre-assembled bootconfig initrds and move the file there. Signed-off-by: Markus Boehme --- tools/{ => bootconfig}/empty-bootconfig.data | Bin tools/rpm2img | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tools/{ => bootconfig}/empty-bootconfig.data (100%) diff --git a/tools/empty-bootconfig.data b/tools/bootconfig/empty-bootconfig.data similarity index 100% rename from tools/empty-bootconfig.data rename to tools/bootconfig/empty-bootconfig.data diff --git a/tools/rpm2img b/tools/rpm2img index b6872b92..6b0565a1 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -301,7 +301,7 @@ dd if="${BOOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A # Copy the empty bootconfig file into the image so grub doesn't pause and print # an error that the file doesn't exist -cp /host/tools/empty-bootconfig.data "${PRIVATE_MOUNT}/bootconfig.data" +cp /host/tools/bootconfig/empty-bootconfig.data "${PRIVATE_MOUNT}/bootconfig.data" # Targeted toward the current API server implementation. # Relative to the ext4 defaults, we: # - adjust the inode ratio since we expect lots of small files From 6339b1ec6564968615b3c00ad611628b68fea03b Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 7 Jun 2022 21:16:29 +0000 Subject: [PATCH 0677/1356] tools: Add start-local-vm script Starts a virtual machine running a locally built Bottlerocket image via QEMU and KVM. This is meant to ease development and experimentation for situations that don't call for integration into a Kubernetes cluster or other amenities provided in a cloud VM. This does the minimal amount of work to meaningfully interact with the launched VM: It configures the serial console for direct login available in the -dev variants and forwards host TCP port 2222 to VM TCP port 22 so login via SSH works if both the network is configured and the admin container enabled. Users can inject files into the private partition of a Bottlerocket image before it is launched to simulate the presence of user data and other configuration. For example, "--inject-file ipv4-net.toml:net.toml" adds the file "ipv4-net.toml" to the private partition as "net.toml". Potentially helpful future work includes running images where host and guest architecture differ, made possible by QEMU's TCG, as well as generating and automatically injecting bare-bones variants of "net.toml" and "user-data.toml" files to kick-start exploration. 
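
A typical session might look like the following sketch (file names here are
placeholders; see the script's --help output for the full option list):

    # Launch the locally built x86_64 metal-dev image, injecting a
    # hypothetical network configuration file as net.toml.
    tools/start-local-vm --arch x86_64 --variant metal-dev \
        --inject-file my-net.toml:net.toml

    # If the admin container was enabled via injected user data, log in
    # over the forwarded host port 2222.
    ssh -p 2222 ec2-user@localhost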
Signed-off-by: Markus Boehme --- .../qemu-x86-console-bootconfig.data | Bin 0 -> 60 bytes tools/start-local-vm | 270 ++++++++++++++++++ 2 files changed, 270 insertions(+) create mode 100644 tools/bootconfig/qemu-x86-console-bootconfig.data create mode 100755 tools/start-local-vm diff --git a/tools/bootconfig/qemu-x86-console-bootconfig.data b/tools/bootconfig/qemu-x86-console-bootconfig.data new file mode 100644 index 0000000000000000000000000000000000000000..b6aa9ebca270586ff80010c28e8e7e97ae142b06 GIT binary patch literal 60 zcmc~!Ey_#HQK;rp0D|QFyyELqk&|1A{yZu39bz1`P%V1~+a724yFI N{}5+?KQ~WzE&u{J4%Ywx literal 0 HcmV?d00001 diff --git a/tools/start-local-vm b/tools/start-local-vm new file mode 100755 index 00000000..c71c033b --- /dev/null +++ b/tools/start-local-vm @@ -0,0 +1,270 @@ +#!/usr/bin/env bash + +shopt -s nullglob + +arch=${BUILDSYS_ARCH} +variant=${BUILDSYS_VARIANT} +host_port_forwards=tcp::2222-:22 +vm_mem=4G +vm_cpus=4 +force_extract= +declare -A extra_files=() + +current_images=() +boot_image= +data_image= + +readonly repo_root=$(git rev-parse --show-toplevel) + +bail() { + >&2 echo "$@" + exit 1 +} + +show_usage() { + echo "\ +usage: ${0##*/} [--arch BUILDSYS_ARCH] [--variant BUILDSYS_VARIANT] + [--host-port-forwards HOST_PORT_FWDS] + [--vm-memory VM_MEMORY] [--vm-cpus VM_CPUS] + [--inject-file LOCAL_PATH[:IMAGE_PATH]]... + +Launch a local virtual machine from a Bottlerocket image. + +Options: + + --arch architecture of the Bottlerocket image (must match the + host architecture ($(uname -m)); may be omitted if the + BUILDSYS_ARCH environment variable is set) + --variant Bottlerocket variant to run (may be omitted if the + BUILDSYS_VARIANT environment variable is set) + --host-port-forwards + list of host ports to forward to the VM; HOST_PORT_FWDS + must be a valid QEMU port forwarding specifier (default + is ${host_port_forwards}) + --vm-memory amount of memory to assign to the VM; VM_MEMORY must be + a valid QEMU memory specifier (default is ${vm_mem}) + --vm-cpus number of CPUs to spawn for VM (default is ${vm_cpus}) + --force-extract force recreation of the extracted Bottlerocket image, + e.g. to force first boot behavior + --inject-file adds a local file to the private partition of the + Bottlerocket image before launching the virtual machine + (may be given multiple times); existing data on the + private partition will be lost + --help shows this usage text + +By default, the virtual machine's port 22 (SSH) will be exposed via the local +port 2222, i.e. if the Bottlerocket admin container has been enabled via +user-data, it can be reached by running + + ssh -p 2222 ec2-user@localhost + +from the host. 
+ +Usage example: + + ${0##*/} --arch $(uname -m) --variant metal-dev --inject-file net.toml +" +} + +usage_error() { + local error=$1 + + { + if [[ -n ${error} ]]; then + printf "%s\n\n" "${error}" + fi + show_usage + } >&2 + + exit 1 +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + show_usage; exit 0 ;; + --arch) + shift; arch=$1 ;; + --variant) + shift; variant=$1 ;; + --host-port-forwards) + shift; host_port_forwards=$1 ;; + --vm-memory) + shift; vm_mem=$1 ;; + --vm-cpus) + shift; vm_cpus=$1 ;; + --force-extract) + force_extract=yes ;; + --inject-file) + shift; local file_spec=$1 + if [[ ${file_spec} = *:* ]]; then + local local_file=${file_spec%%:*} + local image_file=${file_spec#*:} + else + local local_file=${file_spec} + local image_file=${file_spec##*/} + fi + extra_files[${local_file}]=${image_file} + ;; + *) + usage_error "unknown option '$1'" ;; + esac + shift + done + + [[ -n ${arch} ]] || usage_error 'Architecture needs to be set via either --arch or BUILDSYS_ARCH.' + [[ -n ${variant} ]] || usage_error 'Variant needs to be set via either --variant or BUILDSYS_VARIANT.' + + local host_arch=$(uname -m) + [[ ${arch} = ${host_arch} ]] || bail "Architecture needs to match host architecture (${host_arch}) for hardware virtualization." + + for path in "${!extra_files[@]}"; do + [[ -e ${path} ]] || bail "Cannot find local file '${path}' to inject." + done +} + +find_current_images() { + # BuildSys removes all but the latest build's compressed images + readonly current_images=( "${repo_root}/build/images/${arch}-${variant}"/*.img.lz4 ) + + if [[ ${#current_images[@]} -eq 0 ]]; then + bail 'No images found. Did the last build fail?' + fi +} + +remove_old_images() { + # Any of the latest images can serve as our touchstone. Older images can go. + declare -r any_current_image=${current_images[0]} + + for extracted_image in "${repo_root}/build/images/${arch}-${variant}"/*.img; do + if [[ ${extracted_image} -ot ${any_current_image} ]]; then + rm "${extracted_image}" + fi + done +} + +extract_images() { + for compressed_image in "${current_images[@]}"; do + uncompressed_image=${compressed_image%*.lz4} + if [[ ${force_extract} = yes ]] || [[ ${compressed_image} -nt ${uncompressed_image} ]]; then + lz4 --decompress --force --keep "${compressed_image}" + fi + done +} + +select_boot_image() { + for image in "${repo_root}/build/images/${arch}-${variant}/bottlerocket-${variant}-${arch}"-*.img; do + case ${image} in + *data*) readonly data_image=${image} ;; + *) readonly boot_image=${image} ;; + esac + done +} + +create_extra_files() { + # Explicitly instruct the kernel to send its output to the serial port on + # x86 via a bootconfig initrd. Passing in settings via user-data would be + # too late to get console output of the first boot. + if [[ ${arch} = x86_64 ]]; then + extra_files["${repo_root}/tools/bootconfig/qemu-x86-console-bootconfig.data"]=bootconfig.data + fi + + # If the private partition needs to be recreated, ensure that any bootconfig + # data file is present, otherwise GRUB will notice the missing file and wait + # for a key press. 
+ if [[ ${#extra_files[@]} -gt 0 ]]; then + local has_bootconfig=no + for image_file in "${extra_files[@]}"; do + if [[ ${image_file} = bootconfig.data ]]; then + has_bootconfig=yes + break + fi + done + if [[ ${has_bootconfig} = no ]]; then + extra_files["${repo_root}/tools/bootconfig/empty-bootconfig.data"]=bootconfig.data + fi + fi +} + +inject_files() { + if [[ ${#extra_files[@]} -eq 0 ]]; then + return 0 + fi + + # We inject files into the boot image by replacing the private partition + # entirely. The new partition has to perfectly fit over the original one. + # Find the first and last sector, then calculate the partition's size. In + # absence of actual hardware, assume a traditional sector size of 512 bytes. + local private_first_sector private_last_sector + read -r private_first_sector private_last_sector < <( + fdisk --list-details "${boot_image}" \ + | awk '/BOTTLEROCKET-PRIVATE/ { print $2, $3 }') + if [[ -z ${private_first_sector} ]] || [[ -z ${private_last_sector} ]]; then + bail "Failed to find the private partition in '${boot_image}'." + fi + local private_size_mib=$(( (private_last_sector - private_first_sector + 1) * 512 / 1024 / 1024 )) + + local private_mount private_image + private_mount=$(mktemp -d) + private_image=$(mktemp) + + for local_file in "${!extra_files[@]}"; do + local image_file=${extra_files[${local_file}]} + cp "${local_file}" "${private_mount}/${image_file}" + done + + if ! mkfs.ext4 -d "${private_mount}" "${private_image}" "${private_size_mib}M" \ + || ! dd if="${private_image}" of="${boot_image}" conv=notrunc bs=512 seek="${private_first_sector}" + then + rm -f "${private_image}" + rm -rf "${private_mount}" + bail "Failed to inject files into '${boot_image}'." + fi +} + +launch_vm() { + local -a qemu_args=( + -nographic + -enable-kvm + -cpu host + -smp "${vm_cpus}" + -m "${vm_mem}" + -drive index=0,if=virtio,format=raw,file="${boot_image}" + ) + + # Plug the virtual primary NIC in as BDF 00:10.0 so udev will give it a + # consistent name we can know ahead of time--enp0s16 or ens16. + qemu_args+=( + -netdev user,id=net0,hostfwd="${host_port_forwards}" + -device virtio-net-pci,netdev=net0,addr=10.0 + ) + + # Resolve the last bit of uncertainty by disabling ACPI-based PCI hot plug, + # causing udev to use the bus location when naming the NIC (enp0s16). Since + # QEMU does not support PCI hot plug via ACPI on Arm, turn it off for the + # emulated x86_64 chipset only to achieve parity. + if [[ ${arch} = x86_64 ]]; then + qemu_args+=( -global PIIX4_PM.acpi-root-pci-hotplug=off ) + fi + + if [[ ${arch} = aarch64 ]]; then + qemu_args+=( -machine virt ) + qemu_args+=( -bios /usr/share/edk2/aarch64/QEMU_EFI.silent.fd ) + fi + + if [[ -n ${data_image} ]]; then + qemu_args+=( -drive index=1,if=virtio,format=raw,file="${data_image}" ) + fi + + qemu-system-"${arch}" "${qemu_args[@]}" +} + +parse_args "$@" +find_current_images +remove_old_images +extract_images +select_boot_image +create_extra_files +inject_files +launch_vm From fcf0069337931450025d86e8e42c6d632c0b7e45 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 4 Jul 2022 14:18:17 +0000 Subject: [PATCH 0678/1356] kernel: config: split bare metal specific config Split out bare metal specific config options. All these are enabling hardware support, mostly for specific storage or networking hardware that is not available in settings where the non-metal variants are used. 
For now split out the metal specific config options to later enable only building those drivers for the metal variants and keep the non-metal variants lean. This moves the settings originally introduced in commits b2e64488 Merge pull request #2243 from zmrow/broadcom_bnxt 6ab5fb62 kernel-5.10: enable mellanox modules 3be82c94 kernel 5.10: Add `bnxt` module for Broadcom 10/25Gb network 58d15ce0 kernel-5.10: add support for Microsemi PQI a69bcfa3 kernel: adjust option for megaraid sas d0d2c68e kernel-5.10: enable support for broadcom ethernet cards 31d50628 kernel: add support for MegaRAID SAS 928a481b kernel 5.10: Add config to support additional hardware Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket | 47 ------------------- .../kernel-5.10/config-bottlerocket-metal | 44 +++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 4 +- 3 files changed, 47 insertions(+), 48 deletions(-) create mode 100644 packages/kernel-5.10/config-bottlerocket-metal diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index f2fd5581..19c6f8bc 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -8,47 +8,6 @@ CONFIG_EXT4_FS=y CONFIG_BLK_DEV_NVME=y CONFIG_NVME_CORE=y -# SATA support -CONFIG_BLK_DEV_SD=y -CONFIG_SATA_AHCI=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y - -# Network support -CONFIG_ETHERNET=y -CONFIG_NET_CORE=y -CONFIG_NETDEVICES=y - -# Intel network support -CONFIG_IGB=m -CONFIG_IGBVF=m -CONFIG_NET_VENDOR_INTEL=m -CONFIG_IGB_HWMON=y -CONFIG_E1000=m -CONFIG_E1000e=m -CONFIG_E1000e_hwts=y - -# Broadcom network support -CONFIG_NET_VENDOR_BROADCOM=m -CONFIG_TIGON3_HWMON=y -CONFIG_TIGON3=m -CONFIG_BNXT=m - -# Intel 10G network support -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBEVF=m - -# Mellanox network support -CONFIG_MLXFW=m -CONFIG_MLX5_CORE=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE_EN=y -CONFIG_NET_SWITCHDEV=y - # Xen blkfront for Xen-based EC2 platforms CONFIG_XEN_BLKDEV_FRONTEND=y @@ -57,12 +16,6 @@ CONFIG_VIRTIO=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_PCI=y -# LSI Logic's SAS based RAID controllers -CONFIG_MEGARAID_SAS=y - -# Microsemi PQI controllers -CONFIG_SCSI_SMARTPQI=y - # dm-verity and enabling it on the kernel command line CONFIG_BLK_DEV_DM=y CONFIG_DAX=y diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal new file mode 100644 index 00000000..17b49859 --- /dev/null +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -0,0 +1,44 @@ +# This file holds all the settings that are specific to hardware enablement +# we do for the metal variants. 
+ +# SATA support +CONFIG_BLK_DEV_SD=y +CONFIG_SATA_AHCI=y +CONFIG_ATA=y +CONFIG_ATA_PIIX=y + +# Intel network support +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_NET_VENDOR_INTEL=m +CONFIG_IGB_HWMON=y +CONFIG_E1000=m +CONFIG_E1000e=m +CONFIG_E1000e_hwts=y + +# Broadcom network support +CONFIG_NET_VENDOR_BROADCOM=m +CONFIG_TIGON3_HWMON=y +CONFIG_TIGON3=m +CONFIG_BNXT=m + +# Intel 10G network support +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=m + +# Mellanox network support +CONFIG_MLXFW=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE_EN=y +CONFIG_NET_SWITCHDEV=y + +# LSI Logic's SAS based RAID controllers +CONFIG_MEGARAID_SAS=y + +# Microsemi PQI controllers +CONFIG_SCSI_SMARTPQI=y diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 34d7119a..9edb31b5 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -9,6 +9,7 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/fd3a270843eca4874b201fd3554b484a79b18edc0d3b845ff3288dd9dd0d69a8/kernel-5.10.118-111.515.amzn2.src.rpm Source100: config-bottlerocket +Source101: config-bottlerocket-metal # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -93,7 +94,8 @@ scripts/kconfig/merge_config.sh \ %if "%{_cross_arch}" == "x86_64" ../config-microcode \ %endif - %{SOURCE100} + %{SOURCE100} \ + %{SOURCE101} rm -f ../config-* ../*.patch %global kmake \ From 80e4bfbca33a634205483765a351b53af8df88e5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 7 Jul 2022 15:29:36 +0000 Subject: [PATCH 0679/1356] kernel-5.10: fix net vendor config "NET_VENDOR_INTEL" and "NET_VENDOR_BROADCOM" are booleans rather than tristates. Correct the value to avoid a build-time warning. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket-metal | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 17b49859..e93cdc62 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -10,14 +10,14 @@ CONFIG_ATA_PIIX=y # Intel network support CONFIG_IGB=m CONFIG_IGBVF=m -CONFIG_NET_VENDOR_INTEL=m +CONFIG_NET_VENDOR_INTEL=y CONFIG_IGB_HWMON=y CONFIG_E1000=m CONFIG_E1000e=m CONFIG_E1000e_hwts=y # Broadcom network support -CONFIG_NET_VENDOR_BROADCOM=m +CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m CONFIG_BNXT=m From 0ad5ae11dd8ae1f413f440ed2c05337f7bfc29ef Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 7 Jul 2022 15:30:44 +0000 Subject: [PATCH 0680/1356] kernel-5.10: fix E1000E config Fixes two build-time warnings: Value requested for CONFIG_E1000e not in final .config Value requested for CONFIG_E1000e_hwts not in final .config Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket-metal | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index e93cdc62..7670fe64 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -13,8 +13,8 @@ CONFIG_IGBVF=m CONFIG_NET_VENDOR_INTEL=y CONFIG_IGB_HWMON=y CONFIG_E1000=m -CONFIG_E1000e=m -CONFIG_E1000e_hwts=y +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y # Broadcom network support CONFIG_NET_VENDOR_BROADCOM=y From 976cadac3e56fe795698259a87cd7ffa0d506c80 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 24 Jun 2022 17:34:07 +0000 Subject: [PATCH 0681/1356] kernel: disable unused filesystems It's not possible to rule out the existence of workloads using these filesystems, but it is possible to make a series of educated guesses. For Kubernetes variants, a CSI driver that supports the filesystem would be required to use it for container storage. This is especially true for network-based fileystems, because Bottlerocket does not ship any of the userspace tools required. Disabled network filesystems: * afs - network-based, no CSI driver * gfs2 - network-based, no CSI driver * nfs v2 - obsoleted by later versions of NFS Another use case would be containers that run with CAP_SYS_ADMIN and mount full disk or filesystem images. Disabling these filesystems is more of a judgment call, and comes down to whether the format is obsolete, whether it's in common use, whether it's useful on current platforms, and if it's consistently enabled across architectures. Obsolete local filesystems: * cramfs - read-only format, obsoleted by squashfs * ecryptfs - obsoleted by native filesystem encryption * ext2 - obsolete, handled by the ext4 driver * ext3 - obsolete, handled by the ext4 driver * romfs - obsoleted by initramfs Uncommon local filesystems: * hfs, hfsplus - not enabled on aarch64 * jfs - not enabled on aarch64 * jffs2 - not supported by current platforms * nilfs2 - not enabled on aarch64 * ntfs - not enabled on 5.10 kernels * ufs - not enabled on aarch64 * zonefs - not supported by current platforms Note that a potential use case for hfsplus could be to generate DMG files for OS X software installs. However, the more common approach appears to be using `genisoimage` on Linux. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 19 +++++++++++++++++++ packages/kernel-5.4/config-bottlerocket | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 19c6f8bc..2a5f5e76 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -96,3 +96,22 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y + +# Disable unused filesystems. +CONFIG_AFS_FS=n +CONFIG_CRAMFS=n +CONFIG_ECRYPT_FS=n +CONFIG_EXT2_FS=n +CONFIG_EXT3_FS=n +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_GFS2_FS=n +CONFIG_HFS_FS=n +CONFIG_HFSPLUS_FS=n +CONFIG_JFS_FS=n +CONFIG_JFFS2_FS=n +CONFIG_NFS_V2=n +CONFIG_NILFS2_FS=n +CONFIG_NTFS_FS=n +CONFIG_ROMFS_FS=n +CONFIG_UFS_FS=n +CONFIG_ZONEFS_FS=n diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 34a422d9..d21f44aa 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -85,3 +85,22 @@ CONFIG_MOUSE_PS2=m # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y + +# Disable unused filesystems. +CONFIG_AFS_FS=n +CONFIG_CRAMFS=n +CONFIG_ECRYPT_FS=n +CONFIG_EXT2_FS=n +CONFIG_EXT3_FS=n +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_GFS2_FS=n +CONFIG_HFS_FS=n +CONFIG_HFSPLUS_FS=n +CONFIG_JFS_FS=n +CONFIG_JFFS2_FS=n +CONFIG_NFS_V2=n +CONFIG_NILFS2_FS=n +CONFIG_NTFS_FS=n +CONFIG_ROMFS_FS=n +CONFIG_UFS_FS=n +CONFIG_ZONEFS_FS=n From 725d91e34376a048265e6aee2b5524ae0e21c34c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 24 Jun 2022 18:48:51 +0000 Subject: [PATCH 0682/1356] kernel: disable unused network protocols These protocols are unlikely to be used. They might require special hardware; they might just not be supported on the platforms where Bottlerocket runs today; they might raise security concerns; or some other reasoning might apply. Requires special hardware or platform support: * atm - an alternative to IP * can - used in automative and industrial applications * hsr - redundancy protocol for wired networks * rfkill - controls RF switches on WiFi and Bluetooth cards Raises security concerns: * dccp - CVE-2020-16119, CVE-2018-1130 * rds - CVE-2021-45480, CVE-2019-11815 * tipc - CVE-2022-0435, CVE-2021-29646 Other reasons: * af-rxrpc - only used by AFS, which is disabled * l2tp - not enabled in 5.10 for x86_64 Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 11 +++++++++++ packages/kernel-5.4/config-bottlerocket | 11 +++++++++++ 2 files changed, 22 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 2a5f5e76..d6ebdd44 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -115,3 +115,14 @@ CONFIG_NTFS_FS=n CONFIG_ROMFS_FS=n CONFIG_UFS_FS=n CONFIG_ZONEFS_FS=n + +# Disable unused network protocols. +CONFIG_AF_RXRPC=n +CONFIG_ATM=n +CONFIG_CAN=n +CONFIG_HSR=n +CONFIG_IP_DCCP=n +CONFIG_L2TP=n +CONFIG_RDS=n +CONFIG_RFKILL=n +CONFIG_TIPC=n diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index d21f44aa..91442915 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -104,3 +104,14 @@ CONFIG_NTFS_FS=n CONFIG_ROMFS_FS=n CONFIG_UFS_FS=n CONFIG_ZONEFS_FS=n + +# Disable unused network protocols. 
+CONFIG_AF_RXRPC=n +CONFIG_ATM=n +CONFIG_CAN=n +CONFIG_HSR=n +CONFIG_IP_DCCP=n +CONFIG_L2TP=n +CONFIG_RDS=n +CONFIG_RFKILL=n +CONFIG_TIPC=n From 16e8a133dd814bc5e011d91d7326d55ea9ff6e23 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 21 Jun 2022 00:37:20 +0000 Subject: [PATCH 0683/1356] packages: add kernel-5.15 sources Signed-off-by: Arnaldo Garcia Rincon --- ...-prepare-target-for-external-modules.patch | 51 ++++ ...de-tools-build-targets-from-external.patch | 64 +++++ packages/kernel-5.15/Cargo.toml | 20 ++ packages/kernel-5.15/build.rs | 9 + packages/kernel-5.15/config-bottlerocket | 129 +++++++++ .../kernel-5.15/config-bottlerocket-metal | 44 +++ packages/kernel-5.15/kernel-5.15.spec | 267 ++++++++++++++++++ packages/kernel-5.15/latest-srpm-url.sh | 2 + packages/kernel-5.15/pkg.rs | 1 + 9 files changed, 587 insertions(+) create mode 100644 packages/kernel-5.15/1001-Makefile-add-prepare-target-for-external-modules.patch create mode 100644 packages/kernel-5.15/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch create mode 100644 packages/kernel-5.15/Cargo.toml create mode 100644 packages/kernel-5.15/build.rs create mode 100644 packages/kernel-5.15/config-bottlerocket create mode 100644 packages/kernel-5.15/config-bottlerocket-metal create mode 100644 packages/kernel-5.15/kernel-5.15.spec create mode 100755 packages/kernel-5.15/latest-srpm-url.sh create mode 100644 packages/kernel-5.15/pkg.rs diff --git a/packages/kernel-5.15/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-5.15/1001-Makefile-add-prepare-target-for-external-modules.patch new file mode 100644 index 00000000..9bcf51f6 --- /dev/null +++ b/packages/kernel-5.15/1001-Makefile-add-prepare-target-for-external-modules.patch @@ -0,0 +1,51 @@ +From 0f672709ce4e4dcce5e4f08e47169b9a18c0df08 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Mon, 19 Apr 2021 18:46:04 +0000 +Subject: [PATCH 1001/1002] Makefile: add prepare target for external modules + +We need to ensure that native versions of programs like `objtool` are +built before trying to build out-of-tree modules, or else the build +will fail. + +Unlike other distributions, we cannot include these programs in our +kernel-devel archive, because we rely on cross-compilation: these are +"host" programs and may not match the architecture of the target. + +Ideally, out-of-tree builds would run `make prepare` first, so that +these programs could be compiled in the normal fashion. We ship all +the files needed for this to work. However, this requirement is +specific to our use case, and DKMS does not support it. + +Adding a minimal prepare target to the dependency graph causes the +programs to be built automatically and improves compatibility with +existing solutions. 
+ +Signed-off-by: Ben Cressey +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/Makefile b/Makefile +index 6192e6be4..473594c61 100644 +--- a/Makefile ++++ b/Makefile +@@ -1736,6 +1736,15 @@ else # KBUILD_EXTMOD + KBUILD_BUILTIN := + KBUILD_MODULES := 1 + ++PHONY += modules_prepare ++modules_prepare: tools/objtool ++ $(Q)$(MAKE) $(build)=scripts/basic ++ $(Q)$(MAKE) $(build)=scripts/dtc ++ $(Q)$(MAKE) $(build)=scripts/mod ++ $(Q)$(MAKE) $(build)=scripts ++ ++prepare: modules_prepare ++ + build-dirs := $(KBUILD_EXTMOD) + $(MODORDER): descend + @: +-- +2.33.1 + diff --git a/packages/kernel-5.15/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch b/packages/kernel-5.15/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch new file mode 100644 index 00000000..a211eda2 --- /dev/null +++ b/packages/kernel-5.15/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch @@ -0,0 +1,64 @@ +From 3d31def7545ae4e6fc33a5b648610fa9c1e06e68 Mon Sep 17 00:00:00 2001 +From: Arnaldo Garcia Rincon +Date: Wed, 22 Jun 2022 19:26:43 +0000 +Subject: [PATCH 1002/1002] Revert "kbuild: hide tools/ build targets from + external module builds" + +This reverts commit 1bb0b18a06dceee1fdc32161a72e28eab6f011c4 in which +the targets to build "tools/*" were hidden for external modules, but +they are required by the kmod kit since the 'tools/*' binaries are not +distributed as part of the archive. + +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 27 ++++++++++++++------------- + 1 file changed, 14 insertions(+), 13 deletions(-) + +diff --git a/Makefile b/Makefile +index 473594c61..da4f000ef 100644 +--- a/Makefile ++++ b/Makefile +@@ -1357,19 +1357,6 @@ ifneq ($(wildcard $(resolve_btfids_O)),) + $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean + endif + +-# Clear a bunch of variables before executing the submake +-ifeq ($(quiet),silent_) +-tools_silent=s +-endif +- +-tools/: FORCE +- $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ +- +-tools/%: FORCE +- $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* +- + # --------------------------------------------------------------------------- + # Kernel selftest + +@@ -1989,6 +1976,20 @@ kernelversion: + image_name: + @echo $(KBUILD_IMAGE) + ++# Clear a bunch of variables before executing the submake ++ ++ifeq ($(quiet),silent_) ++tools_silent=s ++endif ++ ++tools/: FORCE ++ $(Q)mkdir -p $(objtree)/tools ++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ ++ ++tools/%: FORCE ++ $(Q)mkdir -p $(objtree)/tools ++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* ++ + quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) + cmd_rmfiles = rm -rf $(rm-files) + +-- +2.33.1 + diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml new file mode 100644 index 00000000..3e19590d --- /dev/null +++ b/packages/kernel-5.15/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "kernel-5_15" +version = "0.1.0" +edition = "2018" +publish = false +build = "build.rs" + +[package.metadata.build-package] 
+package-name = "kernel-5.15" + +[lib] +path = "pkg.rs" + +[[package.metadata.build-package.external-files]] +# Use latest-srpm-url.sh to get this. +url = "https://cdn.amazonlinux.com/blobstore/14fac2ab958b3193693bea1691e23f27600ba44cb63009bdc6cc9368271227a5/kernel-5.15.43-20.123.amzn2.src.rpm" +sha512 = "0d54742e3d4cf03dcfc398f0ebcd2c3294119683ec830efb79a0470e71f039a58d1669d1f84d21827be7d5a785225ffc15e4c0613c154ff7c54de2a208d77c5b" + +[build-dependencies] +microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/build.rs b/packages/kernel-5.15/build.rs new file mode 100644 index 00000000..cad8999a --- /dev/null +++ b/packages/kernel-5.15/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-package").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket new file mode 100644 index 00000000..9a370f2f --- /dev/null +++ b/packages/kernel-5.15/config-bottlerocket @@ -0,0 +1,129 @@ +# Because Bottlerocket does not have an initramfs, modules required to mount +# the root filesystem must be set to y. + +# The root filesystem is ext4 +CONFIG_EXT4_FS=y + +# NVMe support +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_CORE=y + +# Xen blkfront for Xen-based EC2 platforms +CONFIG_XEN_BLKDEV_FRONTEND=y + +# virtio for local testing with QEMU +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_PCI=y + +# dm-verity and enabling it on the kernel command line +CONFIG_BLK_DEV_DM=y +CONFIG_DAX=y +CONFIG_DM_INIT=y +CONFIG_DM_VERITY=y + +# TCMU/LIO +CONFIG_TCM_USER2=m + +# EFI +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y + +# EFI video +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y + +# yama LSM for ptrace restrictions +CONFIG_SECURITY_YAMA=y + +# Do not allow SELinux to be disabled at boot. +CONFIG_SECURITY_SELINUX_BOOTPARAM=n + +# Do not allow SELinux to be disabled at runtime. +CONFIG_SECURITY_SELINUX_DISABLE=n + +# Do not allow SELinux to use `enforcing=0` behavior. +CONFIG_SECURITY_SELINUX_DEVELOP=n + +# Check the protection applied by the kernel for mmap and mprotect, +# rather than the protection requested by userspace. +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 + +# Enable support for the kernel lockdown security module. +CONFIG_SECURITY_LOCKDOWN_LSM=y + +# Enable lockdown early so that if the option is present on the +# kernel command line, it can be enforced. +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y + +# Enable zstd compression for squashfs. +CONFIG_SQUASHFS_ZSTD=y + +# enable /proc/config.gz +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y + +# kernel headers at /sys/kernel/kheaders.tar.xz +CONFIG_IKHEADERS=y + +# BTF debug info at /sys/kernel/btf/vmlinux +CONFIG_DEBUG_INFO_BTF=y + +# We don't want to extend the kernel command line with any upstream defaults; +# Bottlerocket uses a fairly custom setup that needs tight control over it. +CONFIG_CMDLINE_EXTEND=n + +# Enable ZSTD kernel image compression +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_ZSTD=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_DECOMPRESS_ZSTD=y + +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. 
+CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m + +# Add support for IPMI drivers +CONFIG_IPMI_HANDLER=m + +# Add support for bootconfig +CONFIG_BOOT_CONFIG=y + +# Enables support for checkpoint/restore +CONFIG_CHECKPOINT_RESTORE=y + +# Disable unused filesystems. +CONFIG_AFS_FS=n +CONFIG_CRAMFS=n +CONFIG_ECRYPT_FS=n +CONFIG_EXT2_FS=n +CONFIG_EXT3_FS=n +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_GFS2_FS=n +CONFIG_HFS_FS=n +CONFIG_HFSPLUS_FS=n +CONFIG_JFS_FS=n +CONFIG_JFFS2_FS=n +CONFIG_NFS_V2=n +CONFIG_NILFS2_FS=n +CONFIG_NTFS_FS=n +CONFIG_ROMFS_FS=n +CONFIG_UFS_FS=n +CONFIG_ZONEFS_FS=n +CONFIG_NTFS3_FS=n + +# Disable unused network protocols. +CONFIG_AF_RXRPC=n +CONFIG_ATM=n +CONFIG_CAN=n +CONFIG_HSR=n +CONFIG_IP_DCCP=n +CONFIG_L2TP=n +CONFIG_RDS=n +CONFIG_RFKILL=n +CONFIG_TIPC=n diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal new file mode 100644 index 00000000..7670fe64 --- /dev/null +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -0,0 +1,44 @@ +# This file holds all the settings that are specific to hardware enablement +# we do for the metal variants. + +# SATA support +CONFIG_BLK_DEV_SD=y +CONFIG_SATA_AHCI=y +CONFIG_ATA=y +CONFIG_ATA_PIIX=y + +# Intel network support +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_NET_VENDOR_INTEL=y +CONFIG_IGB_HWMON=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y + +# Broadcom network support +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_TIGON3_HWMON=y +CONFIG_TIGON3=m +CONFIG_BNXT=m + +# Intel 10G network support +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=m + +# Mellanox network support +CONFIG_MLXFW=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE_EN=y +CONFIG_NET_SWITCHDEV=y + +# LSI Logic's SAS based RAID controllers +CONFIG_MEGARAID_SAS=y + +# Microsemi PQI controllers +CONFIG_SCSI_SMARTPQI=y diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec new file mode 100644 index 00000000..ec5b4d76 --- /dev/null +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -0,0 +1,267 @@ +%global debug_package %{nil} + +Name: %{_cross_os}kernel-5.15 +Version: 5.15.43 +Release: 1%{?dist} +Summary: The Linux kernel +License: GPL-2.0 WITH Linux-syscall-note +URL: https://www.kernel.org/ +# Use latest-srpm-url.sh to get this. +Source0: https://cdn.amazonlinux.com/blobstore/14fac2ab958b3193693bea1691e23f27600ba44cb63009bdc6cc9368271227a5/kernel-5.15.43-20.123.amzn2.src.rpm +Source100: config-bottlerocket +Source101: config-bottlerocket-metal + +# Help out-of-tree module builds run `make prepare` automatically. +Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch +# Expose tools/* targets for out-of-tree module builds +Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch + +BuildRequires: bc +BuildRequires: elfutils-devel +BuildRequires: hostname +BuildRequires: kmod +BuildRequires: openssl-devel + +# CPU microcode updates are included as "extra firmware" so the files don't +# need to be installed on the root filesystem. However, we want the license and +# attribution files to be available in the usual place. +%if "%{_cross_arch}" == "x86_64" +BuildRequires: %{_cross_os}microcode +Requires: %{_cross_os}microcode-licenses +%endif + +# Pull in expected modules and development files. 
+Requires: %{name}-modules = %{version}-%{release} +Requires: %{name}-devel = %{version}-%{release} + +%global kernel_sourcedir %{_cross_usrsrc}/kernels +%global kernel_libdir %{_cross_libdir}/modules/%{version} + +%description +%{summary}. + +%package devel +Summary: Configured Linux kernel source for module building + +%description devel +%{summary}. + +%package archive +Summary: Archived Linux kernel source for module building + +%description archive +%{summary}. + +%package modules +Summary: Modules for the Linux kernel + +%description modules +%{summary}. + +%package headers +Summary: Header files for the Linux kernel for use by glibc + +%description headers +%{summary}. + +%prep +rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" +tar -xof linux-%{version}.tar; rm linux-%{version}.tar +%setup -TDn linux-%{version} +# Patches from the Source0 SRPM +for patch in ../*.patch; do + patch -p1 <"$patch" +done +# Patches listed in this spec (Patch0001...) +%autopatch -p1 + +%if "%{_cross_arch}" == "x86_64" +microcode="$(find %{_cross_libdir}/firmware -type f -path '*/*-ucode/*' -printf '%%P ')" +cat < ../config-microcode +CONFIG_EXTRA_FIRMWARE="${microcode}" +CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" +EOF +%endif + +export ARCH="%{_cross_karch}" +export CROSS_COMPILE="%{_cross_target}-" + +KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ +scripts/kconfig/merge_config.sh \ + ../config-%{_cross_arch} \ +%if "%{_cross_arch}" == "x86_64" + ../config-microcode \ +%endif + %{SOURCE100} \ + %{SOURCE101} +rm -f ../config-* ../*.patch + +%global kmake \ +make -s\\\ + ARCH="%{_cross_karch}"\\\ + CROSS_COMPILE="%{_cross_target}-"\\\ + INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_STRIP=1\\\ +%{nil} + +%build +%kmake mrproper +%kmake %{_cross_vendor}_defconfig +%kmake %{?_smp_mflags} %{_cross_kimage} +%kmake %{?_smp_mflags} modules + +%install +%kmake %{?_smp_mflags} headers_install +%kmake %{?_smp_mflags} modules_install + +install -d %{buildroot}/boot +install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz +install -m 0644 .config %{buildroot}/boot/config + +find %{buildroot}%{_cross_prefix} \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +# For out-of-tree kmod builds, we need to support the following targets: +# make scripts -> make prepare -> make modules +# +# This requires enough of the kernel tree to build host programs under the +# "scripts" and "tools" directories. + +# Any existing ELF objects will not work properly if we're cross-compiling for +# a different architecture, so get rid of them to avoid confusing errors. +find arch scripts tools -type f -executable \ + -exec sh -c "head -c4 {} | grep -q ELF && rm {}" \; + +# We don't need to include these files. +find -type f \( -name \*.cmd -o -name \*.gitignore \) -delete + +# Avoid an OpenSSL dependency by stubbing out options for module signing and +# trusted keyrings, so `sign-file` and `extract-cert` won't be built. External +# kernel modules do not have access to the keys they would need to make use of +# these tools. +sed -i \ + -e 's,$(CONFIG_MODULE_SIG_FORMAT),n,g' \ + -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ + scripts/Makefile + +# Restrict permissions on System.map. 
+chmod 600 System.map + +( + find * \ + -type f \ + \( -name Build\* -o -name Kbuild\* -o -name Kconfig\* -o -name Makefile\* \) \ + -print + + find arch/%{_cross_karch}/ \ + -type f \ + \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ + -print + + find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print + echo arch/%{_cross_karch}/kernel/asm-offsets.s + echo lib/vdso/gettimeofday.c + + for d in \ + arch/%{_cross_karch}/tools \ + arch/%{_cross_karch}/kernel/vdso ; do + [ -d "${d}" ] && find "${d}/" -type f -print + done + + find include -type f -print + find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print + + find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o -print + echo tools/build/fixdep.c + find tools/lib/subcmd -type f -print + find tools/lib/{ctype,hweight,rbtree,string,str_error_r}.c + + echo kernel/bounds.c + echo kernel/time/timeconst.bc + echo security/selinux/include/classmap.h + echo security/selinux/include/initial_sid_to_string.h + echo security/selinux/include/policycap.h + echo security/selinux/include/policycap_names.h + + echo .config + echo Module.symvers + echo System.map +) | sort -u > kernel_devel_files + +# Create squashfs of kernel-devel files (ie. /usr/src/kernels/). +# +# -no-exports: +# The filesystem does not need to be exported via NFS. +# +# -all-root: +# Make all files owned by root rather than the build user. +# +# -comp zstd: +# zstd offers compression ratios like xz and decompression speeds like lz4. +SQUASHFS_OPTS="-no-exports -all-root -comp zstd" +mkdir -p src_squashfs/%{version} +tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} +mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} + +# Create a tarball of the same files, for use outside the running system. +# In theory we could extract these files with `unsquashfs`, but we do not want +# to require it to be installed on the build host, and it errors out when run +# inside Docker unless the limit for open files is lowered. +tar cf kernel-devel.tar src_squashfs/%{version} --transform='s|src_squashfs/%{version}|kernel-devel|' +xz -T0 kernel-devel.tar + +install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs +install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz +install -d %{buildroot}%{kernel_sourcedir} + +# Replace the incorrect links from modules_install. These will be bound +# into a host container (and unused in the host) so they must not point +# to %{_cross_usrsrc} (eg. /x86_64-bottlerocket-linux-gnu/sys-root/...) 
+rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source + +%files +%license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note +%{_cross_attribution_file} +/boot/vmlinuz +/boot/config + +%files modules +%dir %{_cross_libdir}/modules +%{_cross_libdir}/modules/* + +%files headers +%dir %{_cross_includedir}/asm +%dir %{_cross_includedir}/asm-generic +%dir %{_cross_includedir}/drm +%dir %{_cross_includedir}/linux +%dir %{_cross_includedir}/misc +%dir %{_cross_includedir}/mtd +%dir %{_cross_includedir}/rdma +%dir %{_cross_includedir}/scsi +%dir %{_cross_includedir}/sound +%dir %{_cross_includedir}/video +%dir %{_cross_includedir}/xen +%{_cross_includedir}/asm/* +%{_cross_includedir}/asm-generic/* +%{_cross_includedir}/drm/* +%{_cross_includedir}/linux/* +%{_cross_includedir}/misc/* +%{_cross_includedir}/mtd/* +%{_cross_includedir}/rdma/* +%{_cross_includedir}/scsi/* +%{_cross_includedir}/sound/* +%{_cross_includedir}/video/* +%{_cross_includedir}/xen/* + +%files devel +%dir %{kernel_sourcedir} +%{_cross_datadir}/bottlerocket/kernel-devel.squashfs + +%files archive +%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%changelog diff --git a/packages/kernel-5.15/latest-srpm-url.sh b/packages/kernel-5.15/latest-srpm-url.sh new file mode 100755 index 00000000..91ba10e6 --- /dev/null +++ b/packages/kernel-5.15/latest-srpm-url.sh @@ -0,0 +1,2 @@ +#!/bin/sh +docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-5.15 >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' diff --git a/packages/kernel-5.15/pkg.rs b/packages/kernel-5.15/pkg.rs new file mode 100644 index 00000000..d799fb2d --- /dev/null +++ b/packages/kernel-5.15/pkg.rs @@ -0,0 +1 @@ +// not used From 87707a3fc41a676717e2096df2975b761c11ed95 Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Mon, 11 Jul 2022 14:55:29 -0600 Subject: [PATCH 0684/1356] docs: adds note about branches and tags in contributing Signed-off-by: Kyle J. Davis --- CONTRIBUTING.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 374a5ce1..5daf2beb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -39,6 +39,11 @@ To send us a pull request, please: GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). +## Repo branch and tag structure + +Active development occurs under the `develop` branch. + +Bottlerocket uses both tags and branches for release alignment. Numbered releases are always associated with [tags that mirror the full SemVer 3-digit version number](https://github.com/bottlerocket-os/bottlerocket/tags) (e.g. `1.7.2`). [Branches are for patching only](https://github.com/bottlerocket-os/bottlerocket/branches/all): if a patch is required, a branch will be cut for that minor release line (e.g. `1.7.x`). As a consequence, some previous minor versions may not have a branch if they never required a subsequent patch. ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. 
From 77560ed12a5a0cb15d20c7df11a82d1bc944cb99 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Fri, 29 Apr 2022 19:56:00 -0700 Subject: [PATCH 0685/1356] test: Add `cargo make` targets for `testsys` tests --- .gitignore | 1 + tools/Cargo.lock | 671 +++++++++++++++++++++++++++++ tools/Cargo.toml | 1 + tools/deny.toml | 3 + tools/testsys/Cargo.toml | 26 ++ tools/testsys/src/aws_resources.rs | 229 ++++++++++ tools/testsys/src/delete.rs | 29 ++ tools/testsys/src/install.rs | 47 ++ tools/testsys/src/logs.rs | 46 ++ tools/testsys/src/main.rs | 111 +++++ tools/testsys/src/restart_test.rs | 21 + tools/testsys/src/run.rs | 215 +++++++++ tools/testsys/src/secret.rs | 117 +++++ tools/testsys/src/status.rs | 55 +++ tools/testsys/src/uninstall.rs | 23 + 15 files changed, 1595 insertions(+) create mode 100644 tools/testsys/Cargo.toml create mode 100644 tools/testsys/src/aws_resources.rs create mode 100644 tools/testsys/src/delete.rs create mode 100644 tools/testsys/src/install.rs create mode 100644 tools/testsys/src/logs.rs create mode 100644 tools/testsys/src/main.rs create mode 100644 tools/testsys/src/restart_test.rs create mode 100644 tools/testsys/src/run.rs create mode 100644 tools/testsys/src/secret.rs create mode 100644 tools/testsys/src/status.rs create mode 100644 tools/testsys/src/uninstall.rs diff --git a/.gitignore b/.gitignore index 9c27e13b..595b687d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ /.gomodcache /html /Infra.toml +/testsys.kubeconfig /*.pem /keys /roles diff --git a/tools/Cargo.lock b/tools/Cargo.lock index bd559864..15eee4b0 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -35,6 +35,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "anyhow" +version = "1.0.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" + [[package]] name = "argh" version = "0.1.7" @@ -74,6 +80,17 @@ dependencies = [ "serde_json", ] +[[package]] +name = "async-recursion" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.56" @@ -147,6 +164,25 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bottlerocket-types" +version = "0.1.0" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=021e8d6#021e8d69b13b7d05e79963a0ff3f1c5c1af10753" +dependencies = [ + "model", + "serde", + "serde_plain", +] + +[[package]] +name = "bottlerocket-variant" +version = "0.1.0" +dependencies = [ + "generate-readme", + "serde", + "snafu", +] + [[package]] name = "bstr" version = "0.2.17" @@ -182,6 +218,12 @@ version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + [[package]] name = "bytes" version = "1.1.0" @@ -252,13 +294,28 @@ checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" dependencies = [ "atty", "bitflags", + "clap_derive", "clap_lex", "indexmap", + "lazy_static", "strsim 0.10.0", "termcolor", "textwrap 0.15.0", ] +[[package]] +name = "clap_derive" +version = "3.1.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +dependencies = [ + "heck 0.4.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "clap_lex" version = "0.2.0" @@ -404,6 +461,41 @@ dependencies = [ "subtle", ] +[[package]] +name = "darling" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "digest" version = "0.9.0" @@ -489,6 +581,19 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_logger" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "fastrand" version = "1.7.0" @@ -504,6 +609,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.0.1" @@ -747,6 +867,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.7.1" @@ -759,6 +885,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.19" @@ -798,6 +930,37 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.2.3" @@ -886,6 +1049,126 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f995a3c8f2bc3dd52a18a583e90f9ec109c047fa1603a853e46bcda14d2e279d" +dependencies = [ + "serde", + "serde_json", + "treediff", +] + +[[package]] +name = "jsonpath_lib" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" +dependencies = [ + "log", + "serde", + "serde_json", +] + +[[package]] +name = "k8s-openapi" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ae2c04fcee6b01b04e3aadd56bb418932c8e0a9d8a93f48bc68c6bdcdb559d" +dependencies = [ + "base64", + "bytes", + "chrono", + "http", + "percent-encoding", + "serde", + "serde-value", + "serde_json", + "url", +] + +[[package]] +name = "kube" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f68b954ea9ad888de953fb1488bd8f377c4c78d82d4642efa5925189210b50b7" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", +] + +[[package]] +name = "kube-client" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150dc7107d9acf4986088f284a0a6dddc5ae37ef1ffdf142f6811dc5998dd58" +dependencies = [ + "base64", + "bytes", + "chrono", + "dirs-next", + "either", + "futures", + "http", + "http-body", + "hyper", + "hyper-timeout", + "hyper-tls", + "jsonpath_lib", + "k8s-openapi", + "kube-core", + "openssl", + "pem", + "pin-project", + "rand", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-native-tls", + "tokio-tungstenite", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8c429676abe6a73b374438d5ca02caaf9ae7a635441253c589b779fa5d0622" +dependencies = [ + "chrono", + "form_urlencoded", + "http", + "json-patch", + "k8s-openapi", + "once_cell", + "schemars", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.73.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb405f0d39181acbfdc7c79e3fc095330c9b6465ab50aeb662d762e53b662f1" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "serde_json", + "syn", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -923,6 +1206,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "matches" version = "0.1.9" @@ -982,6 +1271,54 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "model" +version = "0.1.0" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=021e8d6#021e8d69b13b7d05e79963a0ff3f1c5c1af10753" +dependencies = [ + "async-recursion", + "async-trait", + 
"base64", + "bytes", + "futures", + "http", + "json-patch", + "k8s-openapi", + "kube", + "lazy_static", + "log", + "maplit", + "regex", + "schemars", + "serde", + "serde_json", + "serde_plain", + "serde_yaml", + "snafu", + "tabled", + "tokio", + "tokio-util", + "topological-sort", +] + +[[package]] +name = "native-tls" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" version = "0.24.1" @@ -1076,12 +1413,60 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "ordered-float" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" +dependencies = [ + "num-traits", +] + [[package]] name = "os_pipe" version = "0.9.2" @@ -1098,6 +1483,15 @@ version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +[[package]] +name = "papergrid" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63709d10e2c2ec58f7bd91d8258d27ce80de090064b0ddf3a4bf38b907b61b8a" +dependencies = [ + "unicode-width", +] + [[package]] name = "parking_lot" version = "0.12.1" @@ -1163,6 +1557,26 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pin-project" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.9" @@ -1175,6 +1589,12 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -1704,6 +2124,30 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "schemars" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -1720,6 +2164,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + [[package]] name = "security-framework" version = "2.6.1" @@ -1761,6 +2215,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.137" @@ -1772,12 +2236,24 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_json" version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ + "indexmap", "itoa", "ryu", "serde", @@ -1816,6 +2292,17 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.3", +] + [[package]] name = "sha2" version = "0.9.9" @@ -1986,6 +2473,27 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "tabled" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d15827061abcf689257b1841c8e2732b1dfcc3ef825b24ce6c606e1e9e1a7bde" +dependencies = [ + "papergrid", + "tabled_derive", +] + +[[package]] +name = "tabled_derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "278ea3921cee8c5a69e0542998a089f7a14fa43c9c4e4f9951295da89bd0c943" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tempfile" version = "3.3.0" @@ -2019,6 +2527,29 @@ dependencies = [ "winapi", ] +[[package]] +name = "testsys" +version = "0.1.0" +dependencies = [ + "anyhow", + "bottlerocket-types", + "bottlerocket-variant", + "clap 3.1.18", + "env_logger", 
+ "futures", + "k8s-openapi", + "log", + "maplit", + "model", + "pubsys-config", + "serde", + "serde_json", + "serde_plain", + "terminal_size", + "tokio", + "unescape", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -2127,6 +2658,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "1.8.0" @@ -2138,6 +2679,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -2160,6 +2711,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-tungstenite" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite", +] + [[package]] name = "tokio-util" version = "0.7.3" @@ -2183,6 +2746,12 @@ dependencies = [ "serde", ] +[[package]] +name = "topological-sort" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa7c7f42dea4b1b99439786f5633aeb9c14c1b53f75e282803c2ec2ad545873c" + [[package]] name = "tough" version = "0.12.2" @@ -2242,6 +2811,49 @@ dependencies = [ "tough", ] +[[package]] +name = "tower" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "base64", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.1" @@ -2255,10 +2867,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if", + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" version = "0.1.26" @@ -2268,18 +2893,52 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "treediff" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"761e8d5ad7ce14bb82b7e61ccc0ca961005a275a060b9644a2431aa11553c2ff" +dependencies = [ + "serde_json", +] + [[package]] name = "try-lock" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +dependencies = [ + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand", + "sha-1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "typenum" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "unescape" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" + [[package]] name = "unicode-bidi" version = "0.3.8" @@ -2347,6 +3006,18 @@ dependencies = [ "serde", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vec_map" version = "0.8.2" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 58b7f54b..b76b84c5 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -5,4 +5,5 @@ members = [ "pubsys", "pubsys-config", "pubsys-setup", + "testsys", ] diff --git a/tools/deny.toml b/tools/deny.toml index bedc1540..3a1380be 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -68,6 +68,9 @@ skip-tree = [ ] [sources] +allow-git = [ + "https://github.com/bottlerocket-os/bottlerocket-test-system", +] # Deny crates from unknown registries or git repositories. 
unknown-registry = "deny" unknown-git = "deny" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml new file mode 100644 index 00000000..84701cf9 --- /dev/null +++ b/tools/testsys/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "testsys" +version = "0.1.0" +authors = ["Ethan Pullen ", "Matt Briggs "] +license = "Apache-2.0 OR MIT" +edition = "2021" +publish = false + +[dependencies] +anyhow = "1.0" +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "021e8d6", version = "0.1"} +bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } +clap = { version = "3", features = ["derive", "env"] } +env_logger = "0.9" +futures = "0.3.8" +k8s-openapi = { version = "0.15", features = ["v1_20", "api"], default-features = false } +log = "0.4" +maplit = "1.0.2" +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "021e8d6", version = "0.1"} +pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_plain = "1" +terminal_size = "0.1" +tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } +unescape = "0.1.0" diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs new file mode 100644 index 00000000..4537cb29 --- /dev/null +++ b/tools/testsys/src/aws_resources.rs @@ -0,0 +1,229 @@ +use crate::run::{TestType, TestsysImages}; +use anyhow::{anyhow, Context, Result}; +use bottlerocket_types::agent_config::{ + ClusterType, CreationPolicy, Ec2Config, EksClusterConfig, K8sVersion, SonobuoyConfig, + SonobuoyMode, +}; + +use bottlerocket_variant::Variant; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; +use k8s_openapi::serde_json::Value; +use maplit::btreemap; +use model::constants::NAMESPACE; +use model::{ + Agent, Configuration, Crd, DestructionPolicy, Resource, ResourceSpec, SecretName, Test, + TestSpec, +}; +use std::collections::BTreeMap; + +pub(crate) struct AwsK8s { + pub(crate) arch: String, + pub(crate) variant: String, + pub(crate) region: String, + pub(crate) assume_role: Option, + pub(crate) instance_type: Option, + pub(crate) ami: String, + pub(crate) secrets: Option>, + pub(crate) kube_conformance_image: Option, + pub(crate) target_cluster_name: Option, +} + +impl AwsK8s { + /// Create the necessary test and resource crds for the specified test type. + pub(crate) fn create_crds( + &self, + test: TestType, + testsys_images: &TestsysImages, + ) -> Result> { + match test { + TestType::Conformance => { + self.sonobuoy_test_crds(testsys_images, SonobuoyMode::CertifiedConformance) + } + TestType::Quick => self.sonobuoy_test_crds(testsys_images, SonobuoyMode::Quick), + } + } + + fn sonobuoy_test_crds( + &self, + testsys_images: &TestsysImages, + sonobuoy_mode: SonobuoyMode, + ) -> Result> { + let crds = vec![ + self.eks_crd("", testsys_images)?, + self.ec2_crd("", testsys_images)?, + self.sonobuoy_crd("", "-test", sonobuoy_mode, None, testsys_images)?, + ]; + Ok(crds) + } + + /// Labels help filter test results with `testsys status`. + fn labels(&self) -> BTreeMap { + btreemap! { + "testsys/arch".to_string() => self.arch.to_string(), + "testsys/variant".to_string() => self.variant.to_string(), + } + } + + fn kube_arch(&self) -> String { + self.arch.replace('_', "-") + } + + fn kube_variant(&self) -> String { + self.variant.replace('.', "") + } + + /// Bottlerocket cluster naming convention. 
+ fn cluster_name(&self, suffix: &str) -> String { + self.target_cluster_name + .clone() + .unwrap_or_else(|| format!("{}-{}{}", self.kube_arch(), self.kube_variant(), suffix)) + } + + fn eks_crd(&self, cluster_suffix: &str, testsys_images: &TestsysImages) -> Result { + let cluster_version = K8sVersion::parse( + Variant::new(&self.variant) + .context("The provided variant cannot be interpreted.")? + .version() + .context("aws-k8s variant is missing k8s version")?, + ) + .map_err(|e| anyhow!(e))?; + let cluster_name = self.cluster_name(cluster_suffix); + let eks_crd = Resource { + metadata: ObjectMeta { + name: Some(cluster_name.clone()), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: ResourceSpec { + depends_on: None, + agent: Agent { + name: "eks-provider".to_string(), + image: testsys_images.eks_resource.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: false, + timeout: None, + configuration: Some( + EksClusterConfig { + cluster_name, + creation_policy: Some(CreationPolicy::IfNotExists), + region: Some(self.region.clone()), + zones: None, + version: Some(cluster_version), + assume_role: self.assume_role.clone(), + } + .into_map() + .context("Unable to convert eks config to map")?, + ), + secrets: self.secrets.clone(), + capabilities: None, + }, + destruction_policy: DestructionPolicy::Never, + }, + status: None, + }; + Ok(Crd::Resource(eks_crd)) + } + + fn ec2_crd(&self, cluster_suffix: &str, testsys_images: &TestsysImages) -> Result { + let cluster_name = self.cluster_name(cluster_suffix); + let mut ec2_config = Ec2Config { + node_ami: self.ami.clone(), + instance_count: Some(2), + instance_type: self.instance_type.clone(), + cluster_name: format!("${{{}.clusterName}}", cluster_name), + region: format!("${{{}.region}}", cluster_name), + instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), + subnet_id: format!("${{{}.privateSubnetId}}", cluster_name), + cluster_type: ClusterType::Eks, + endpoint: Some(format!("${{{}.endpoint}}", cluster_name)), + certificate: Some(format!("${{{}.certificate}}", cluster_name)), + cluster_dns_ip: Some(format!("${{{}.clusterDnsIp}}", cluster_name)), + security_groups: vec![], + assume_role: self.assume_role.clone(), + } + .into_map() + .context("Unable to create ec2 config")?; + + // TODO - we have change the raw map to reference/template a non string field. 
+ ec2_config.insert( + "securityGroups".to_owned(), + Value::String(format!("${{{}.securityGroups}}", cluster_name)), + ); + + let ec2_resource = Resource { + metadata: ObjectMeta { + name: Some(format!("{}-instances", cluster_name)), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: ResourceSpec { + depends_on: Some(vec![cluster_name]), + agent: Agent { + name: "ec2-provider".to_string(), + image: testsys_images.ec2_resource.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: false, + timeout: None, + configuration: Some(ec2_config), + secrets: self.secrets.clone(), + capabilities: None, + }, + destruction_policy: DestructionPolicy::OnDeletion, + }, + status: None, + }; + Ok(Crd::Resource(ec2_resource)) + } + + fn sonobuoy_crd( + &self, + cluster_suffix: &str, + test_name_suffix: &str, + sonobuoy_mode: SonobuoyMode, + depends_on: Option>, + testsys_images: &TestsysImages, + ) -> Result { + let cluster_name = self.cluster_name(cluster_suffix); + let ec2_resource_name = format!("{}-instances", cluster_name); + let test_name = format!("{}{}", cluster_name, test_name_suffix); + let sonobuoy = Test { + metadata: ObjectMeta { + name: Some(test_name), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: TestSpec { + resources: vec![ec2_resource_name, cluster_name.to_string()], + depends_on, + retries: Some(5), + agent: Agent { + name: "sonobuoy-test-agent".to_string(), + image: testsys_images.sonobuoy_test.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: true, + timeout: None, + configuration: Some( + SonobuoyConfig { + kubeconfig_base64: format!("${{{}.encodedKubeconfig}}", cluster_name), + plugin: "e2e".to_string(), + mode: sonobuoy_mode, + kubernetes_version: None, + kube_conformance_image: self.kube_conformance_image.clone(), + assume_role: self.assume_role.clone(), + } + .into_map() + .context("Unable to convert sonobuoy config to `Map`")?, + ), + secrets: self.secrets.clone(), + capabilities: None, + }, + }, + status: None, + }; + + Ok(Crd::Test(sonobuoy)) + } +} diff --git a/tools/testsys/src/delete.rs b/tools/testsys/src/delete.rs new file mode 100644 index 00000000..09d87921 --- /dev/null +++ b/tools/testsys/src/delete.rs @@ -0,0 +1,29 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use futures::TryStreamExt; +use log::info; +use model::test_manager::{DeleteEvent, TestManager}; + +/// Delete all tests and resources from a testsys cluster. +#[derive(Debug, Parser)] +pub(crate) struct Delete {} + +impl Delete { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + let mut stream = client.delete_all().await.context("Unable to delete all")?; + + while let Some(delete) = stream + .try_next() + .await + .context("A deletion error occured")? 
+ { + match delete { + DeleteEvent::Starting(crd) => println!("Starting delete for {}", crd.name()), + DeleteEvent::Deleted(crd) => println!("Delete finished for {}", crd.name()), + DeleteEvent::Failed(crd) => println!("Delete failed for {}", crd.name()), + } + } + info!("Delete finished"); + Ok(()) + } +} diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs new file mode 100644 index 00000000..11e7de59 --- /dev/null +++ b/tools/testsys/src/install.rs @@ -0,0 +1,47 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use log::{info, trace}; +use model::test_manager::{ImageConfig, TestManager}; + +/// The install subcommand is responsible for putting all of the necessary components for testsys in +/// a k8s cluster. +#[derive(Debug, Parser)] +pub(crate) struct Install { + /// Controller image pull secret. This is the name of a Kubernetes secret that will be used to + /// pull the container image from a private registry. For example, if you created a pull secret + /// with `kubectl create secret docker-registry regcred` then you would pass + /// `--controller-pull-secret regcred`. + #[clap( + long = "controller-pull-secret", + env = "TESTSYS_CONTROLLER_PULL_SECRET" + )] + secret: Option, + + /// Controller image uri. If not provided the latest released controller image will be used. + #[clap( + long = "controller-uri", + env = "TESTSYS_CONTROLLER_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/controller:v0.0.1" + )] + controller_uri: String, +} + +impl Install { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + trace!( + "Installing testsys using controller image '{}'", + &self.controller_uri + ); + let controller_image = match (self.secret, self.controller_uri) { + (Some(secret), image) => ImageConfig::WithCreds { secret, image }, + (None, image) => ImageConfig::Image(image), + }; + client.install(controller_image).await.context( + "Unable to install testsys to the cluster. (Some artifacts may be left behind)", + )?; + + info!("testsys components were successfully installed."); + + Ok(()) + } +} diff --git a/tools/testsys/src/logs.rs b/tools/testsys/src/logs.rs new file mode 100644 index 00000000..07e5b2cb --- /dev/null +++ b/tools/testsys/src/logs.rs @@ -0,0 +1,46 @@ +use anyhow::{Context, Error, Result}; +use clap::Parser; +use futures::TryStreamExt; +use model::test_manager::{ResourceState, TestManager}; +use unescape::unescape; + +/// Stream the logs of an object from a testsys cluster. +#[derive(Debug, Parser)] +pub(crate) struct Logs { + /// The name of the test we want logs from. + #[clap(long, conflicts_with = "resource")] + test: Option, + + /// The name of the resource we want logs from. + #[clap(long, conflicts_with = "test", requires = "state")] + resource: Option, + + /// The resource state we want logs for (Creation, Destruction). + #[clap(long = "state", conflicts_with = "test")] + resource_state: Option, + + /// Follow logs + #[clap(long, short)] + follow: bool, +} + +impl Logs { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + match (self.test, self.resource, self.resource_state) { + (Some(test), None, None) => { + let mut logs = client.test_logs(test, self.follow).await.context("Unable to get logs.")?; + while let Some(line) = logs.try_next().await? 
{ + println!("{}", unescape(&String::from_utf8_lossy(&line)).context("Unable to unescape log string")?); + } + } + (None, Some(resource), Some(state)) => { + let mut logs = client.resource_logs(resource, state, self.follow).await.context("Unable to get logs.")?; + while let Some(line) = logs.try_next().await? { + println!("{}", unescape(&String::from_utf8_lossy(&line)).context("Unable to unescape log string")?); + } + } + _ => return Err(Error::msg("Invalid arguments were provided. Exactly one of `--test` or `--resource` must be given.")), + }; + Ok(()) + } +} diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs new file mode 100644 index 00000000..bc86a991 --- /dev/null +++ b/tools/testsys/src/main.rs @@ -0,0 +1,111 @@ +use anyhow::{Context, Result}; +use clap::{Parser, Subcommand}; +use delete::Delete; +use env_logger::Builder; +use install::Install; +use log::{debug, error, LevelFilter}; +use logs::Logs; +use model::test_manager::TestManager; +use restart_test::RestartTest; +use run::Run; +use secret::Add; +use status::Status; +use std::path::PathBuf; +use uninstall::Uninstall; + +mod aws_resources; +mod delete; +mod install; +mod logs; +mod restart_test; +mod run; +mod secret; +mod status; +mod uninstall; + +/// A program for running and controlling Bottlerocket tests in a Kubernetes cluster using +/// bottlerocket-test-system +#[derive(Parser, Debug)] +#[clap(about, long_about = None)] +struct TestsysArgs { + #[structopt(global = true, long, default_value = "INFO")] + /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE + log_level: LevelFilter, + + /// Path to the kubeconfig file for the testsys cluster. Can also be passed with the KUBECONFIG + /// environment variable. + #[clap(long)] + kubeconfig: Option, + + #[clap(subcommand)] + command: Command, +} + +impl TestsysArgs { + async fn run(self) -> Result<()> { + let client = match self.kubeconfig { + Some(path) => TestManager::new_from_kubeconfig_path(&path) + .await + .context(format!( + "Unable to create testsys client using kubeconfig '{}'", + path.display() + ))?, + None => TestManager::new().await.context( + "Unable to create testsys client using KUBECONFIG variable or default kubeconfig", + )?, + }; + match self.command { + Command::Run(run) => run.run(client).await?, + Command::Install(install) => install.run(client).await?, + Command::Delete(delete) => delete.run(client).await?, + Command::Status(status) => status.run(client).await?, + Command::Logs(logs) => logs.run(client).await?, + Command::RestartTest(restart_test) => restart_test.run(client).await?, + Command::Add(add) => add.run(client).await?, + Command::Uninstall(uninstall) => uninstall.run(client).await?, + }; + Ok(()) + } +} + +#[derive(Subcommand, Debug)] +enum Command { + Install(Install), + // We need to box run because it requires significantly more arguments than the other commands. + Run(Box), + Delete(Delete), + Status(Status), + Logs(Logs), + RestartTest(RestartTest), + Add(Add), + Uninstall(Uninstall), +} + +#[tokio::main] +async fn main() { + let args = TestsysArgs::parse(); + init_logger(args.log_level); + debug!("{:?}", args); + if let Err(e) = args.run().await { + error!("{}", e); + std::process::exit(1); + } +} + +/// Initialize the logger with the value passed by `--log-level` (or its default) when the +/// `RUST_LOG` environment variable is not present. If present, the `RUST_LOG` environment variable +/// overrides `--log-level`/`level`. 
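+///
+/// For example, assuming the binary is invoked as `testsys`: `RUST_LOG=debug testsys status`
+/// enables debug logging for all crates and ignores `--log-level`, while
+/// `testsys --log-level DEBUG status` raises the level for this crate only.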
+fn init_logger(level: LevelFilter) { + match std::env::var(env_logger::DEFAULT_FILTER_ENV).ok() { + Some(_) => { + // RUST_LOG exists; env_logger will use it. + Builder::from_default_env().init(); + } + None => { + // RUST_LOG does not exist; use default log level for this crate only. + Builder::new() + .filter(Some(env!("CARGO_CRATE_NAME")), level) + .init(); + } + } +} diff --git a/tools/testsys/src/restart_test.rs b/tools/testsys/src/restart_test.rs new file mode 100644 index 00000000..cbd4264c --- /dev/null +++ b/tools/testsys/src/restart_test.rs @@ -0,0 +1,21 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use model::test_manager::TestManager; + +/// Restart a test. This will delete the test object from the testsys cluster and replace it with +/// a new, identical test object with a clean state. +#[derive(Debug, Parser)] +pub(crate) struct RestartTest { + /// The name of the test to be restarted. + #[clap()] + test_name: String, +} + +impl RestartTest { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + client + .restart_test(&self.test_name) + .await + .context(format!("Unable to restart test '{}'", self.test_name)) + } +} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs new file mode 100644 index 00000000..cc881670 --- /dev/null +++ b/tools/testsys/src/run.rs @@ -0,0 +1,215 @@ +use crate::aws_resources::AwsK8s; +use anyhow::{anyhow, ensure, Context, Result}; +use bottlerocket_variant::Variant; +use clap::Parser; +use log::{debug, info}; +use model::test_manager::TestManager; +use model::SecretName; +use pubsys_config::InfraConfig; +use serde::Deserialize; +use serde_plain::derive_fromstr_from_deserialize; +use std::collections::HashMap; +use std::fs::File; +use std::path::PathBuf; + +/// Run a set of tests for a given arch and variant +#[derive(Debug, Parser)] +pub(crate) struct Run { + /// The type of test to run. Options are `quick` and `conformance`. + test_flavor: TestType, + + /// The architecture to test. Either x86_64 or aarch64. + #[clap(long, env = "BUILDSYS_ARCH")] + arch: String, + + /// The variant to test + #[clap(long, env = "BUILDSYS_VARIANT")] + variant: String, + + /// The path to `Infra.toml` + #[clap(long, env = "PUBLISH_INFRA_CONFIG_PATH", parse(from_os_str))] + infra_config_path: PathBuf, + + /// The path to `amis.json` + #[clap(long, env = "AMI_INPUT")] + ami_input: String, + + /// Override for the region the tests should be run in. If none is provided the first region in + /// Infra.toml will be used. This is the region that the aws client is created with for testing + /// and resource agents. + #[clap(long, env = "TESTSYS_TARGET_REGION")] + target_region: Option, + + /// The name of the cluster for resource agents (eks resource agent, ecs resource agent). Note: + /// This is not the name of the `testsys cluster` this is the name of the cluster that tests + /// should be run on. If no cluster name is provided, the bottlerocket cluster + /// naming convention `-` will be used. + #[clap(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] + target_cluster_name: Option, + + /// The custom kube conformance image that should be used by sonobuoy. This is only applicable + /// for k8s variants. It can be omitted for non-k8s variants and it can be omitted to use the + /// default sonobuoy conformance image. + #[clap(long)] + kube_conformance_image: Option, + + /// The role that should be assumed by the agents + #[clap(long, env = "TESTSYS_ASSUME_ROLE")] + assume_role: Option, + + /// Specify the instance type that should be used. 
This is only applicable for aws-* variants. + /// It can be omitted for non-aws variants and can be omitted to use default instance types. + #[clap(long)] + instance_type: Option, + + /// Add secrets to the testsys agents (`--secret aws-credentials=my-secret`) + #[clap(long, short, parse(try_from_str = parse_key_val), number_of_values = 1)] + secret: Vec<(String, SecretName)>, + + #[clap(flatten)] + agent_images: TestsysImages, +} + +impl Run { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + let variant = + Variant::new(&self.variant).context("The provided variant cannot be interpreted.")?; + debug!("Using variant '{}'", variant); + let secrets = if self.secret.is_empty() { + None + } else { + Some(self.secret.into_iter().collect()) + }; + // If a lock file exists, use that, otherwise use Infra.toml or default + let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true) + .context("Unable to read infra config")?; + + let aws = infra_config.aws.unwrap_or_default(); + + // If the user gave an override region, use that, otherwise use the first region from the + // config. + let region = if let Some(region) = self.target_region { + debug!("Using provided region for testing"); + region + } else { + debug!("No region was provided, determining region from `Infra.toml`"); + aws.regions + .clone() + .pop_front() + .context("No region was provided and no regions found in infra config")? + }; + + match variant.family() { + "aws-k8s" => { + debug!("Variant is in 'aws-k8s' family"); + let bottlerocket_ami = ami(&self.ami_input, ®ion)?; + debug!("Using ami '{}'", bottlerocket_ami); + let aws_k8s = AwsK8s { + arch: self.arch, + variant: self.variant, + region, + assume_role: self.assume_role, + instance_type: self.instance_type, + ami: bottlerocket_ami.to_string(), + secrets, + kube_conformance_image: self.kube_conformance_image, + target_cluster_name: self.target_cluster_name, + }; + debug!("Creating crds for aws-k8s testing"); + let crds = aws_k8s.create_crds(self.test_flavor, &self.agent_images)?; + debug!("Adding crds to testsys cluster"); + for crd in crds { + let crd = client + .create_object(crd) + .await + .context("Unable to create object")?; + if let Some(name) = crd.name() { + info!("Successfully added '{}'", name) + }; + } + } + other => { + return Err(anyhow!( + "testsys has not yet added support for the '{}' variant family", + other + )) + } + }; + + Ok(()) + } +} + +fn ami(ami_input: &str, region: &str) -> Result { + let file = File::open(ami_input).context("Unable to open amis.json")?; + let ami_input: HashMap = + serde_json::from_reader(file).context(format!("Unable to deserialize '{}'", ami_input))?; + ensure!(!ami_input.is_empty(), "amis.json is empty"); + Ok(ami_input + .get(region) + .context(format!("ami not found for region '{}'", region))? + .id + .clone()) +} + +fn parse_key_val(s: &str) -> Result<(String, SecretName)> { + let mut iter = s.splitn(2, '='); + let key = iter.next().context("Key is missing")?; + let value = iter.next().context("Value is missing")?; + Ok((key.to_string(), SecretName::new(value)?)) +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "lowercase")] +pub(crate) enum TestType { + /// Conformance testing is a full integration test that asserts that Bottlerocket is working for + /// customer workloads. For k8s variants, for example, this will run the full suite of sonobuoy + /// conformance tests. + Conformance, + /// Run a quick test that ensures a basic workload can run on Bottlerocket. 
For example, on k8s + /// variance this will run sonobuoy in "quick" mode. For ECS variants, this will run a simple + /// ECS task. + Quick, +} + +derive_fromstr_from_deserialize!(TestType); + +#[derive(Clone, Debug, Deserialize)] +pub(crate) struct Image { + pub(crate) id: String, + // This is used to deserialize amis.json +} + +#[derive(Debug, Parser)] +pub(crate) struct TestsysImages { + /// Eks resource agent uri. If not provided the latest released resource agent will be used. + #[clap( + long = "eks-resource-agent-image", + env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/eks-resource-agent:v0.0.1" + )] + pub(crate) eks_resource: String, + + /// Ec2 resource agent uri. If not provided the latest released resource agent will be used. + #[clap( + long = "ec2-resource-agent-image", + env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/ec2-resource-agent:v0.0.1" + )] + pub(crate) ec2_resource: String, + + /// Sonobuoy test agent uri. If not provided the latest released test agent will be used. + #[clap( + long = "sonobuoy-test-agent-image", + env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/sonobuoy-test-agent:v0.0.1" + )] + pub(crate) sonobuoy_test: String, + + /// Images pull secret. This is the name of a Kubernetes secret that will be used to + /// pull the container image from a private registry. For example, if you created a pull secret + /// with `kubectl create secret docker-registry regcred` then you would pass + /// `--images-pull-secret regcred`. + #[clap(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")] + pub(crate) secret: Option, +} diff --git a/tools/testsys/src/secret.rs b/tools/testsys/src/secret.rs new file mode 100644 index 00000000..a9d2faa3 --- /dev/null +++ b/tools/testsys/src/secret.rs @@ -0,0 +1,117 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use model::test_manager::TestManager; +use model::SecretName; + +/// Add a testsys object to the testsys cluster. +#[derive(Debug, Parser)] +pub(crate) struct Add { + #[clap(subcommand)] + command: AddCommand, +} + +#[derive(Debug, Parser)] +enum AddCommand { + /// Add a secret to the testsys cluster. + Secret(AddSecret), +} + +impl Add { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + match self.command { + AddCommand::Secret(add_secret) => add_secret.run(client).await, + } + } +} + +/// Add a secret to the cluster. +#[derive(Debug, Parser)] +pub(crate) struct AddSecret { + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, Parser)] +enum Command { + /// Create a secret for image pulls. + Image(AddSecretImage), + /// Create a secret from key value pairs. + Map(AddSecretMap), +} + +impl AddSecret { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + match self.command { + Command::Image(add_secret_image) => add_secret_image.run(client).await, + Command::Map(add_secret_map) => add_secret_map.run(client).await, + } + } +} + +/// Add a `Secret` with key value pairs. +#[derive(Debug, Parser)] +pub(crate) struct AddSecretMap { + /// Name of the secret + #[clap(short, long)] + name: SecretName, + + /// Key value pairs for secrets. 
(Key=value) + #[clap(parse(try_from_str = parse_key_val))] + args: Vec<(String, String)>, +} + +impl AddSecretMap { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + client + .create_secret(&self.name, self.args) + .await + .context("Unable to create secret")?; + println!("Successfully added '{}' to secrets.", self.name); + Ok(()) + } +} + +fn parse_key_val(s: &str) -> Result<(String, String)> { + let mut iter = s.splitn(2, '='); + let key = iter.next().context("Key is missing")?; + let value = iter.next().context("Value is missing")?; + Ok((key.to_string(), value.to_string())) +} + +/// Add a secret to the testsys cluster for image pulls. +#[derive(Debug, Parser)] +pub(crate) struct AddSecretImage { + /// Controller image pull username + #[clap(long, short = 'u')] + pull_username: String, + + /// Controller image pull password + #[clap(long, short = 'p')] + pull_password: String, + + /// Image uri + #[clap(long = "image-uri", short)] + image_uri: String, + + /// Controller image uri + #[clap(long, short = 'n')] + secret_name: String, +} + +impl AddSecretImage { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + client + .create_image_pull_secret( + &self.secret_name, + &self.pull_username, + &self.pull_password, + &self.image_uri, + ) + .await + .context("Unable to create pull secret")?; + + println!("The secret was added."); + + Ok(()) + } +} diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs new file mode 100644 index 00000000..8f6df684 --- /dev/null +++ b/tools/testsys/src/status.rs @@ -0,0 +1,55 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use log::{debug, info}; +use model::test_manager::{SelectionParams, TestManager}; +use terminal_size::{Height, Width}; + +/// Check the status of testsys objects. +#[derive(Debug, Parser)] +pub(crate) struct Status { + /// Output the results in JSON format. + #[clap(long = "json")] + json: bool, + + /// Check the status of the testsys controller + #[clap(long, short = 'c')] + controller: bool, + + /// Focus status on a particular arch + #[clap(long)] + arch: Option, + + /// Focus status on a particular variant + #[clap(long)] + variant: Option, +} + +impl Status { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + let mut labels = Vec::new(); + if let Some(arch) = self.arch { + labels.push(format!("testsys/arch={}", arch)) + }; + if let Some(variant) = self.variant { + labels.push(format!("testsys/variant={}", variant)) + }; + let status = client + .status(&SelectionParams::Label(labels.join(",")), self.controller) + .await + .context("Unable to get status")?; + + if self.json { + info!( + "{}", + serde_json::to_string_pretty(&status) + .context("Could not create string from status.")? + ); + } else { + let (terminal_size::Width(width), _) = + terminal_size::terminal_size().unwrap_or((Width(80), Height(0))); + debug!("Window width '{}'", width); + println!("{}", status.to_string(width as usize)); + } + Ok(()) + } +} diff --git a/tools/testsys/src/uninstall.rs b/tools/testsys/src/uninstall.rs new file mode 100644 index 00000000..aa4b8961 --- /dev/null +++ b/tools/testsys/src/uninstall.rs @@ -0,0 +1,23 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use log::{info, trace}; +use model::test_manager::TestManager; + +/// The uninstall subcommand is responsible for removing all of the components for testsys in +/// a k8s cluster. This is completed by removing the `testsys-bottlerocket-aws` namespace. 
+#[derive(Debug, Parser)] +pub(crate) struct Uninstall {} + +impl Uninstall { + pub(crate) async fn run(self, client: TestManager) -> Result<()> { + trace!("Uninstalling testsys"); + + client.uninstall().await.context( + "Unable to uninstall testsys from the cluster. (Some artifacts may be left behind)", + )?; + + info!("testsys components were successfully uninstalled."); + + Ok(()) + } +} From cf78f4aaa67e0029c25c4accdaf49c21dc81c0db Mon Sep 17 00:00:00 2001 From: M Date: Wed, 15 Jun 2022 01:19:58 +0000 Subject: [PATCH 0686/1356] kubelet: add image GC threshold settings Add kubelet config options: `imageGCHighThresholdPercent`, `imageGCLowThresholdPercent` and validation for both settings to both the apiserver and template rendering code. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0c82df45..6e291cdb 100644 --- a/README.md +++ b/README.md @@ -411,6 +411,8 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. * `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. * `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. +* `settings.kubernetes.image-gc-high-threshold-percent`: The percent of disk usage after which image garbage collection is always run. +* `settings.kubernetes.image-gc-low-threshold-percent`: The percent of disk usage before which image garbage collection is never run. * `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. You can also optionally specify static pods for your node with the following settings. From ba0d9b0e4b6492de7c7bb1477b51d3726f6ca262 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 11 Jul 2022 09:12:54 +0000 Subject: [PATCH 0687/1356] kernel: add driver support for Cisco UCS platforms Enable networking and storage drivers for Cisco UCS platforms for bare metal variants. 
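
One quick way to confirm the new drivers on a Cisco UCS host running a
metal variant is to look for the loaded modules; an illustrative check,
assuming the usual kmod tools are available (for example from the admin
container):

lsmod | grep -E 'enic|fnic|snic'
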
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 9 +++++++++ packages/kernel-5.15/config-bottlerocket-metal | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 7670fe64..e88c418f 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -22,6 +22,11 @@ CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m CONFIG_BNXT=m +# Cisco UCS network support +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_INFINIBAND_USNIC=m + # Intel 10G network support CONFIG_IXGB=m CONFIG_IXGBE=m @@ -37,6 +42,10 @@ CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX5_CORE_EN=y CONFIG_NET_SWITCHDEV=y +# Cisco UCS HBA support +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m + # LSI Logic's SAS based RAID controllers CONFIG_MEGARAID_SAS=y diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 7670fe64..e88c418f 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -22,6 +22,11 @@ CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m CONFIG_BNXT=m +# Cisco UCS network support +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_INFINIBAND_USNIC=m + # Intel 10G network support CONFIG_IXGB=m CONFIG_IXGBE=m @@ -37,6 +42,10 @@ CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX5_CORE_EN=y CONFIG_NET_SWITCHDEV=y +# Cisco UCS HBA support +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m + # LSI Logic's SAS based RAID controllers CONFIG_MEGARAID_SAS=y From 301e747b852efc7b3240716270c879018ed5d8b2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 11 Jul 2022 23:42:39 +0000 Subject: [PATCH 0688/1356] models: add new setting to block kernel modules For compliance or policy reasons, the system administrator might want to prevent certain kernel modules from being loaded, if they are not expected to be used. This isn't meant to be an ironclad guarantee that the desired kernel functionality will never be available. The module could be built into the kernel instead, or loaded early in the boot, or a core dependency of another required module. Signed-off-by: Ben Cressey --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 6e291cdb..131bfe05 100644 --- a/README.md +++ b/README.md @@ -623,6 +623,17 @@ Here are the metrics settings: May be set to "none" (the default in older [variants](variants/), up through aws-k8s-1.19), "integrity" (the default for newer [variants](variants/)), or "confidentiality". **Important note:** this setting cannot be lowered (toward 'none') at runtime. You must reboot for a change to a lower level to take effect. +* `settings.kernel.modules..allowed`: Whether the named kernel module is allowed to be loaded. + **Important note:** this setting does not affect kernel modules that are already loaded. + You may need to reboot for a change to disallow a kernel module to take effect. + * Example user data for blocking kernel modules: + ``` + [settings.kernel.modules.sctp] + allowed = false + + [settings.kernel.modules.udf] + allowed = false + ``` * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. 
* Example user data for setting up sysctl: From 36a669b574a3f46e92ec12e4f562f55e29f76b57 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 9 Jul 2022 16:20:09 +0000 Subject: [PATCH 0689/1356] variants: use quiet parameter for aws and vmware Use the "quiet" parameter for platforms where the hardware is known and the kernel output is not generally needed on the console in order to diagnose issues. This includes the "aws" and "vmware" variants. The "metal" variants continue to use the more verbose default output so that any hardware without matching driver support can be detected. Force systemd to always print the status of units, since this can be invaluable data when troubleshooting failures caused by dependency ordering problems. Remove the printk settings from the default sysctls, since these will be applied by `systemd` very early and override the "quiet" behavior. The default values in `/proc/sys/kernel/printk` are controlled by the kernel config, and will be "7 4 1 7" without the "quiet" parameter, and "4 4 1 7" when it is present. The main motivation for this is to improve boot performance and make it more consistent. printk logging is expensive, and can slow down many code paths in the kernel. Signed-off-by: Ben Cressey --- tools/rpm2img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index 6b0565a1..68241245 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -281,7 +281,7 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { $VERITY_DATA_BLOCK_SIZE $VERITY_HASH_BLOCK_SIZE $VERITY_DATA_4K_BLOCKS 1 $VERITY_HASH_ALGORITHM $VERITY_ROOT_HASH $VERITY_SALT \\ 2 restart_on_corruption ignore_zero_blocks" \\ -- \\ - systemd.log_target=journal-or-kmsg systemd.log_color=0 + systemd.log_target=journal-or-kmsg systemd.log_color=0 systemd.show_status=true ${INITRD} } EOF From 4c4389ef595e16b4b93b9ea334f013532bc36ac2 Mon Sep 17 00:00:00 2001 From: Mahdi Chaker Date: Sat, 16 Jul 2022 09:47:59 +0000 Subject: [PATCH 0690/1356] git: add mchaker to mailmap Add mchaker to the git .mailmap file --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 61a6496a..96bbec15 100644 --- a/.mailmap +++ b/.mailmap @@ -6,3 +6,4 @@ Samuel Mendoza-Jonas <53018225+sam-aws@users.noreply.githu Tom Kirchner Zac Mrowicki Zac Mrowicki +Mahdi Chaker M From d4fafbe4f3a0884fc9f5ab8e9bd371bcd8ba681c Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 6 Jul 2022 08:44:37 +0000 Subject: [PATCH 0691/1356] kernel: Only build metal specific drivers for metal variants Adjust kernel spec files to only merge `config-bottlerocket-metal` config fragment when the variant contains the substring `metal`. To make this work we need to force a rebuild of the kernel for each variant by setting the `variant-sensitive` option. 
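
For illustration, the substring check added to the spec files behaves as
follows for two variant names taken from the README (example invocations,
not part of the build change itself):

echo "metal-k8s-1.23" | grep -Fqw "metal" && echo "merge metal fragment"
echo "aws-k8s-1.23" | grep -Fqw "metal" || echo "skip metal fragment"

The `-w` flag requires `metal` to appear as a whole word, which
hyphen-separated names such as metal-k8s-1.23 satisfy.
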
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 1 + packages/kernel-5.10/kernel-5.10.spec | 4 ++++ packages/kernel-5.15/Cargo.toml | 1 + packages/kernel-5.15/kernel-5.15.spec | 4 ++++ 4 files changed, 10 insertions(+) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index a72cc614..1c88f1a9 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -6,6 +6,7 @@ publish = false build = "build.rs" [package.metadata.build-package] +variant-sensitive = true package-name = "kernel-5.10" [lib] diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 9edb31b5..4e6b8e8b 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,4 +1,5 @@ %global debug_package %{nil} +%global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.10 Version: 5.10.118 @@ -95,7 +96,10 @@ scripts/kconfig/merge_config.sh \ ../config-microcode \ %endif %{SOURCE100} \ +%if %{_is_metal_variant} %{SOURCE101} +%endif + rm -f ../config-* ../*.patch %global kmake \ diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 3e19590d..94dd6ff7 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -6,6 +6,7 @@ publish = false build = "build.rs" [package.metadata.build-package] +variant-sensitive = true package-name = "kernel-5.15" [lib] diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index ec5b4d76..dd2f79df 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,4 +1,5 @@ %global debug_package %{nil} +%global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.15 Version: 5.15.43 @@ -93,7 +94,10 @@ scripts/kconfig/merge_config.sh \ ../config-microcode \ %endif %{SOURCE100} \ +%if %{_is_metal_variant} %{SOURCE101} +%endif + rm -f ../config-* ../*.patch %global kmake \ From ebb51d6474cecf01d473d2d08f6c5a47265eea5d Mon Sep 17 00:00:00 2001 From: Mahdi Chaker Date: Sat, 16 Jul 2022 09:36:17 +0000 Subject: [PATCH 0692/1356] docs: README Markdown linting Standardize list item characters to `*` and add newlines between paragraphs. Automatic linting from within VS Code. --- README.md | 114 ++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 86 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 131bfe05..916c32f6 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ Bottlerocket-specific additions focus on reliable updates and on the API. Instead of making configuration changes manually, you can change settings with an API call, and these changes are automatically migrated through updates. Some notable features include: + * [API access](#api) for configuring your system, with secure out-of-band [access methods](#exploration) when you need them. * [Updates](#updates) based on partition flips, for fast and reliable system updates. * [Modeled configuration](#settings) that's automatically migrated through updates. 
@@ -51,39 +52,39 @@ For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an ima The following variants support EKS, as described above: -- `aws-k8s-1.19` -- `aws-k8s-1.20` -- `aws-k8s-1.21` -- `aws-k8s-1.22` -- `aws-k8s-1.23` -- `aws-k8s-1.21-nvidia` -- `aws-k8s-1.22-nvidia` -- `aws-k8s-1.23-nvidia` +* `aws-k8s-1.19` +* `aws-k8s-1.20` +* `aws-k8s-1.21` +* `aws-k8s-1.22` +* `aws-k8s-1.23` +* `aws-k8s-1.21-nvidia` +* `aws-k8s-1.22-nvidia` +* `aws-k8s-1.23-nvidia` The following variants support ECS: -- `aws-ecs-1` -- `aws-ecs-1-nvidia` +* `aws-ecs-1` +* `aws-ecs-1-nvidia` We also have variants that are designed to be Kubernetes worker nodes in VMware: -- `vmware-k8s-1.20` -- `vmware-k8s-1.21` -- `vmware-k8s-1.22` -- `vmware-k8s-1.23` +* `vmware-k8s-1.20` +* `vmware-k8s-1.21` +* `vmware-k8s-1.22` +* `vmware-k8s-1.23` The following variants are designed to be Kubernetes worker nodes on bare metal: -- `metal-k8s-1.21` -- `metal-k8s-1.22` -- `metal-k8s-1.23` +* `metal-k8s-1.21` +* `metal-k8s-1.22` +* `metal-k8s-1.23` The following variants are no longer supported: -- `aws-k8s-1.15` -- `aws-k8s-1.16` -- `aws-k8s-1.17` -- `aws-k8s-1.18` +* `aws-k8s-1.15` +* `aws-k8s-1.16` +* `aws-k8s-1.17` +* `aws-k8s-1.18` We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). @@ -100,6 +101,7 @@ To get started with Kubernetes in Amazon EKS, please see [QUICKSTART-EKS](QUICKS To get started with Kubernetes in VMware, please see [QUICKSTART-VMWARE](QUICKSTART-VMWARE.md). To get started with Amazon ECS, please see [QUICKSTART-ECS](QUICKSTART-ECS.md). These guides describe: + * how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers * how to launch a Bottlerocket instance in EC2 or VMware @@ -107,11 +109,13 @@ To see how to provision Bottlerocket on bare metal, see [PROVISIONING-METAL](PRO To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). It describes: + * how to build an image * how to register an EC2 AMI from an image To publish your built Bottlerocket images, please see [PUBLISHING](PUBLISHING.md). It describes: + * how to make TUF repos including your image * how to copy your AMI across regions * how to mark your AMIs public or grant access to specific accounts @@ -228,24 +232,29 @@ You can read more about the update APIs in our [update system documentation](sou apiclient knows how to handle those update APIs for you, and you can run it from the [control](#control-container) or [admin](#admin-container) containers. To see what updates are available: + ``` apiclient update check ``` + If an update is available, it will show up in the `chosen_update` field. The `available_updates` field will show the full list of available versions, including older versions, because Bottlerocket supports safely rolling back. To apply the latest update: + ``` apiclient update apply ``` The next time you reboot, you'll start up in the new version, and system configuration will be automatically [migrated](sources/api/migration/). 
To reboot right away: + ``` apiclient reboot ``` If you're confident about updating, the `apiclient update apply` command has `--check` and `--reboot` flags to combine the above actions, so you can accomplish all of the above steps like this: + ``` apiclient update apply --check --reboot ``` @@ -275,17 +284,20 @@ Here we'll describe the settings you can configure on your Bottlerocket instance #### Using the API client You can see the current settings with an API request: + ``` apiclient get settings ``` This will return all of the current settings in JSON format. For example, here's an abbreviated response: + ``` {"motd":"...", {"kubernetes": ...}} ``` You can change settings like this: + ``` apiclient set motd="hi there" kubernetes.node-labels.environment=test ``` @@ -335,17 +347,21 @@ For more details about running Bottlerocket as a Kubernetes worker node in VMwar The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). + * `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. For Kubernetes variants in AWS, you must also specify: + * `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". For Kubernetes variants in VMware, you must specify: + * `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. This value can be set as a string containing a single IP address, or as a list containing multiple IP addresses. Examples: + ``` # Valid, single IP [settings.kubernetes] @@ -359,9 +375,11 @@ For Kubernetes variants in VMware, you must specify: * `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. + * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. * `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, values and effects entries added when registering the node in the cluster. * Example user data for setting up labels and taints: + ``` [settings.kubernetes.node-labels] "label1" = "foo" @@ -372,6 +390,7 @@ The following settings can be optionally set to customize the node labels and ta ``` The following settings are optional and allow you to further configure your cluster. + * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. * `settings.kubernetes.cloud-provider`: The cloud provider for this cluster. Defaults to `aws` for AWS variants, and `external` for other variants. @@ -381,23 +400,29 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. 
Remember to quote signals (since they all contain ".") and to quote all values. * Example user data for setting up eviction hard: + ``` [settings.kubernetes.eviction-hard] "memory.available" = "15%" ``` + * `settings.kubernetes.allowed-unsafe-sysctls`: Enables specified list of unsafe sysctls. * Example user data for setting up allowed unsafe sysctls: + ``` allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] ``` + * `settings.kubernetes.system-reserved`: Resources reserved for system components. * Example user data for setting up system reserved: + ``` [settings.kubernetes.system-reserved] cpu = "10m" memory = "100Mi" ephemeral-storage= "1Gi" ``` + * `settings.kubernetes.registry-qps`: The registry pull QPS. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. * `settings.kubernetes.event-qps`: The maximum event creations per second. @@ -417,6 +442,7 @@ The following settings are optional and allow you to further configure your clus You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. + * `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. * `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. @@ -424,6 +450,7 @@ For Kubernetes variants in AWS and VMware, the following are set for you automat In AWS, [pluto](sources/api/) sets these based on runtime instance information. In VMware and on bare metal, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on default values. (See the [VMware defaults](sources/models/src/vmware-k8s-1.23/defaults.d) or [bare metal defaults](sources/models/src/metal-k8s-1.23/defaults.d)). + * `settings.kubernetes.node-ip`: The IP address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. * `settings.kubernetes.kube-reserved`: Resources reserved for node components. @@ -433,6 +460,7 @@ In VMware and on bare metal, Bottlerocket uses [netdog](sources/api/) (for `node * `ephemeral-storage`: defaults to `1Gi`. For Kubernetes variants in AWS, the following settings are set for you automatically by [pluto](sources/api/). + * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) * `settings.kubernetes.cluster-dns-ip`: Derived from the EKS Service IP CIDR or the CIDR block of the primary network interface. @@ -442,10 +470,12 @@ See the [setup guide](QUICKSTART-ECS.md) for much more detail on setting up Bott The following settings are optional and allow you to configure how your instance joins an ECS cluster. Since joining a cluster happens at startup, they need to be [specified in user data](#using-user-data). + * `settings.ecs.cluster`: The name or [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of your Amazon ECS cluster. If left unspecified, Bottlerocket will join your `default` cluster. * `settings.ecs.instance-attributes`: [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) in the form of key, value pairs added when registering the container instance in the cluster. 
* Example user data for setting up attributes: + ``` [settings.ecs.instance-attributes] attribute1 = "foo" @@ -454,6 +484,7 @@ Since joining a cluster happens at startup, they need to be [specified in user d The following settings are optional and allow you to further configure your cluster. These settings can be changed at any time. + * `settings.ecs.logging-drivers`: The list of logging drivers available on the container instance. The ECS agent running on a container instance must register available logging drivers before tasks that use those drivers are eligible to be placed on the instance. Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. @@ -465,14 +496,17 @@ These settings can be changed at any time. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. * `settings.ecs.image-pull-behavior`: The behavior used to customize the [pull image process](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html#ecs-agent-availparam) for your container instances. Supported values are `default`, `always`, `once`, `prefer-cached`, and the default is `default`. + #### CloudFormation signal helper settings For AWS variants, these settings allow you to set up CloudFormation signaling to indicate whether Bottlerocket hosts running in EC2 have been successfully created or updated: + * `settings.cloudformation.should-signal`: Whether to check status and send signal. Defaults to `false`. If set to `true`, both `stack-name` and `logical-resource-id` need to be specified. * `settings.cloudformation.stack-name`: Name of the CloudFormation Stack to signal. * `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. #### Auto Scaling group settings + * `settings.autoscaling.should-wait`: Whether to wait for the instance to reach the `InService` state before the orchestrator agent joins the cluster. Defaults to `false`. Set this to `true` only if the instance is part of an Auto Scaling group, or will be attached to one later. #### OCI Hooks settings @@ -485,10 +519,12 @@ Once you opt-in to use additional OCI hooks, any new orchestrated containers wil #### Container image registry settings The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. + * `settings.container-registry.mirrors`: An array of container image registry mirror settings. Each element specifies the registry and the endpoints for said registry. When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. (Docker and containerd will still try the default registry URL if the mirrors fail.) * Example user data for setting up image registry mirrors: + ``` [[settings.container-registry.mirrors]] registry = "*" @@ -498,15 +534,18 @@ When pulling an image from a registry, the container runtime will try the endpoi registry = "docker.io" endpoint = [ "https://", "https://"] ``` + If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. 
For [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) images from Amazon ECR private repositories, registry mirrors are currently unsupported. The following setting is optional and allows you to configure image registry credentials. + * `settings.container-registry.credentials`: An array of container images registry credential settings. Each element specifies the registry and the credential information for said registry. The credential fields map to [containerd's registry credential fields](https://github.com/containerd/containerd/blob/v1.6.0/docs/cri/registry.md#configure-registry-credentials), which in turn map to the fields in `.docker/config.json`. It is recommended to programmatically set these settings via `apiclient` through the Bottlerocket control container and/or custom host-containers. * An example `apiclient` call to set registry credentials for `gcr.io` and `docker.io` looks like this: + ```bash apiclient set --json '{ "container-registry": { @@ -524,6 +563,7 @@ It is recommended to programmatically set these settings via `apiclient` through } }' ``` + In addition to the container runtime daemons, these credential settings will also apply to [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) image pulls as well. #### Updates settings @@ -547,8 +587,9 @@ In addition to the container runtime daemons, these credential settings will als This setting results in modifications to the `/etc/hosts` file for Bottlerocket. Note that this setting does not typically impact name resolution for containers, which usually rely on orchestrator-specific mechanisms for configuring static resolution. (See [ECS](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HostEntry.html) and [Kubernetes](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) documentation for those mechanisms.) - + Example: + ```toml [settings.network] hosts = [ @@ -556,11 +597,14 @@ In addition to the container runtime daemons, these credential settings will als ["10.1.1.1", ["test2.example.com"]] ] ``` + This example would result in an `/etc/hosts` file entries like so: + ``` 10.0.0.0 test.example.com test1.example.com 10.1.1.1 test2.example.com ``` + Repeated entries are merged (including loopback entries), with the first aliases listed taking precedence. 
e.g.: ```toml @@ -571,28 +615,31 @@ In addition to the container runtime daemons, these credential settings will als ["10.0.0.0", ["test3.example.com"]], ] ``` + Would result in `/etc/hosts` entries like so: + ``` 10.0.0.0 test.example.com test1.example.com test3.example.com 10.1.1.1 test2.example.com ``` - ##### Proxy settings These settings will configure the proxying behavior of the following services: + * For all variants: - * [containerd.service](packages/containerd/containerd.service) - * [host-containerd.service](packages/host-ctr/host-containerd.service) + * [containerd.service](packages/containerd/containerd.service) + * [host-containerd.service](packages/host-ctr/host-containerd.service) * For Kubernetes variants: - * [kubelet.service](packages/kubernetes-1.18/kubelet.service) + * [kubelet.service](packages/kubernetes-1.18/kubelet.service) * For the ECS variant: - * [docker.service](packages/docker-engine/docker.service) - * [ecs.service](packages/ecs-agent/ecs.service) + * [docker.service](packages/docker-engine/docker.service) + * [ecs.service](packages/ecs-agent/ecs.service) * `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. * `settings.network.no-proxy`: A list of hosts that are excluded from proxying. Example: + ``` [settings.network] https-proxy = "1.2.3.4:8080" @@ -637,6 +684,7 @@ Here are the metrics settings: * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. * Example user data for setting up sysctl: + ``` [settings.kernel.sysctl] "user.max_user_namespaces" = "16384" @@ -648,6 +696,7 @@ Here are the metrics settings: *Please note that boot settings only exist for bare-metal variants at the moment* Specifying either of the following settings will generate a kernel boot config file to be loaded on subsequent boots: + * `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. * `settings.boot.init-parameters`: This allows additional init parameters to be specified on the kernel command line during boot. @@ -712,6 +761,7 @@ apiclient set \ You can use this method from within a [bootstrap container](#bootstrap-containers-settings), if your user data is over the size limit of the platform. #### Host containers settings + * `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). * `settings.host-containers.admin.enabled`: Whether the admin container is enabled. * `settings.host-containers.admin.superpowered`: Whether the admin container has high levels of access to the Bottlerocket host. @@ -731,6 +781,7 @@ You can optionally define a `user-data` field with arbitrary base64-encoded data Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. 
Here's an example of adding a custom host container with API calls: + ``` apiclient set \ host-containers.custom.source=MY-CONTAINER-URI \ @@ -739,6 +790,7 @@ apiclient set \ ``` Here's the same example, but with the settings you'd add to user data: + ``` [settings.host-containers.custom] enabled = true @@ -761,6 +813,7 @@ It's available at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` and (sin The default `admin` host-container, for example, stores its SSH host keys under `/.bottlerocket/host-containers/admin/etc/ssh/`. There are a few important caveats to understand about host containers: + * They're not orchestrated. They only start or stop according to that `enabled` flag. * They run in a separate instance of containerd than the one used for orchestrated containers like Kubernetes pods. * They're not updated automatically. You need to update the `source` and commit those changes. @@ -772,6 +825,7 @@ We use them for the control container because it needs to be available early to Be careful, and make sure you have a similar low-level use case before reaching for host containers. #### Bootstrap containers settings + * `settings.bootstrap-containers..source`: the image for the container * `settings.bootstrap-containers..mode`: the mode of the container, it could be one of `off`, `once` or `always`. See below for a description of modes. * `settings.bootstrap-containers..essential`: whether or not the container should fail the boot process, defaults to `false` @@ -815,6 +869,7 @@ essential = true ``` ##### Mount propagations in bootstrap and superpowered containers + Both bootstrap and superpowered host containers are configured with the `/.bottlerocket/rootfs/mnt` bind mount that points to `/mnt` in the host, which itself is a bind mount of `/local/mnt`. This bind mount is set up with shared propagations, so any new mount point created underneath `/.bottlerocket/rootfs/mnt` in any bootstrap or superpowered host container will propagate across mount namespaces. You can use this feature to configure ephemeral disks attached to your hosts that you may want to use on your workloads. @@ -866,6 +921,7 @@ There are a few important caveats about the provided kdump support: * The crash kernel will only be loaded when the `crashkernel` parameter is present in the kernel's cmdline and if there is memory reserved for it ### NVIDIA GPUs Support + Bottlerocket's `nvidia` variants include the required packages and configurations to leverage NVIDIA GPUs. The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g4dn`, `g5` and `g5g`. Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants, and [QUICKSTART-ECS](QUICKSTART-ECS.md#aws-ecs--nvidia-variants) for ECS variants. @@ -892,6 +948,7 @@ We use RPM package definitions to build and install individual packages into an RPM itself is not in the image - it's just a common and convenient package definition format. We currently package the following major third-party components: + * Linux kernel ([background](https://en.wikipedia.org/wiki/Linux), [packaging](packages/kernel-5.4/)) * glibc ([background](https://www.gnu.org/software/libc/), [packaging](packages/glibc/)) * Buildroot as build toolchain ([background](https://buildroot.org/), via the [SDK](https://github.com/bottlerocket-os/bottlerocket-sdk)) @@ -947,6 +1004,7 @@ For more details, see the [API system documentation](sources/api/). 
### Default Volumes

 Bottlerocket operates with two default storage volumes.
+
 * The root device, holds the active and passive [partition sets](#updates-1).
   It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API.
 * The data device is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings).

From b235f721e64e83bcd436e74661a58d383f6c1d27 Mon Sep 17 00:00:00 2001
From: Leonard Foerster
Date: Thu, 21 Jul 2022 09:48:37 +0000
Subject: [PATCH 0693/1356] kernel: config: clean up '=n' settings

Disabling a kernel config setting is usually done in a config file in
the form of

\# CONFIG_ is not set

instead of

CONFIG_=n

While both should be equivalent today, historically there were problems
with specifying '=n' still leaving that setting defined and having the
opposite effect of what was intended. Nowadays either way will result
in a final config that only has the `is not set` variant for all
deactivated options, so the diff of the resulting artifact before and
after this patch will be empty.

As the `is not set` form is the commonly used form, let's adhere to it.
As a bonus it reduces the noise when comparing configs that are
manually merged using the merge-config script.

Replaced all the '=n' settings with the common form using the following
command:

for file in packages/kernel*/config*; do sed -E -i
"s/^(CONFIG_[A-Z0-9_]*)=n/# \1 is not set/g" $file; done

Signed-off-by: Leonard Foerster
---
 packages/kernel-5.10/config-bottlerocket | 58 +++++++++++------------
 packages/kernel-5.15/config-bottlerocket | 60 ++++++++++++------------
 packages/kernel-5.4/config-bottlerocket  | 58 +++++++++++------------
 3 files changed, 88 insertions(+), 88 deletions(-)

diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket
index d6ebdd44..9f2bdfcc 100644
--- a/packages/kernel-5.10/config-bottlerocket
+++ b/packages/kernel-5.10/config-bottlerocket
@@ -39,13 +39,13 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
 CONFIG_SECURITY_YAMA=y
 
 # Do not allow SELinux to be disabled at boot.
-CONFIG_SECURITY_SELINUX_BOOTPARAM=n
+# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set
 
 # Do not allow SELinux to be disabled at runtime.
-CONFIG_SECURITY_SELINUX_DISABLE=n
+# CONFIG_SECURITY_SELINUX_DISABLE is not set
 
 # Do not allow SELinux to use `enforcing=0` behavior.
-CONFIG_SECURITY_SELINUX_DEVELOP=n
+# CONFIG_SECURITY_SELINUX_DEVELOP is not set
 
 # Check the protection applied by the kernel for mmap and mprotect,
 # rather than the protection requested by userspace.
@@ -73,7 +73,7 @@ CONFIG_DEBUG_INFO_BTF=y
 
 # We don't want to extend the kernel command line with any upstream defaults;
 # Bottlerocket uses a fairly custom setup that needs tight control over it.
-CONFIG_CMDLINE_EXTEND=n
+# CONFIG_CMDLINE_EXTEND is not set
 
 # Enable ZSTD kernel image compression
 CONFIG_HAVE_KERNEL_ZSTD=y
@@ -98,31 +98,31 @@ CONFIG_BOOT_CONFIG=y
 CONFIG_CHECKPOINT_RESTORE=y
 
 # Disable unused filesystems.
-CONFIG_AFS_FS=n -CONFIG_CRAMFS=n -CONFIG_ECRYPT_FS=n -CONFIG_EXT2_FS=n -CONFIG_EXT3_FS=n +# CONFIG_AFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_GFS2_FS=n -CONFIG_HFS_FS=n -CONFIG_HFSPLUS_FS=n -CONFIG_JFS_FS=n -CONFIG_JFFS2_FS=n -CONFIG_NFS_V2=n -CONFIG_NILFS2_FS=n -CONFIG_NTFS_FS=n -CONFIG_ROMFS_FS=n -CONFIG_UFS_FS=n -CONFIG_ZONEFS_FS=n +# CONFIG_GFS2_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_NFS_V2 is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ZONEFS_FS is not set # Disable unused network protocols. -CONFIG_AF_RXRPC=n -CONFIG_ATM=n -CONFIG_CAN=n -CONFIG_HSR=n -CONFIG_IP_DCCP=n -CONFIG_L2TP=n -CONFIG_RDS=n -CONFIG_RFKILL=n -CONFIG_TIPC=n +# CONFIG_AF_RXRPC is not set +# CONFIG_ATM is not set +# CONFIG_CAN is not set +# CONFIG_HSR is not set +# CONFIG_IP_DCCP is not set +# CONFIG_L2TP is not set +# CONFIG_RDS is not set +# CONFIG_RFKILL is not set +# CONFIG_TIPC is not set diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 9a370f2f..d3e9bfea 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -39,13 +39,13 @@ CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y CONFIG_SECURITY_YAMA=y # Do not allow SELinux to be disabled at boot. -CONFIG_SECURITY_SELINUX_BOOTPARAM=n +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set # Do not allow SELinux to be disabled at runtime. -CONFIG_SECURITY_SELINUX_DISABLE=n +# CONFIG_SECURITY_SELINUX_DISABLE is not set # Do not allow SELinux to use `enforcing=0` behavior. -CONFIG_SECURITY_SELINUX_DEVELOP=n +# CONFIG_SECURITY_SELINUX_DEVELOP is not set # Check the protection applied by the kernel for mmap and mprotect, # rather than the protection requested by userspace. @@ -73,7 +73,7 @@ CONFIG_DEBUG_INFO_BTF=y # We don't want to extend the kernel command line with any upstream defaults; # Bottlerocket uses a fairly custom setup that needs tight control over it. -CONFIG_CMDLINE_EXTEND=n +# CONFIG_CMDLINE_EXTEND is not set # Enable ZSTD kernel image compression CONFIG_HAVE_KERNEL_ZSTD=y @@ -98,32 +98,32 @@ CONFIG_BOOT_CONFIG=y CONFIG_CHECKPOINT_RESTORE=y # Disable unused filesystems. -CONFIG_AFS_FS=n -CONFIG_CRAMFS=n -CONFIG_ECRYPT_FS=n -CONFIG_EXT2_FS=n -CONFIG_EXT3_FS=n +# CONFIG_AFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_GFS2_FS=n -CONFIG_HFS_FS=n -CONFIG_HFSPLUS_FS=n -CONFIG_JFS_FS=n -CONFIG_JFFS2_FS=n -CONFIG_NFS_V2=n -CONFIG_NILFS2_FS=n -CONFIG_NTFS_FS=n -CONFIG_ROMFS_FS=n -CONFIG_UFS_FS=n -CONFIG_ZONEFS_FS=n -CONFIG_NTFS3_FS=n +# CONFIG_GFS2_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_NFS_V2 is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ZONEFS_FS is not set +# CONFIG_NTFS3_FS is not set # Disable unused network protocols. 
-CONFIG_AF_RXRPC=n -CONFIG_ATM=n -CONFIG_CAN=n -CONFIG_HSR=n -CONFIG_IP_DCCP=n -CONFIG_L2TP=n -CONFIG_RDS=n -CONFIG_RFKILL=n -CONFIG_TIPC=n +# CONFIG_AF_RXRPC is not set +# CONFIG_ATM is not set +# CONFIG_CAN is not set +# CONFIG_HSR is not set +# CONFIG_IP_DCCP is not set +# CONFIG_L2TP is not set +# CONFIG_RDS is not set +# CONFIG_RFKILL is not set +# CONFIG_TIPC is not set diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 91442915..92b7ac81 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -34,13 +34,13 @@ CONFIG_EFI_MIXED=y CONFIG_SECURITY_YAMA=y # Do not allow SELinux to be disabled at boot. -CONFIG_SECURITY_SELINUX_BOOTPARAM=n +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set # Do not allow SELinux to be disabled at runtime. -CONFIG_SECURITY_SELINUX_DISABLE=n +# CONFIG_SECURITY_SELINUX_DISABLE is not set # Do not allow SELinux to use `enforcing=0` behavior. -CONFIG_SECURITY_SELINUX_DEVELOP=n +# CONFIG_SECURITY_SELINUX_DEVELOP is not set # Check the protection applied by the kernel for mmap and mprotect, # rather than the protection requested by userspace. @@ -68,7 +68,7 @@ CONFIG_DEBUG_INFO_BTF=y # We don't want to extend the kernel command line with any upstream defaults; # Bottlerocket uses a fairly custom setup that needs tight control over it. -CONFIG_CMDLINE_EXTEND=n +# CONFIG_CMDLINE_EXTEND is not set # Enable ZSTD kernel image compression CONFIG_HAVE_KERNEL_ZSTD=y @@ -87,31 +87,31 @@ CONFIG_MOUSE_PS2=m CONFIG_CHECKPOINT_RESTORE=y # Disable unused filesystems. -CONFIG_AFS_FS=n -CONFIG_CRAMFS=n -CONFIG_ECRYPT_FS=n -CONFIG_EXT2_FS=n -CONFIG_EXT3_FS=n +# CONFIG_AFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_GFS2_FS=n -CONFIG_HFS_FS=n -CONFIG_HFSPLUS_FS=n -CONFIG_JFS_FS=n -CONFIG_JFFS2_FS=n -CONFIG_NFS_V2=n -CONFIG_NILFS2_FS=n -CONFIG_NTFS_FS=n -CONFIG_ROMFS_FS=n -CONFIG_UFS_FS=n -CONFIG_ZONEFS_FS=n +# CONFIG_GFS2_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_NFS_V2 is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ZONEFS_FS is not set # Disable unused network protocols. -CONFIG_AF_RXRPC=n -CONFIG_ATM=n -CONFIG_CAN=n -CONFIG_HSR=n -CONFIG_IP_DCCP=n -CONFIG_L2TP=n -CONFIG_RDS=n -CONFIG_RFKILL=n -CONFIG_TIPC=n +# CONFIG_AF_RXRPC is not set +# CONFIG_ATM is not set +# CONFIG_CAN is not set +# CONFIG_HSR is not set +# CONFIG_IP_DCCP is not set +# CONFIG_L2TP is not set +# CONFIG_RDS is not set +# CONFIG_RFKILL is not set +# CONFIG_TIPC is not set From ee7e97a978b45f6d8b103f0e8ce9852d884105bd Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Thu, 14 Jul 2022 17:29:46 +0200 Subject: [PATCH 0694/1356] docs: Add quickstart guide for local QEMU/KVM VMs Commit e2b58d60 ("tools: Add start-local-vm script") added a wrapper script to simplify running a locally built Bottlerocket image in a local VM using QEMU and KVM. Provide a quickstart guide analogous to the existing ones for EKS, ECS, and VMware to help make use of it. The guide currently assumes anyone following along to be running Fedora. Changes to the guide for other distros will be minimal, if they are needed at all. However, changes in start-local-vm are likely to be required. Leave this to a dedicated later change. 
Signed-off-by: Markus Boehme --- BUILDING.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index dd08712e..46698424 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -174,7 +174,12 @@ cargo make -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-he ## Use your image -See the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for Amazon ECS](QUICKSTART-ECS.md) for information on running Bottlerocket images. +See any of the setup guides tailored to the various execution environments for information on running Bottlerocket images: + +* [Setup guide for Kubernetes](QUICKSTART-EKS.md) +* [Setup guide for Amazon ECS](QUICKSTART-ECS.md) +* [Setup guide for VMware](QUICKSTART-VMWARE.md) +* [Setup guide for QEMU/KVM](QUICKSTART-LOCAL.md) ## Publish your image From 247afe2dde8e3a9dfe53166a2fe79076e4184ad6 Mon Sep 17 00:00:00 2001 From: Mahdi Chaker Date: Fri, 22 Jul 2022 18:40:45 +0000 Subject: [PATCH 0695/1356] host-ctr: add a mount for system logs Add a container-visible mount for /var/log/support, which contains system logs. --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 916c32f6..ca6ff593 100644 --- a/README.md +++ b/README.md @@ -674,6 +674,7 @@ Here are the metrics settings: **Important note:** this setting does not affect kernel modules that are already loaded. You may need to reboot for a change to disallow a kernel module to take effect. * Example user data for blocking kernel modules: + ``` [settings.kernel.modules.sctp] allowed = false @@ -681,6 +682,7 @@ Here are the metrics settings: [settings.kernel.modules.udf] allowed = false ``` + * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. * Example user data for setting up sysctl: @@ -896,13 +898,14 @@ logdog ``` This will write an archive of the logs to `/var/log/support/bottlerocket-logs.tar.gz`. +This archive is accessible from host containers at `/.bottlerocket/support`. You can use SSH to retrieve the file. Once you have exited from the Bottlerocket host, run a command like: ```bash ssh -i YOUR_KEY_FILE \ ec2-user@YOUR_HOST \ - "cat /.bottlerocket/rootfs/var/log/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz + "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz ``` (If your instance isn't accessible through SSH, you can use [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html).) From 623a74205c7f1420a5b0d0c351447adf7057eab2 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 26 Jul 2022 18:31:15 -0700 Subject: [PATCH 0696/1356] tools: update rust dependencies Ran 'cargo update' to update rust dependencies. 
--- tools/Cargo.lock | 337 +++++++++++++++++++++++------------------------ 1 file changed, 167 insertions(+), 170 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 15eee4b0..a6ce1c5b 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -37,15 +37,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" [[package]] name = "argh" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb41d85d92dfab96cb95ab023c265c5e4261bb956c0fb49ca06d90c570f1958" +checksum = "a7e7e4aa7e40747e023c0761dafcb42333a9517575bbf1241747f68dd3177a62" dependencies = [ "argh_derive", "argh_shared", @@ -53,9 +53,9 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be69f70ef5497dd6ab331a50bd95c6ac6b8f7f17a7967838332743fbd58dc3b5" +checksum = "69f2bd7ff6ed6414f4e5521bd509bae46454bbd513801767ced3f21a751ab4bc" dependencies = [ "argh_shared", "heck 0.3.3", @@ -66,15 +66,15 @@ dependencies = [ [[package]] name = "argh_shared" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f8c380fa28aa1b36107cd97f0196474bb7241bb95a453c5c01a15ac74b2eac" +checksum = "47253b98986dafc7a3e1cf3259194f1f47ac61abb57a57f46ec09e48d004ecda" [[package]] name = "assert-json-diff" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ "serde", "serde_json", @@ -121,9 +121,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -226,9 +226,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" [[package]] name = "cargo-readme" @@ -267,7 +267,7 @@ dependencies = [ "num-integer", "num-traits", "serde", - "time 0.1.43", + "time 0.1.44", "winapi", ] @@ -288,16 +288,16 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.18" +version = "3.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" +checksum = "44bbe24bbd31a185bc2c4f7c2abe80bea13a20d57ee4e55be70ac512bdc76417" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", "indexmap", - "lazy_static", + "once_cell", "strsim 0.10.0", "termcolor", "textwrap 0.15.0", @@ -305,9 +305,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.1.18" +version = "3.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -318,9 +318,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] @@ -398,9 +398,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -408,9 +408,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -419,33 +419,33 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "lazy_static", "memoffset", + "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ "cfg-if", - "lazy_static", + "once_cell", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -556,15 +556,15 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = "9d07a982d1fb29db01e5a59b1918e03da4df7297eaeee7686ac45542fd4e59c8" [[package]] name = "either" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "encode_unicode" @@ -596,9 +596,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ 
"instant", ] @@ -743,26 +743,26 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] name = "globset" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" +checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", "bstr", @@ -792,9 +792,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" @@ -847,9 +847,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -893,9 +893,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -974,9 +974,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.2" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", "hashbrown", @@ -1000,7 +1000,7 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", - "clap 3.1.18", + "clap 3.2.15", "hex", "log", "pubsys-config", @@ -1042,9 +1042,9 @@ checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ "wasm-bindgen", ] @@ -1183,9 +1183,9 @@ checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = 
"0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" @@ -1261,9 +1261,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -1321,9 +1321,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", "cfg-if", @@ -1383,9 +1383,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "memchr", ] @@ -1403,9 +1403,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] name = "opaque-debug" @@ -1415,9 +1415,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -1447,9 +1447,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.73" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg", "cc", @@ -1479,9 +1479,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.1.0" +version = "6.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" [[package]] name = "papergrid" @@ -1544,9 +1544,9 @@ dependencies = [ [[package]] name = "pem" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ "base64", ] @@ -1559,18 +1559,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = 
"78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", @@ -1627,9 +1627,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" dependencies = [ "unicode-ident", ] @@ -1640,7 +1640,7 @@ version = "0.1.0" dependencies = [ "async-trait", "chrono", - "clap 3.1.18", + "clap 3.2.15", "coldsnap", "duct", "futures", @@ -1714,9 +1714,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ "proc-macro2", ] @@ -1777,9 +1777,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -1797,9 +1797,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -1808,9 +1808,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -1823,9 +1823,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" +checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" dependencies = [ "base64", "bytes", @@ -1845,12 +1845,13 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls", - "rustls-pemfile 0.3.0", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2076,20 +2077,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.0", + "rustls-pemfile", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" -dependencies = [ - "base64", -] - [[package]] name = "rustls-pemfile" version = "1.0.0" @@ -2199,18 +2191,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" +checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.137" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" dependencies = [ "serde_derive", ] @@ -2227,9 +2219,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" dependencies = [ "proc-macro2", "quote", @@ -2249,9 +2241,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ "indexmap", "itoa", @@ -2282,9 +2274,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", "ryu", @@ -2366,20 +2358,23 @@ checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" dependencies = [ "log", "termcolor", - "time 0.3.9", + "time 0.3.11", ] [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "snafu" @@ -2464,9 +2459,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.96" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", @@ -2534,7 +2529,7 @@ dependencies = [ "anyhow", "bottlerocket-types", "bottlerocket-variant", - "clap 3.1.18", + "clap 3.2.15", "env_logger", "futures", "k8s-openapi", @@ -2587,19 +2582,20 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "time" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" dependencies = [ "itoa", "libc", @@ -2640,10 +2636,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg", "bytes", "libc", "memchr", @@ -2713,9 +2710,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", @@ -2754,9 +2751,9 @@ checksum = "aa7c7f42dea4b1b99439786f5633aeb9c14c1b53f75e282803c2ec2ad545873c" [[package]] name = "tough" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a72582c980b86ac5b86cd8deb4e6841b44efaed2db9efae9b486b98288d9de2" +checksum = "e1a0aa977ae8d619536dfcf7c27848bc280030ee18d358fa5c3174ad094e189c" dependencies = [ "chrono", "dyn-clone", @@ -2813,9 +2810,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", @@ -2856,15 +2853,15 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", "log", @@ -2875,9 +2872,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -2886,11 +2883,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" 
+checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -2910,9 +2907,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -2947,15 +2944,15 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" dependencies = [ "tinyvec", ] @@ -3053,9 +3050,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -3065,9 +3062,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3075,13 +3072,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -3090,9 +3087,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.30" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" +checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" dependencies = [ "cfg-if", "js-sys", @@ -3102,9 +3099,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3112,9 +3109,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.82" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -3125,15 +3122,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -3151,9 +3148,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ "webpki", ] @@ -3258,6 +3255,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.5" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" From 2bdd8f254c7ac0ab6c9e66814c0cf8dc97c0ca78 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 26 Jul 2022 18:42:57 -0700 Subject: [PATCH 0697/1356] tools: add license check exception for 'unicode-ident' 'unicode-ident' v1.0.2 added 'Unicode-DFS-2016' as a license. --- tools/deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/deny.toml b/tools/deny.toml index 3a1380be..b1549246 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -25,6 +25,7 @@ allow = [ exceptions = [ { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, + { name = "unicode-ident", allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"] }, ] # https://github.com/hsivonen/encoding_rs The non-test code that isn't generated from the WHATWG data in this crate is From 0b89f5c35a4167648af4cadc8a53562a6b96967b Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 22 Jul 2022 22:54:41 +0000 Subject: [PATCH 0698/1356] kernel-5.4: enable ZSTD compression for kernel modules Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.4/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 92b7ac81..94a9d2e3 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -77,6 +77,10 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y +# Enable ZSTD modules compression +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y + # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. 
CONFIG_SERIO_I8042=m From bf5c05f5a760b7612cb3c699da87341a57e6eb3a Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 22 Jul 2022 22:54:54 +0000 Subject: [PATCH 0699/1356] kernel-5.10: enable ZSTD compression for kernel modules Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 9f2bdfcc..1facb07a 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -82,6 +82,10 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y +# Enable ZSTD modules compression +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y + # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. CONFIG_SERIO_I8042=m From 0116a56fb69ead6886e3e567a19b86b4e7065879 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 22 Jul 2022 22:55:06 +0000 Subject: [PATCH 0700/1356] kernel-5.15: enable ZSTD compression for kernel modules Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.15/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index d3e9bfea..d3894016 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -82,6 +82,10 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y +# Enable ZSTD modules compression +# CONFIG_MODULE_COMPRESS_NONE is not set +CONFIG_MODULE_COMPRESS_ZSTD=y + # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. CONFIG_SERIO_I8042=m From 470f2c20072bb1d27771bfe07fc9f64412fefd45 Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Wed, 27 Jul 2022 11:07:06 -0600 Subject: [PATCH 0701/1356] Docs: adds case convention to CONTRIBUTING Signed-off-by: Kyle J. Davis --- CONTRIBUTING.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5daf2beb..c1f9e7f7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,6 +45,16 @@ Active development occurs under the `develop` branch. Bottlerocket uses both tags and branches for release alignment. Numbered releases are always associated with [tags that mirror the full SemVer 3-digit version number](https://github.com/bottlerocket-os/bottlerocket/tags) (e.g. `1.7.2`). [Branches are for patching only](https://github.com/bottlerocket-os/bottlerocket/branches/all): if a patch is required, a branch will be cut for that minor release line (e.g. `1.7.x`). As a consequence, some previous minor versions may not have a branch if they never required a subsequent patch. +## Filename case conventions + +Bottlerocket follows a few basic filename case conventions: + +- All extensions are lowercase +- Build related configuration files always start with a capital letter (e.g. `Infra.toml`, `Release.toml`), +- All caps is used for documents and licenses (e.g. `PUBLISHING.md`, `TRADEMARKS.md`), +- All lower case is used for all other files (e.g. `sample-eksctl.yaml`, `main.rs`). + + ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. 
As this repository uses GitHub issue [labels](https://github.com/bottlerocket-os/bottlerocket/labels), looking at any ['status/helpwelcome'](https://github.com/bottlerocket-os/bottlerocket/labels/status%2Fhelpwelcome) issues is a great place to start. From 5e620a3c60dccf81fe831d5f3149f1822bc0754f Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Wed, 27 Jul 2022 11:11:15 -0600 Subject: [PATCH 0702/1356] Docs: adds missing comma Signed-off-by: Kyle J. Davis --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c1f9e7f7..eae718f9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,7 +49,7 @@ Bottlerocket uses both tags and branches for release alignment. Numbered release Bottlerocket follows a few basic filename case conventions: -- All extensions are lowercase +- All extensions are lowercase, - Build related configuration files always start with a capital letter (e.g. `Infra.toml`, `Release.toml`), - All caps is used for documents and licenses (e.g. `PUBLISHING.md`, `TRADEMARKS.md`), - All lower case is used for all other files (e.g. `sample-eksctl.yaml`, `main.rs`). From b5e06d5c8e294b20c46089471d0c0bd519af6e96 Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Wed, 27 Jul 2022 10:45:03 -0600 Subject: [PATCH 0703/1356] Docs: Add community section to readme Signed-off-by: Kyle J. Davis --- README.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index ca6ff593..b9ae2d60 100644 --- a/README.md +++ b/README.md @@ -22,21 +22,25 @@ Some notable features include: * [Modeled configuration](#settings) that's automatically migrated through updates. * [Security](#security) as a top priority. -## Contact us +## Participate in the Community -If you find a security issue, please [contact our security team](https://github.com/bottlerocket-os/bottlerocket/security/policy) rather than opening an issue. +There are many ways to take part in the Bottlerocket community: -If you're interested in contributing, thank you! +- [Join on Meetup](https://www.meetup.com/bottlerocket-community/) to hear about the latest Bottlerocket (virtual/in-person) events and community meetings. The [next community meeting is August 24, 2022](https://www.meetup.com/bottlerocket-community/events/287425423/). +- [Start or join a discussion](https://github.com/bottlerocket-os/bottlerocket/discussions) if you have questions about Bottlerocket. +- If you're interested in contributing, thank you! Please see our [contributor's guide](CONTRIBUTING.md). +## Contact us + +If you find a security issue, please [contact our security team](https://github.com/bottlerocket-os/bottlerocket/security/policy) rather than opening an issue. + We use GitHub issues to track other bug reports and feature requests. You can look at [existing issues](https://github.com/bottlerocket-os/bottlerocket/issues) to see whether your concern is already known. If not, you can select from a few templates and get some guidance on the type of information that would be most helpful. [Contact us with a new issue here.](https://github.com/bottlerocket-os/bottlerocket/issues/new/choose) -If you just have questions about Bottlerocket, please feel free to [start or join a discussion](https://github.com/bottlerocket-os/bottlerocket/discussions). - We don't have other communication channels set up quite yet, but don't worry about making an issue or a discussion thread! 
You can let us know about things that seem difficult, or even ways you might like to help. From 01f1fe2b7fac7db253c21681f0fcb3e1653e2338 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 27 Jul 2022 18:56:39 +0000 Subject: [PATCH 0704/1356] packages: update kernel-5.10 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 1c88f1a9..c257034e 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/fd3a270843eca4874b201fd3554b484a79b18edc0d3b845ff3288dd9dd0d69a8/kernel-5.10.118-111.515.amzn2.src.rpm" -sha512 = "f9d8d4f43757a84072e585b20f4bbec188d4d28d12c7183dae65348ff487508eab999048f1796f2f4bb1a8de71412156eae62248343f3a7e579d0babfce9fd64" +url = "https://cdn.amazonlinux.com/blobstore/04a89d2664b3be51cad04255bde6ff8ee1620a5281b0dc1f2f4707e1e6cfe150/kernel-5.10.130-118.517.amzn2.src.rpm" +sha512 = "3047b80f7f8d703b3c0ab9785493245d01b27faa5948fddbcb9d0843c5bfcfa0972b61afa70551a2cc3d2c8b92ec0069993ed92ca12459f7ec67d03a00a031b7" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 4e6b8e8b..61f3b848 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -2,13 +2,13 @@ %global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.10 -Version: 5.10.118 +Version: 5.10.130 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/fd3a270843eca4874b201fd3554b484a79b18edc0d3b845ff3288dd9dd0d69a8/kernel-5.10.118-111.515.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/04a89d2664b3be51cad04255bde6ff8ee1620a5281b0dc1f2f4707e1e6cfe150/kernel-5.10.130-118.517.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-metal From b2320e63ee5eb8d8b5645b82ff5bd01d04000c2c Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 27 Jul 2022 18:57:49 +0000 Subject: [PATCH 0705/1356] packages: update kernel-5.15 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 94dd6ff7..7a19e38b 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/14fac2ab958b3193693bea1691e23f27600ba44cb63009bdc6cc9368271227a5/kernel-5.15.43-20.123.amzn2.src.rpm" -sha512 = "0d54742e3d4cf03dcfc398f0ebcd2c3294119683ec830efb79a0470e71f039a58d1669d1f84d21827be7d5a785225ffc15e4c0613c154ff7c54de2a208d77c5b" +url = "https://cdn.amazonlinux.com/blobstore/47fc1797c6cf0a9ee2cb4c2ccba9c73a47c0ff75bdb22bf19e939083029881dc/kernel-5.15.54-25.126.amzn2.src.rpm" +sha512 = "5c08b5cd682adccd1bb9e2a418ae5bbb24ddcdc53e6ae46ea9760415989a25e02066db9e1aa6240455523189fb319f3aa0cb5b1f9ae8b5bccda8f4c46f2cb7a8" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index dd2f79df..cc1798ce 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -2,13 +2,13 @@ %global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.15 -Version: 5.15.43 +Version: 5.15.54 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/14fac2ab958b3193693bea1691e23f27600ba44cb63009bdc6cc9368271227a5/kernel-5.15.43-20.123.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/47fc1797c6cf0a9ee2cb4c2ccba9c73a47c0ff75bdb22bf19e939083029881dc/kernel-5.15.54-25.126.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-metal From 0d8316e4cccc399f2ff0813dcf1714648ea1fd05 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 27 Jul 2022 18:58:02 +0000 Subject: [PATCH 0706/1356] packages: update kernel-5.4 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.4/Cargo.toml | 4 ++-- packages/kernel-5.4/kernel-5.4.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml index 3dcc7f94..ca9ac82e 100644 --- a/packages/kernel-5.4/Cargo.toml +++ b/packages/kernel-5.4/Cargo.toml @@ -13,8 +13,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/9959b4af12a63755e451619398b6471f3c6a496b854ce73740c786907f67560a/kernel-5.4.196-108.356.amzn2.src.rpm" -sha512 = "4b063d857d05a2796fc607ba425d5f75964e1123b24cb0f0ab8e1cb8334944b9fc5d734c83f1d5ef186b2ac38fb7ece5be62a49579f3b4187ee380cd28bdfaaf" +url = "https://cdn.amazonlinux.com/blobstore/bd95a16ec3ce5de68339dbf6dedf8912f4ed5abd35716967c5eb4c93d4f8057e/kernel-5.4.204-113.362.amzn2.src.rpm" +sha512 = "420445e7699839b61d69d0fdc3a0a543fb2025fb2901b25075a0195af6b6f0cd74d138a560c4e83f4fc44394007cfec239245fc4f1afdfdf8c8ea7c8cd05bfad" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec index 635c3140..8b4f3f0e 100644 --- a/packages/kernel-5.4/kernel-5.4.spec +++ b/packages/kernel-5.4/kernel-5.4.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.4 -Version: 5.4.196 +Version: 5.4.204 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/9959b4af12a63755e451619398b6471f3c6a496b854ce73740c786907f67560a/kernel-5.4.196-108.356.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/bd95a16ec3ce5de68339dbf6dedf8912f4ed5abd35716967c5eb4c93d4f8057e/kernel-5.4.204-113.362.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 83813ddcceb5ef02803fc249f9db2715c7208fcd Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 5 Aug 2022 18:19:32 +0000 Subject: [PATCH 0707/1356] kernel-5.4: Switch from zstd to xz for module compression Stop compressing modules with zstd for now and switch to xz instead. It turned out zstd support in kmod is not yet widespread, which becomes a problem when trying to load modules from a container and not the Bottlerocket host system. Fixes: a9efffcdab4c ("kernel-5.4: enable ZSTD compression for kernel modules") Signed-off-by: Markus Boehme --- packages/kernel-5.4/config-bottlerocket | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket index 94a9d2e3..b6036719 100644 --- a/packages/kernel-5.4/config-bottlerocket +++ b/packages/kernel-5.4/config-bottlerocket @@ -77,9 +77,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable ZSTD modules compression +# Enable xz modules compression CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_COMPRESS_XZ=y # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. From 8b2dd23359cd9bd7f942ffb856c326f7032fbf6f Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 5 Aug 2022 18:25:51 +0000 Subject: [PATCH 0708/1356] kernel-5.10: Switch from zstd to xz for module compression Stop compressing modules with zstd for now and switch to xz instead. It turned out zstd support in kmod is not yet widespread, which becomes a problem when trying to load modules from a container and not the Bottlerocket host system. Fixes: 00a7f93c961b ("kernel-5.10: enable ZSTD compression for kernel modules") Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 1facb07a..3fa5c9ae 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -82,9 +82,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable ZSTD modules compression +# Enable xz modules compression CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_COMPRESS_XZ=y # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. From c756f9eb76daf8cae89191467f701a798e9fe992 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 5 Aug 2022 18:27:17 +0000 Subject: [PATCH 0709/1356] kernel-5.15: Switch from zstd to xz for module compression Stop compressing modules with zstd for now and switch to xz instead. It turned out zstd support in kmod is not yet widespread, which becomes a problem when trying to load modules from a container and not the Bottlerocket host system. 
Fixes: 9d019a9070f6 ("kernel-5.15: enable ZSTD compression for kernel modules") Signed-off-by: Markus Boehme --- packages/kernel-5.15/config-bottlerocket | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index d3894016..de01ebf1 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -82,9 +82,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable ZSTD modules compression +# Enable xz modules compression # CONFIG_MODULE_COMPRESS_NONE is not set -CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_COMPRESS_XZ=y # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. From 8ae3e73fa4274232b737ea155ad4f5bc798caeba Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 2 Aug 2022 16:50:07 -0700 Subject: [PATCH 0710/1356] Remove aws-k8s-1.19 variant EKS ended 1.19 support on August 1st, 2022. --- .github/workflows/build.yml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3829caf5..3b7115fe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.19, aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-ecs-1] + variant: [aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] diff --git a/README.md b/README.md index b9ae2d60..a091960d 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,6 @@ For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an ima The following variants support EKS, as described above: -* `aws-k8s-1.19` * `aws-k8s-1.20` * `aws-k8s-1.21` * `aws-k8s-1.22` @@ -89,6 +88,7 @@ The following variants are no longer supported: * `aws-k8s-1.16` * `aws-k8s-1.17` * `aws-k8s-1.18` +* `aws-k8s-1.19` We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). From dbbf216f41822d8578866563b659dc8ae6d446c5 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 2 Aug 2022 17:12:32 -0700 Subject: [PATCH 0711/1356] README: clarify defaults for settings.kernel.lockdown Clarifies where lockdown is set to "none" by default --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a091960d..25b0067a 100644 --- a/README.md +++ b/README.md @@ -671,7 +671,7 @@ Here are the metrics settings: #### Kernel settings * `settings.kernel.lockdown`: This allows further restrictions on what the Linux kernel will allow, for example preventing the loading of unsigned modules. - May be set to "none" (the default in older [variants](variants/), up through aws-k8s-1.19), "integrity" (the default for newer [variants](variants/)), or "confidentiality". + May be set to "none" (the default in `*-nvidia` and `*-dev` variants), "integrity" (the default for other variants), or "confidentiality". **Important note:** this setting cannot be lowered (toward 'none') at runtime. You must reboot for a change to a lower level to take effect. * `settings.kernel.modules..allowed`: Whether the named kernel module is allowed to be loaded. 
From 49cb615201f97980adeab474453b5507991694af Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 2 Aug 2022 16:52:27 -0700 Subject: [PATCH 0712/1356] packages: remove kernel-5.4 package There are no longer any variants that still depend on the 5.4 kernel. --- ...-prepare-target-for-external-modules.patch | 50 -- ...-for-preboot-environment-improve-per.patch | 101 ---- ...1-lib-Add-zstd-support-to-decompress.patch | 460 ------------------ ...d-support-for-zstd-compressed-kernel.patch | 119 ----- ...ump-ZO_z_extra_bytes-margin-for-zstd.patch | 50 -- ...d-support-for-ZSTD-compressed-kernel.patch | 175 ------- ....gitignore-Add-ZSTD-compressed-files.patch | 34 -- ...le-strip-compression-code-into-scrip.patch | 184 ------- ...-support-for-zstd-compressed-modules.patch | 82 ---- packages/kernel-5.4/Cargo.toml | 20 - packages/kernel-5.4/build.rs | 9 - packages/kernel-5.4/config-bottlerocket | 121 ----- packages/kernel-5.4/kernel-5.4.spec | 272 ----------- packages/kernel-5.4/latest-srpm-url.sh | 2 - packages/kernel-5.4/pkg.rs | 1 - 15 files changed, 1680 deletions(-) delete mode 100644 packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch delete mode 100644 packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch delete mode 100644 packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch delete mode 100644 packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch delete mode 100644 packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch delete mode 100644 packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch delete mode 100644 packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch delete mode 100644 packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch delete mode 100644 packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch delete mode 100644 packages/kernel-5.4/Cargo.toml delete mode 100644 packages/kernel-5.4/build.rs delete mode 100644 packages/kernel-5.4/config-bottlerocket delete mode 100644 packages/kernel-5.4/kernel-5.4.spec delete mode 100755 packages/kernel-5.4/latest-srpm-url.sh delete mode 100644 packages/kernel-5.4/pkg.rs diff --git a/packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch deleted file mode 100644 index 80142928..00000000 --- a/packages/kernel-5.4/1001-Makefile-add-prepare-target-for-external-modules.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 6e4fa756a327a510f8713d60dc257aaeed5e33d7 Mon Sep 17 00:00:00 2001 -From: Ben Cressey -Date: Fri, 13 Nov 2020 23:37:11 +0000 -Subject: [PATCH] Makefile: add prepare target for external modules - -We need to ensure that native versions of programs like `objtool` are -built before trying to build out-of-tree modules, or else the build -will fail. - -Unlike other distributions, we cannot include these programs in our -kernel-devel archive, because we rely on cross-compilation: these are -"host" programs and may not match the architecture of the target. - -Ideally, out-of-tree builds would run `make prepare` first, so that -these programs could be compiled in the normal fashion. We ship all -the files needed for this to work. However, this requirement is -specific to our use case, and DKMS does not support it. 
- -Adding a minimal prepare target to the dependency graph causes the -programs to be built automatically and improves compatibility with -existing solutions. - -Signed-off-by: Ben Cressey ---- - Makefile | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/Makefile b/Makefile -index 29948bc4a0d2..2f766911437c 100644 ---- a/Makefile -+++ b/Makefile -@@ -1613,6 +1613,15 @@ $(objtree)/Module.symvers: - echo " is missing; modules will have no dependencies and modversions."; \ - echo ) - -+PHONY += modules_prepare -+modules_prepare: $(objtool_target) -+ $(Q)$(MAKE) $(build)=scripts/basic -+ $(Q)$(MAKE) $(build)=scripts/dtc -+ $(Q)$(MAKE) $(build)=scripts/mod -+ $(Q)$(MAKE) $(build)=scripts -+ -+prepare: modules_prepare -+ - build-dirs := $(KBUILD_EXTMOD) - PHONY += modules - modules: descend $(objtree)/Module.symvers --- -2.21.0 - diff --git a/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch b/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch deleted file mode 100644 index a79dbacc..00000000 --- a/packages/kernel-5.4/2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch +++ /dev/null @@ -1,101 +0,0 @@ -From bd475ee90b2b4ce6eae2ccbb5ef214557e937145 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Thu, 30 Jul 2020 12:08:34 -0700 -Subject: [PATCH 2000/2007] lib: Prepare zstd for preboot environment, improve - performance - -These changes are necessary to get the build to work in the preboot -environment, and to get reasonable performance: - -- Remove a double definition of the CHECK_F macro when the zstd - library is amalgamated. - -- Switch ZSTD_copy8() to __builtin_memcpy(), because in the preboot - environment on x86 gcc can't inline `memcpy()` otherwise. - -- Limit the gcc hack in ZSTD_wildcopy() to the broken gcc version. See - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. - -ZSTD_copy8() and ZSTD_wildcopy() are in the core of the zstd hot loop. -So outlining these calls to memcpy(), and having an extra branch are very -detrimental to performance. 
- -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-2-nickrterrell@gmail.com -(cherry picked from commit 6d25a633ea68a103c7293d16eb69a7d4689075ad) ---- - lib/zstd/fse_decompress.c | 9 +-------- - lib/zstd/zstd_internal.h | 14 ++++++++++++-- - 2 files changed, 13 insertions(+), 10 deletions(-) - -diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c -index a84300e5a013..0b353530fb3f 100644 ---- a/lib/zstd/fse_decompress.c -+++ b/lib/zstd/fse_decompress.c -@@ -47,6 +47,7 @@ - ****************************************************************/ - #include "bitstream.h" - #include "fse.h" -+#include "zstd_internal.h" - #include - #include - #include /* memcpy, memset */ -@@ -60,14 +61,6 @@ - enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ - } /* use only *after* variable declarations */ - --/* check and forward error code */ --#define CHECK_F(f) \ -- { \ -- size_t const e = f; \ -- if (FSE_isError(e)) \ -- return e; \ -- } -- - /* ************************************************************** - * Templates - ****************************************************************/ -diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h -index 1a79fab9e13a..dac753397f86 100644 ---- a/lib/zstd/zstd_internal.h -+++ b/lib/zstd/zstd_internal.h -@@ -127,7 +127,14 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; - * Shared functions to include for inlining - *********************************************/ - ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { -- memcpy(dst, src, 8); -+ /* -+ * zstd relies heavily on gcc being able to analyze and inline this -+ * memcpy() call, since it is called in a tight loop. Preboot mode -+ * is compiled in freestanding mode, which stops gcc from analyzing -+ * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a -+ * regular memcpy(). -+ */ -+ __builtin_memcpy(dst, src, 8); - } - /*! ZSTD_wildcopy() : - * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ -@@ -137,13 +144,16 @@ ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; - BYTE* const oend = op + length; -- /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. -+#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200 -+ /* -+ * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. - * Avoid the bad case where the loop only runs once by handling the - * special case separately. This doesn't trigger the bug because it - * doesn't involve pointer/integer overflow. - */ - if (length <= 8) - return ZSTD_copy8(dst, src); -+#endif - do { - ZSTD_copy8(op, ip); - op += 8; --- -2.30.2 - diff --git a/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch b/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch deleted file mode 100644 index 4572db2c..00000000 --- a/packages/kernel-5.4/2001-lib-Add-zstd-support-to-decompress.patch +++ /dev/null @@ -1,460 +0,0 @@ -From 30ff8b18827f5fc6c31808a5868324867688cbdd Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Thu, 30 Jul 2020 12:08:35 -0700 -Subject: [PATCH 2001/2007] lib: Add zstd support to decompress - -- Add unzstd() and the zstd decompress interface. - -- Add zstd support to decompress_method(). - -The decompress_method() and unzstd() functions are used to decompress -the initramfs and the initrd. 
The __decompress() function is used in -the preboot environment to decompress a zstd compressed kernel. - -The zstd decompression function allows the input and output buffers to -overlap because that is used by x86 kernel decompression. - -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-3-nickrterrell@gmail.com -(cherry picked from commit 4963bb2b89884bbdb7e33e6a09c159551e9627aa) ---- - include/linux/decompress/unzstd.h | 11 + - lib/Kconfig | 4 + - lib/Makefile | 1 + - lib/decompress.c | 5 + - lib/decompress_unzstd.c | 345 ++++++++++++++++++++++++++++++ - 5 files changed, 366 insertions(+) - create mode 100644 include/linux/decompress/unzstd.h - create mode 100644 lib/decompress_unzstd.c - -diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h -new file mode 100644 -index 000000000000..56d539ae880f ---- /dev/null -+++ b/include/linux/decompress/unzstd.h -@@ -0,0 +1,11 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef LINUX_DECOMPRESS_UNZSTD_H -+#define LINUX_DECOMPRESS_UNZSTD_H -+ -+int unzstd(unsigned char *inbuf, long len, -+ long (*fill)(void*, unsigned long), -+ long (*flush)(void*, unsigned long), -+ unsigned char *output, -+ long *pos, -+ void (*error_fn)(char *x)); -+#endif -diff --git a/lib/Kconfig b/lib/Kconfig -index 3321d04dfa5a..ad33691e129c 100644 ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -329,6 +329,10 @@ config DECOMPRESS_LZ4 - select LZ4_DECOMPRESS - tristate - -+config DECOMPRESS_ZSTD -+ select ZSTD_DECOMPRESS -+ tristate -+ - # - # Generic allocator support is selected if needed - # -diff --git a/lib/Makefile b/lib/Makefile -index 6bf453fb731d..f948c1f6534d 100644 ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -157,6 +157,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o - lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o - lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o - lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o -+lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o - - obj-$(CONFIG_TEXTSEARCH) += textsearch.o - obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o -diff --git a/lib/decompress.c b/lib/decompress.c -index 857ab1af1ef3..ab3fc90ffc64 100644 ---- a/lib/decompress.c -+++ b/lib/decompress.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -37,6 +38,9 @@ - #ifndef CONFIG_DECOMPRESS_LZ4 - # define unlz4 NULL - #endif -+#ifndef CONFIG_DECOMPRESS_ZSTD -+# define unzstd NULL -+#endif - - struct compress_format { - unsigned char magic[2]; -@@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = { - { {0xfd, 0x37}, "xz", unxz }, - { {0x89, 0x4c}, "lzo", unlzo }, - { {0x02, 0x21}, "lz4", unlz4 }, -+ { {0x28, 0xb5}, "zstd", unzstd }, - { {0, 0}, NULL, NULL } - }; - -diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c -new file mode 100644 -index 000000000000..0ad2c15479ed ---- /dev/null -+++ b/lib/decompress_unzstd.c -@@ -0,0 +1,345 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Important notes about in-place decompression -+ * -+ * At least on x86, the kernel is decompressed in place: the compressed data -+ * is placed to the end of the output buffer, and the decompressor overwrites -+ * most of the compressed data. There must be enough safety margin to -+ * guarantee that the write position is always behind the read position. -+ * -+ * The safety margin for ZSTD with a 128 KB block size is calculated below. 
-+ * Note that the margin with ZSTD is bigger than with GZIP or XZ! -+ * -+ * The worst case for in-place decompression is that the beginning of -+ * the file is compressed extremely well, and the rest of the file is -+ * uncompressible. Thus, we must look for worst-case expansion when the -+ * compressor is encoding uncompressible data. -+ * -+ * The structure of the .zst file in case of a compresed kernel is as follows. -+ * Maximum sizes (as bytes) of the fields are in parenthesis. -+ * -+ * Frame Header: (18) -+ * Blocks: (N) -+ * Checksum: (4) -+ * -+ * The frame header and checksum overhead is at most 22 bytes. -+ * -+ * ZSTD stores the data in blocks. Each block has a header whose size is -+ * a 3 bytes. After the block header, there is up to 128 KB of payload. -+ * The maximum uncompressed size of the payload is 128 KB. The minimum -+ * uncompressed size of the payload is never less than the payload size -+ * (excluding the block header). -+ * -+ * The assumption, that the uncompressed size of the payload is never -+ * smaller than the payload itself, is valid only when talking about -+ * the payload as a whole. It is possible that the payload has parts where -+ * the decompressor consumes more input than it produces output. Calculating -+ * the worst case for this would be tricky. Instead of trying to do that, -+ * let's simply make sure that the decompressor never overwrites any bytes -+ * of the payload which it is currently reading. -+ * -+ * Now we have enough information to calculate the safety margin. We need -+ * - 22 bytes for the .zst file format headers; -+ * - 3 bytes per every 128 KiB of uncompressed size (one block header per -+ * block); and -+ * - 128 KiB (biggest possible zstd block size) to make sure that the -+ * decompressor never overwrites anything from the block it is currently -+ * reading. -+ * -+ * We get the following formula: -+ * -+ * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072 -+ * <= 22 + (uncompressed_size >> 15) + 131072 -+ */ -+ -+/* -+ * Preboot environments #include "path/to/decompress_unzstd.c". -+ * All of the source files we depend on must be #included. -+ * zstd's only source dependeny is xxhash, which has no source -+ * dependencies. -+ * -+ * When UNZSTD_PREBOOT is defined we declare __decompress(), which is -+ * used for kernel decompression, instead of unzstd(). -+ * -+ * Define __DISABLE_EXPORTS in preboot environments to prevent symbols -+ * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro. -+ */ -+#ifdef STATIC -+# define UNZSTD_PREBOOT -+# include "xxhash.c" -+# include "zstd/entropy_common.c" -+# include "zstd/fse_decompress.c" -+# include "zstd/huf_decompress.c" -+# include "zstd/zstd_common.c" -+# include "zstd/decompress.c" -+#endif -+ -+#include -+#include -+#include -+ -+/* 128MB is the maximum window size supported by zstd. */ -+#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX) -+/* -+ * Size of the input and output buffers in multi-call mode. -+ * Pick a larger size because it isn't used during kernel decompression, -+ * since that is single pass, and we have to allocate a large buffer for -+ * zstd's window anyway. The larger size speeds up initramfs decompression. 
-+ */ -+#define ZSTD_IOBUF_SIZE (1 << 17) -+ -+static int INIT handle_zstd_error(size_t ret, void (*error)(char *x)) -+{ -+ const int err = ZSTD_getErrorCode(ret); -+ -+ if (!ZSTD_isError(ret)) -+ return 0; -+ -+ switch (err) { -+ case ZSTD_error_memory_allocation: -+ error("ZSTD decompressor ran out of memory"); -+ break; -+ case ZSTD_error_prefix_unknown: -+ error("Input is not in the ZSTD format (wrong magic bytes)"); -+ break; -+ case ZSTD_error_dstSize_tooSmall: -+ case ZSTD_error_corruption_detected: -+ case ZSTD_error_checksum_wrong: -+ error("ZSTD-compressed data is corrupt"); -+ break; -+ default: -+ error("ZSTD-compressed data is probably corrupt"); -+ break; -+ } -+ return -1; -+} -+ -+/* -+ * Handle the case where we have the entire input and output in one segment. -+ * We can allocate less memory (no circular buffer for the sliding window), -+ * and avoid some memcpy() calls. -+ */ -+static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf, -+ long out_len, long *in_pos, -+ void (*error)(char *x)) -+{ -+ const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); -+ void *wksp = large_malloc(wksp_size); -+ ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size); -+ int err; -+ size_t ret; -+ -+ if (dctx == NULL) { -+ error("Out of memory while allocating ZSTD_DCtx"); -+ err = -1; -+ goto out; -+ } -+ /* -+ * Find out how large the frame actually is, there may be junk at -+ * the end of the frame that ZSTD_decompressDCtx() can't handle. -+ */ -+ ret = ZSTD_findFrameCompressedSize(in_buf, in_len); -+ err = handle_zstd_error(ret, error); -+ if (err) -+ goto out; -+ in_len = (long)ret; -+ -+ ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len); -+ err = handle_zstd_error(ret, error); -+ if (err) -+ goto out; -+ -+ if (in_pos != NULL) -+ *in_pos = in_len; -+ -+ err = 0; -+out: -+ if (wksp != NULL) -+ large_free(wksp); -+ return err; -+} -+ -+static int INIT __unzstd(unsigned char *in_buf, long in_len, -+ long (*fill)(void*, unsigned long), -+ long (*flush)(void*, unsigned long), -+ unsigned char *out_buf, long out_len, -+ long *in_pos, -+ void (*error)(char *x)) -+{ -+ ZSTD_inBuffer in; -+ ZSTD_outBuffer out; -+ ZSTD_frameParams params; -+ void *in_allocated = NULL; -+ void *out_allocated = NULL; -+ void *wksp = NULL; -+ size_t wksp_size; -+ ZSTD_DStream *dstream; -+ int err; -+ size_t ret; -+ -+ if (out_len == 0) -+ out_len = LONG_MAX; /* no limit */ -+ -+ if (fill == NULL && flush == NULL) -+ /* -+ * We can decompress faster and with less memory when we have a -+ * single chunk. -+ */ -+ return decompress_single(in_buf, in_len, out_buf, out_len, -+ in_pos, error); -+ -+ /* -+ * If in_buf is not provided, we must be using fill(), so allocate -+ * a large enough buffer. If it is provided, it must be at least -+ * ZSTD_IOBUF_SIZE large. -+ */ -+ if (in_buf == NULL) { -+ in_allocated = large_malloc(ZSTD_IOBUF_SIZE); -+ if (in_allocated == NULL) { -+ error("Out of memory while allocating input buffer"); -+ err = -1; -+ goto out; -+ } -+ in_buf = in_allocated; -+ in_len = 0; -+ } -+ /* Read the first chunk, since we need to decode the frame header. */ -+ if (fill != NULL) -+ in_len = fill(in_buf, ZSTD_IOBUF_SIZE); -+ if (in_len < 0) { -+ error("ZSTD-compressed data is truncated"); -+ err = -1; -+ goto out; -+ } -+ /* Set the first non-empty input buffer. */ -+ in.src = in_buf; -+ in.pos = 0; -+ in.size = in_len; -+ /* Allocate the output buffer if we are using flush(). 
*/ -+ if (flush != NULL) { -+ out_allocated = large_malloc(ZSTD_IOBUF_SIZE); -+ if (out_allocated == NULL) { -+ error("Out of memory while allocating output buffer"); -+ err = -1; -+ goto out; -+ } -+ out_buf = out_allocated; -+ out_len = ZSTD_IOBUF_SIZE; -+ } -+ /* Set the output buffer. */ -+ out.dst = out_buf; -+ out.pos = 0; -+ out.size = out_len; -+ -+ /* -+ * We need to know the window size to allocate the ZSTD_DStream. -+ * Since we are streaming, we need to allocate a buffer for the sliding -+ * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX -+ * (8 MB), so it is important to use the actual value so as not to -+ * waste memory when it is smaller. -+ */ -+ ret = ZSTD_getFrameParams(¶ms, in.src, in.size); -+ err = handle_zstd_error(ret, error); -+ if (err) -+ goto out; -+ if (ret != 0) { -+ error("ZSTD-compressed data has an incomplete frame header"); -+ err = -1; -+ goto out; -+ } -+ if (params.windowSize > ZSTD_WINDOWSIZE_MAX) { -+ error("ZSTD-compressed data has too large a window size"); -+ err = -1; -+ goto out; -+ } -+ -+ /* -+ * Allocate the ZSTD_DStream now that we know how much memory is -+ * required. -+ */ -+ wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize); -+ wksp = large_malloc(wksp_size); -+ dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size); -+ if (dstream == NULL) { -+ error("Out of memory while allocating ZSTD_DStream"); -+ err = -1; -+ goto out; -+ } -+ -+ /* -+ * Decompression loop: -+ * Read more data if necessary (error if no more data can be read). -+ * Call the decompression function, which returns 0 when finished. -+ * Flush any data produced if using flush(). -+ */ -+ if (in_pos != NULL) -+ *in_pos = 0; -+ do { -+ /* -+ * If we need to reload data, either we have fill() and can -+ * try to get more data, or we don't and the input is truncated. -+ */ -+ if (in.pos == in.size) { -+ if (in_pos != NULL) -+ *in_pos += in.pos; -+ in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1; -+ if (in_len < 0) { -+ error("ZSTD-compressed data is truncated"); -+ err = -1; -+ goto out; -+ } -+ in.pos = 0; -+ in.size = in_len; -+ } -+ /* Returns zero when the frame is complete. */ -+ ret = ZSTD_decompressStream(dstream, &out, &in); -+ err = handle_zstd_error(ret, error); -+ if (err) -+ goto out; -+ /* Flush all of the data produced if using flush(). 
*/ -+ if (flush != NULL && out.pos > 0) { -+ if (out.pos != flush(out.dst, out.pos)) { -+ error("Failed to flush()"); -+ err = -1; -+ goto out; -+ } -+ out.pos = 0; -+ } -+ } while (ret != 0); -+ -+ if (in_pos != NULL) -+ *in_pos += in.pos; -+ -+ err = 0; -+out: -+ if (in_allocated != NULL) -+ large_free(in_allocated); -+ if (out_allocated != NULL) -+ large_free(out_allocated); -+ if (wksp != NULL) -+ large_free(wksp); -+ return err; -+} -+ -+#ifndef UNZSTD_PREBOOT -+STATIC int INIT unzstd(unsigned char *buf, long len, -+ long (*fill)(void*, unsigned long), -+ long (*flush)(void*, unsigned long), -+ unsigned char *out_buf, -+ long *pos, -+ void (*error)(char *x)) -+{ -+ return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); -+} -+#else -+STATIC int INIT __decompress(unsigned char *buf, long len, -+ long (*fill)(void*, unsigned long), -+ long (*flush)(void*, unsigned long), -+ unsigned char *out_buf, long out_len, -+ long *pos, -+ void (*error)(char *x)) -+{ -+ return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); -+} -+#endif --- -2.30.2 - diff --git a/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch b/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch deleted file mode 100644 index 9eef1158..00000000 --- a/packages/kernel-5.4/2002-init-Add-support-for-zstd-compressed-kernel.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 306c3246fc07136e55747b9d4016e043bb77b00a Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Thu, 30 Jul 2020 12:08:36 -0700 -Subject: [PATCH 2002/2007] init: Add support for zstd compressed kernel - -- Add the zstd and zstd22 cmds to scripts/Makefile.lib - -- Add the HAVE_KERNEL_ZSTD and KERNEL_ZSTD options - -Architecture specific support is still needed for decompression. 
- -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-4-nickrterrell@gmail.com -(cherry picked from commit 48f7ddf785af24aa380f3282d8d4400883d0099e) ---- - Makefile | 3 ++- - init/Kconfig | 15 ++++++++++++++- - scripts/Makefile.lib | 22 ++++++++++++++++++++++ - 3 files changed, 38 insertions(+), 2 deletions(-) - -diff --git a/Makefile b/Makefile -index e51077a8080d..3f593214a087 100644 ---- a/Makefile -+++ b/Makefile -@@ -448,6 +448,7 @@ KLZOP = lzop - LZMA = lzma - LZ4 = lz4c - XZ = xz -+ZSTD = zstd - - CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ - -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) -@@ -496,7 +497,7 @@ CLANG_FLAGS := - export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC - export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL - export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX --export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ -+export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD - export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE - - export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS -diff --git a/init/Kconfig b/init/Kconfig -index f23e90d9935f..4dc3ea198a2c 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -159,13 +159,16 @@ config HAVE_KERNEL_LZO - config HAVE_KERNEL_LZ4 - bool - -+config HAVE_KERNEL_ZSTD -+ bool -+ - config HAVE_KERNEL_UNCOMPRESSED - bool - - choice - prompt "Kernel compression mode" - default KERNEL_GZIP -- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED -+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_ZSTD || HAVE_KERNEL_UNCOMPRESSED - help - The linux kernel is a kind of self-extracting executable. - Several compression algorithms are available, which differ -@@ -244,6 +247,16 @@ config KERNEL_LZ4 - is about 8% bigger than LZO. But the decompression speed is - faster than LZO. - -+config KERNEL_ZSTD -+ bool "ZSTD" -+ depends on HAVE_KERNEL_ZSTD -+ help -+ ZSTD is a compression algorithm targeting intermediate compression -+ with fast decompression speed. It will compress better than GZIP and -+ decompress around the same speed as LZO, but slower than LZ4. You -+ will need at least 192 KB RAM or more for booting. The zstd command -+ line tool is required for compression. -+ - config KERNEL_UNCOMPRESSED - bool "None" - depends on HAVE_KERNEL_UNCOMPRESSED -diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib -index a6d0044328b1..698b58774ed7 100644 ---- a/scripts/Makefile.lib -+++ b/scripts/Makefile.lib -@@ -383,6 +383,28 @@ quiet_cmd_xzkern = XZKERN $@ - quiet_cmd_xzmisc = XZMISC $@ - cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@ - -+# ZSTD -+# --------------------------------------------------------------------------- -+# Appends the uncompressed size of the data using size_append. The .zst -+# format has the size information available at the beginning of the file too, -+# but it's in a more complex format and it's good to avoid changing the part -+# of the boot code that reads the uncompressed size. -+# -+# Note that the bytes added by size_append will make the zstd tool think that -+# the file is corrupt. This is expected. 
-+# -+# zstd uses a maximum window size of 8 MB. zstd22 uses a maximum window size of -+# 128 MB. zstd22 is used for kernel compression because it is decompressed in a -+# single pass, so zstd doesn't need to allocate a window buffer. When streaming -+# decompression is used, like initramfs decompression, zstd22 should likely not -+# be used because it would require zstd to allocate a 128 MB buffer. -+ -+quiet_cmd_zstd = ZSTD $@ -+ cmd_zstd = { cat $(real-prereqs) | $(ZSTD) -19; $(size_append); } > $@ -+ -+quiet_cmd_zstd22 = ZSTD22 $@ -+ cmd_zstd22 = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@ -+ - # ASM offsets - # --------------------------------------------------------------------------- - --- -2.30.2 - diff --git a/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch b/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch deleted file mode 100644 index 3da5c14a..00000000 --- a/packages/kernel-5.4/2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 66cad5025a1bbd5a2dec72f706c293ee6ff59243 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Thu, 30 Jul 2020 12:08:38 -0700 -Subject: [PATCH 2003/2007] x86: Bump ZO_z_extra_bytes margin for zstd - -Bump the ZO_z_extra_bytes margin for zstd. - -Zstd needs 3 bytes per 128 KB, and has a 22 byte fixed overhead. -Zstd needs to maintain 128 KB of space at all times, since that is -the maximum block size. See the comments regarding in-place -decompression added in lib/decompress_unzstd.c for details. - -The existing code is written so that all the compression algorithms use -the same ZO_z_extra_bytes. It is taken to be the maximum of the growth -rate plus the maximum fixed overhead. The comments just above this diff -state that: - -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-6-nickrterrell@gmail.com -(cherry picked from commit 0fe4f4ef8cc8e15a8f29f08f4be6128395f125f6) ---- - arch/x86/boot/header.S | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S -index 2c11c0f45d49..1382b7bb73d2 100644 ---- a/arch/x86/boot/header.S -+++ b/arch/x86/boot/header.S -@@ -536,8 +536,14 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr - # the size-dependent part now grows so fast. - # - # extra_bytes = (uncompressed_size >> 8) + 65536 -+# -+# ZSTD compressed data grows by at most 3 bytes per 128K, and only has a 22 -+# byte fixed overhead but has a maximum block size of 128K, so it needs a -+# larger margin. 
-+# -+# extra_bytes = (uncompressed_size >> 8) + 131072 - --#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536) -+#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 131072) - #if ZO_z_output_len > ZO_z_input_len - # define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \ - ZO_z_input_len) --- -2.30.2 - diff --git a/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch b/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch deleted file mode 100644 index 9507f2e7..00000000 --- a/packages/kernel-5.4/2004-x86-Add-support-for-ZSTD-compressed-kernel.patch +++ /dev/null @@ -1,175 +0,0 @@ -From 31714e3795c54f31f7edbfb1bf1808ab12439347 Mon Sep 17 00:00:00 2001 -From: Nick Terrell -Date: Thu, 30 Jul 2020 12:08:39 -0700 -Subject: [PATCH 2004/2007] x86: Add support for ZSTD compressed kernel - -- Add support for zstd compressed kernel - -- Define __DISABLE_EXPORTS in Makefile - -- Remove __DISABLE_EXPORTS definition from kaslr.c - -- Bump the heap size for zstd. - -- Update the documentation. - -Integrates the ZSTD decompression code to the x86 pre-boot code. - -Zstandard requires slightly more memory during the kernel decompression -on x86 (192 KB vs 64 KB), and the memory usage is independent of the -window size. - -__DISABLE_EXPORTS is now defined in the Makefile, which covers both -the existing use in kaslr.c, and the use needed by the zstd decompressor -in misc.c. - -This patch has been boot tested with both a zstd and gzip compressed -kernel on i386 and x86_64 using buildroot and QEMU. - -Additionally, this has been tested in production on x86_64 devices. -We saw a 2 second boot time reduction by switching kernel compression -from xz to zstd. - -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-7-nickrterrell@gmail.com -(cherry picked from commit fb46d057db824693994b048d3a8c869892afaa3f) -[fixed merge conflict in arch/x86/boot/compressed/Makefile] -Signed-off-by: Arnaldo Garcia Rincon ---- - Documentation/x86/boot.rst | 6 +++--- - arch/x86/Kconfig | 1 + - arch/x86/boot/compressed/Makefile | 6 +++++- - arch/x86/boot/compressed/kaslr.c | 7 ------- - arch/x86/boot/compressed/misc.c | 4 ++++ - arch/x86/include/asm/boot.h | 11 +++++++++-- - 6 files changed, 22 insertions(+), 13 deletions(-) - -diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst -index 08a2f100c0e6..4e6b8ee2978e 100644 ---- a/Documentation/x86/boot.rst -+++ b/Documentation/x86/boot.rst -@@ -767,9 +767,9 @@ Protocol: 2.08+ - uncompressed data should be determined using the standard magic - numbers. The currently supported compression formats are gzip - (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA -- (magic number 5D 00), XZ (magic number FD 37), and LZ4 (magic number -- 02 21). The uncompressed payload is currently always ELF (magic -- number 7F 45 4C 46). -+ (magic number 5D 00), XZ (magic number FD 37), LZ4 (magic number -+ 02 21) and ZSTD (magic number 28 B5). The uncompressed payload is -+ currently always ELF (magic number 7F 45 4C 46). 
- - ============ ============== - Field name: payload_length -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 36a28b9e46cb..9e7067cdebbf 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -179,6 +179,7 @@ config X86 - select HAVE_KERNEL_LZMA - select HAVE_KERNEL_LZO - select HAVE_KERNEL_XZ -+ select HAVE_KERNEL_ZSTD - select HAVE_KPROBES - select HAVE_KPROBES_ON_FTRACE - select HAVE_FUNCTION_ERROR_INJECTION -diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile -index 292b5bc6e3a3..dfe6782d76db 100644 ---- a/arch/x86/boot/compressed/Makefile -+++ b/arch/x86/boot/compressed/Makefile -@@ -24,7 +24,7 @@ OBJECT_FILES_NON_STANDARD := y - KCOV_INSTRUMENT := n - - targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ -- vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 -+ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst - - KBUILD_CFLAGS := -m$(BITS) -O2 - KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) -@@ -40,6 +40,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, gnu) - KBUILD_CFLAGS += -Wno-pointer-sign - # Disable relocation relaxation in case the link is not PIE. - KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) -+KBUILD_CFLAGS += -D__DISABLE_EXPORTS - - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n -@@ -146,6 +147,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE - $(call if_changed,lzo) - $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE - $(call if_changed,lz4) -+$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE -+ $(call if_changed,zstd22) - - suffix-$(CONFIG_KERNEL_GZIP) := gz - suffix-$(CONFIG_KERNEL_BZIP2) := bz2 -@@ -153,6 +156,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma - suffix-$(CONFIG_KERNEL_XZ) := xz - suffix-$(CONFIG_KERNEL_LZO) := lzo - suffix-$(CONFIG_KERNEL_LZ4) := lz4 -+suffix-$(CONFIG_KERNEL_ZSTD) := zst - - quiet_cmd_mkpiggy = MKPIGGY $@ - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ -diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c -index 2e53c056ba20..ae7e1698587f 100644 ---- a/arch/x86/boot/compressed/kaslr.c -+++ b/arch/x86/boot/compressed/kaslr.c -@@ -19,13 +19,6 @@ - */ - #define BOOT_CTYPE_H - --/* -- * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. -- * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL -- * which is meaningless and will cause compiling error in some cases. -- */ --#define __DISABLE_EXPORTS -- - #include "misc.h" - #include "error.h" - #include "../string.h" -diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c -index 9652d5c2afda..39e592d0e0b4 100644 ---- a/arch/x86/boot/compressed/misc.c -+++ b/arch/x86/boot/compressed/misc.c -@@ -77,6 +77,10 @@ static int lines, cols; - #ifdef CONFIG_KERNEL_LZ4 - #include "../../../../lib/decompress_unlz4.c" - #endif -+ -+#ifdef CONFIG_KERNEL_ZSTD -+#include "../../../../lib/decompress_unzstd.c" -+#endif - /* - * NOTE: When adding a new decompressor, please update the analysis in - * ../header.S. 
-diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h -index 680c320363db..9191280d9ea3 100644 ---- a/arch/x86/include/asm/boot.h -+++ b/arch/x86/include/asm/boot.h -@@ -24,9 +24,16 @@ - # error "Invalid value for CONFIG_PHYSICAL_ALIGN" - #endif - --#ifdef CONFIG_KERNEL_BZIP2 -+#if defined(CONFIG_KERNEL_BZIP2) - # define BOOT_HEAP_SIZE 0x400000 --#else /* !CONFIG_KERNEL_BZIP2 */ -+#elif defined(CONFIG_KERNEL_ZSTD) -+/* -+ * Zstd needs to allocate the ZSTD_DCtx in order to decompress the kernel. -+ * The ZSTD_DCtx is ~160KB, so set the heap size to 192KB because it is a -+ * round number and to allow some slack. -+ */ -+# define BOOT_HEAP_SIZE 0x30000 -+#else - # define BOOT_HEAP_SIZE 0x10000 - #endif - --- -2.30.2 - diff --git a/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch b/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch deleted file mode 100644 index be693c7c..00000000 --- a/packages/kernel-5.4/2005-.gitignore-Add-ZSTD-compressed-files.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 9e5deea6a88d5c1038e7d2562224a2bfd1a083f1 Mon Sep 17 00:00:00 2001 -From: Adam Borowski -Date: Thu, 30 Jul 2020 12:08:40 -0700 -Subject: [PATCH 2005/2007] .gitignore: Add ZSTD-compressed files - -For now, that's arch/x86/boot/compressed/vmlinux.bin.zst but probably more -will come, thus let's be consistent with all other compressors. - -Signed-off-by: Adam Borowski -Signed-off-by: Nick Terrell -Signed-off-by: Ingo Molnar -Tested-by: Sedat Dilek -Reviewed-by: Kees Cook -Link: https://lore.kernel.org/r/20200730190841.2071656-8-nickrterrell@gmail.com -(cherry picked from commit 6f3decabaff032e5fcc6cf56f0851ee259359232) ---- - .gitignore | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/.gitignore b/.gitignore -index 70580bdd352c..10faf379482f 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -44,6 +44,7 @@ - *.tab.[ch] - *.tar - *.xz -+*.zst - Module.symvers - modules.builtin - modules.order --- -2.30.2 - diff --git a/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch b/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch deleted file mode 100644 index cc0c97af..00000000 --- a/packages/kernel-5.4/2006-kbuild-move-module-strip-compression-code-into-scrip.patch +++ /dev/null @@ -1,184 +0,0 @@ -From 1b82a356860716e14a27bc90cc9755caebbdab0b Mon Sep 17 00:00:00 2001 -From: Masahiro Yamada -Date: Wed, 31 Mar 2021 22:38:08 +0900 -Subject: [PATCH 2006/2007] kbuild: move module strip/compression code into - scripts/Makefile.modinst - -Both mod_strip_cmd and mod_compress_cmd are only used in -scripts/Makefile.modinst, hence there is no good reason to define them -in the top Makefile. Move the relevant code to scripts/Makefile.modinst. - -Also, show separate log messages for each of install, strip, sign, and -compress. 
- -Signed-off-by: Masahiro Yamada -(cherry picked from commit 65ce9c38326e2588fcd1a3a4817c14b4660f430b) -[fixed a merge conflict in Makefile and script/Makefile.modinst while cherry-picking] -Signed-off-by: Arnaldo Garcia Rincon ---- - Makefile | 32 ------------- - scripts/Makefile.modinst | 98 +++++++++++++++++++++++++++++++++------- - 2 files changed, 81 insertions(+), 49 deletions(-) - -diff --git a/Makefile b/Makefile -index 3f593214a087..ef0da022f0c1 100644 ---- a/Makefile -+++ b/Makefile -@@ -978,38 +978,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE) - MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) - export MODLIB - --# --# INSTALL_MOD_STRIP, if defined, will cause modules to be --# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then --# the default option --strip-debug will be used. Otherwise, --# INSTALL_MOD_STRIP value will be used as the options to the strip command. -- --ifdef INSTALL_MOD_STRIP --ifeq ($(INSTALL_MOD_STRIP),1) --mod_strip_cmd = $(STRIP) --strip-debug --else --mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) --endif # INSTALL_MOD_STRIP=1 --else --mod_strip_cmd = true --endif # INSTALL_MOD_STRIP --export mod_strip_cmd -- --# CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed --# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP --# or CONFIG_MODULE_COMPRESS_XZ. -- --mod_compress_cmd = true --ifdef CONFIG_MODULE_COMPRESS -- ifdef CONFIG_MODULE_COMPRESS_GZIP -- mod_compress_cmd = $(KGZIP) -n -f -- endif # CONFIG_MODULE_COMPRESS_GZIP -- ifdef CONFIG_MODULE_COMPRESS_XZ -- mod_compress_cmd = $(XZ) -f -- endif # CONFIG_MODULE_COMPRESS_XZ --endif # CONFIG_MODULE_COMPRESS --export mod_compress_cmd -- - ifdef CONFIG_MODULE_SIG_ALL - $(eval $(call config_filename,MODULE_SIG_KEY)) - -diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst -index 5a4579e76485..84696ef99df7 100644 ---- a/scripts/Makefile.modinst -+++ b/scripts/Makefile.modinst -@@ -6,30 +6,94 @@ - PHONY := __modinst - __modinst: - --include scripts/Kbuild.include -+include include/config/auto.conf -+include $(srctree)/scripts/Kbuild.include - --modules := $(sort $(shell cat $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order)) -+modules := $(sort $(shell cat $(MODORDER))) -+ -+ifeq ($(KBUILD_EXTMOD),) -+dst := $(MODLIB)/kernel -+else -+INSTALL_MOD_DIR ?= extra -+dst := $(MODLIB)/$(INSTALL_MOD_DIR) -+endif -+ -+suffix-y := -+suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz -+suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz -+ -+modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) - --PHONY += $(modules) - __modinst: $(modules) - @: - --# Don't stop modules_install if we can't sign external modules. --quiet_cmd_modules_install = INSTALL $@ -- cmd_modules_install = \ -- mkdir -p $(2) ; \ -- cp $@ $(2) ; \ -- $(mod_strip_cmd) $(2)/$(notdir $@) ; \ -- $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) ; \ -- $(mod_compress_cmd) $(2)/$(notdir $@) -+quiet_cmd_none = -+ cmd_none = : - --# Modules built outside the kernel source tree go into extra by default --INSTALL_MOD_DIR ?= extra --ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D)) -+# -+# Installation -+# -+quiet_cmd_install = INSTALL $@ -+ cmd_install = mkdir -p $(dir $@); cp $< $@ -+ -+# Strip -+# -+# INSTALL_MOD_STRIP, if defined, will cause modules to be stripped after they -+# are installed. If INSTALL_MOD_STRIP is '1', then the default option -+# --strip-debug will be used. 
Otherwise, INSTALL_MOD_STRIP value will be used -+# as the options to the strip command. -+ifdef INSTALL_MOD_STRIP -+ -+ifeq ($(INSTALL_MOD_STRIP),1) -+strip-option := --strip-debug -+else -+strip-option := $(INSTALL_MOD_STRIP) -+endif -+ -+quiet_cmd_strip = STRIP $@ -+ cmd_strip = $(STRIP) $(strip-option) $@ -+ -+else -+ -+quiet_cmd_strip = -+ cmd_strip = : -+ -+endif -+ -+# -+# Signing -+# Don't stop modules_install even if we can't sign external modules. -+# -+ifeq ($(CONFIG_MODULE_SIG_ALL),y) -+quiet_cmd_sign = SIGN $@ -+$(eval $(call config_filename,MODULE_SIG_KEY)) -+ cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509 $@ \ -+ $(if $(KBUILD_EXTMOD),|| true) -+else -+quiet_cmd_sign := -+ cmd_sign := : -+endif -+ -+$(dst)/%.ko: $(extmod_prefix)%.ko FORCE -+ $(call cmd,install) -+ $(call cmd,strip) -+ $(call cmd,sign) -+ -+# -+# Compression -+# -+quiet_cmd_gzip = GZIP $@ -+ cmd_gzip = $(KGZIP) -n -f $< -+quiet_cmd_xz = XZ $@ -+ cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< -+ -+$(dst)/%.ko.gz: $(dst)/%.ko FORCE -+ $(call cmd,gzip) - --modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) -+$(dst)/%.ko.xz: $(dst)/%.ko FORCE -+ $(call cmd,xz) - --$(modules): -- $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) -+PHONY += FORCE -+FORCE: - - .PHONY: $(PHONY) --- -2.30.2 - diff --git a/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch b/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch deleted file mode 100644 index 7aebb53f..00000000 --- a/packages/kernel-5.4/2007-kbuild-add-support-for-zstd-compressed-modules.patch +++ /dev/null @@ -1,82 +0,0 @@ -From ddd6d2cff1af4bccee97a7d939e39f64a8965e50 Mon Sep 17 00:00:00 2001 -From: Piotr Gorski -Date: Wed, 7 Apr 2021 18:09:27 +0200 -Subject: [PATCH 2007/2007] kbuild: add support for zstd compressed modules - -kmod 28 supports modules compressed in zstd format so let's add this -possibility to kernel. - -Signed-off-by: Piotr Gorski -Reviewed-by: Oleksandr Natalenko -Signed-off-by: Masahiro Yamada -(cherry picked from commit c3d7ef377eb2564b165b1e8fdb4646952c90ac17) -[fixed a merge conflict in init/Kconfig] -Signed-off-by: Arnaldo Garcia Rincon ---- - init/Kconfig | 11 +++++++++-- - scripts/Makefile.modinst | 6 ++++++ - 2 files changed, 15 insertions(+), 2 deletions(-) - -diff --git a/init/Kconfig b/init/Kconfig -index 4dc3ea198a2c..c6ffb8b7eec6 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -2121,8 +2121,9 @@ config MODULE_COMPRESS - Out-of-tree kernel modules installed using Kbuild will also be - compressed upon installation. - -- Note: for modules inside an initrd or initramfs, it's more efficient -- to compress the whole initrd or initramfs instead. -+ Please note that the tool used to load modules needs to support the -+ corresponding algorithm. module-init-tools MAY support gzip, and kmod -+ MAY support gzip, xz and zstd. - - Note: This is fully compatible with signed modules. - -@@ -2144,6 +2145,12 @@ config MODULE_COMPRESS_GZIP - config MODULE_COMPRESS_XZ - bool "XZ" - -+config MODULE_COMPRESS_ZSTD -+ bool "ZSTD" -+ help -+ Compress modules with ZSTD. The installed modules are suffixed -+ with .ko.zst. 
-+ - endchoice - - config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS -diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst -index 84696ef99df7..59f613aa08b4 100644 ---- a/scripts/Makefile.modinst -+++ b/scripts/Makefile.modinst -@@ -21,6 +21,7 @@ endif - suffix-y := - suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz - suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz -+suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst - - modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules)) - -@@ -86,6 +87,8 @@ quiet_cmd_gzip = GZIP $@ - cmd_gzip = $(KGZIP) -n -f $< - quiet_cmd_xz = XZ $@ - cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< -+quiet_cmd_zstd = ZSTD $@ -+ cmd_zstd = $(ZSTD) -T0 --rm -f -q $< - - $(dst)/%.ko.gz: $(dst)/%.ko FORCE - $(call cmd,gzip) -@@ -93,6 +96,9 @@ $(dst)/%.ko.gz: $(dst)/%.ko FORCE - $(dst)/%.ko.xz: $(dst)/%.ko FORCE - $(call cmd,xz) - -+$(dst)/%.ko.zst: $(dst)/%.ko FORCE -+ $(call cmd,zstd) -+ - PHONY += FORCE - FORCE: - --- -2.30.2 - diff --git a/packages/kernel-5.4/Cargo.toml b/packages/kernel-5.4/Cargo.toml deleted file mode 100644 index ca9ac82e..00000000 --- a/packages/kernel-5.4/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "kernel-5_4" -version = "0.1.0" -edition = "2018" -publish = false -build = "build.rs" - -[package.metadata.build-package] -package-name = "kernel-5.4" - -[lib] -path = "pkg.rs" - -[[package.metadata.build-package.external-files]] -# Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/bd95a16ec3ce5de68339dbf6dedf8912f4ed5abd35716967c5eb4c93d4f8057e/kernel-5.4.204-113.362.amzn2.src.rpm" -sha512 = "420445e7699839b61d69d0fdc3a0a543fb2025fb2901b25075a0195af6b6f0cd74d138a560c4e83f4fc44394007cfec239245fc4f1afdfdf8c8ea7c8cd05bfad" - -[build-dependencies] -microcode = { path = "../microcode" } diff --git a/packages/kernel-5.4/build.rs b/packages/kernel-5.4/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/kernel-5.4/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/kernel-5.4/config-bottlerocket b/packages/kernel-5.4/config-bottlerocket deleted file mode 100644 index b6036719..00000000 --- a/packages/kernel-5.4/config-bottlerocket +++ /dev/null @@ -1,121 +0,0 @@ -# Because Bottlerocket does not have an initramfs, modules required to mount -# the root filesystem must be set to y. - -# The root filesystem is ext4 -CONFIG_EXT4_FS=y - -# NVMe for EC2 Nitro platforms (C5, M5, and later) -CONFIG_BLK_DEV_NVME=y -CONFIG_NVME_CORE=y - -# Xen blkfront for Xen-based EC2 platforms -CONFIG_XEN_BLKDEV_FRONTEND=y - -# virtio for local testing with QEMU -CONFIG_VIRTIO=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_PCI=y - -# dm-verity and enabling it on the kernel command line -CONFIG_BLK_DEV_DM=y -CONFIG_DAX=y -CONFIG_DM_INIT=y -CONFIG_DM_VERITY=y - -# TCMU/LIO -CONFIG_TCM_USER2=m - -# Enable EFI. -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_MIXED=y - -# yama LSM for ptrace restrictions -CONFIG_SECURITY_YAMA=y - -# Do not allow SELinux to be disabled at boot. -# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set - -# Do not allow SELinux to be disabled at runtime. -# CONFIG_SECURITY_SELINUX_DISABLE is not set - -# Do not allow SELinux to use `enforcing=0` behavior. 
-# CONFIG_SECURITY_SELINUX_DEVELOP is not set - -# Check the protection applied by the kernel for mmap and mprotect, -# rather than the protection requested by userspace. -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 - -# Enable support for the kernel lockdown security module. -CONFIG_SECURITY_LOCKDOWN_LSM=y - -# Enable lockdown early so that if the option is present on the -# kernel command line, it can be enforced. -CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y - -# Enable zstd compression for squashfs. -CONFIG_SQUASHFS_ZSTD=y - -# enable /proc/config.gz -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y - -# kernel headers at /sys/kernel/kheaders.tar.xz -CONFIG_IKHEADERS=y - -# BTF debug info at /sys/kernel/btf/vmlinux -CONFIG_DEBUG_INFO_BTF=y - -# We don't want to extend the kernel command line with any upstream defaults; -# Bottlerocket uses a fairly custom setup that needs tight control over it. -# CONFIG_CMDLINE_EXTEND is not set - -# Enable ZSTD kernel image compression -CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_ZSTD=y -CONFIG_ZSTD_COMPRESS=y -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_DECOMPRESS_ZSTD=y - -# Enable xz modules compression -CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_XZ=y - -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. -CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m - -# Enables support for checkpoint/restore -CONFIG_CHECKPOINT_RESTORE=y - -# Disable unused filesystems. -# CONFIG_AFS_FS is not set -# CONFIG_CRAMFS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_USE_FOR_EXT2=y -# CONFIG_GFS2_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_NFS_V2 is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_NTFS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_ZONEFS_FS is not set - -# Disable unused network protocols. -# CONFIG_AF_RXRPC is not set -# CONFIG_ATM is not set -# CONFIG_CAN is not set -# CONFIG_HSR is not set -# CONFIG_IP_DCCP is not set -# CONFIG_L2TP is not set -# CONFIG_RDS is not set -# CONFIG_RFKILL is not set -# CONFIG_TIPC is not set diff --git a/packages/kernel-5.4/kernel-5.4.spec b/packages/kernel-5.4/kernel-5.4.spec deleted file mode 100644 index 8b4f3f0e..00000000 --- a/packages/kernel-5.4/kernel-5.4.spec +++ /dev/null @@ -1,272 +0,0 @@ -%global debug_package %{nil} - -Name: %{_cross_os}kernel-5.4 -Version: 5.4.204 -Release: 1%{?dist} -Summary: The Linux kernel -License: GPL-2.0 WITH Linux-syscall-note -URL: https://www.kernel.org/ -# Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/bd95a16ec3ce5de68339dbf6dedf8912f4ed5abd35716967c5eb4c93d4f8057e/kernel-5.4.204-113.362.amzn2.src.rpm -Source100: config-bottlerocket - -# Help out-of-tree module builds run `make prepare` automatically. 
-Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch - -# Add zstd support for compressed kernel -Patch2000: 2000-lib-Prepare-zstd-for-preboot-environment-improve-per.patch -Patch2001: 2001-lib-Add-zstd-support-to-decompress.patch -Patch2002: 2002-init-Add-support-for-zstd-compressed-kernel.patch -Patch2003: 2003-x86-Bump-ZO_z_extra_bytes-margin-for-zstd.patch -Patch2004: 2004-x86-Add-support-for-ZSTD-compressed-kernel.patch -Patch2005: 2005-.gitignore-Add-ZSTD-compressed-files.patch -# Add zstd support for compressed kernel modules -Patch2006: 2006-kbuild-move-module-strip-compression-code-into-scrip.patch -Patch2007: 2007-kbuild-add-support-for-zstd-compressed-modules.patch - -BuildRequires: bc -BuildRequires: elfutils-devel -BuildRequires: hostname -BuildRequires: kmod -BuildRequires: openssl-devel - -# CPU microcode updates are included as "extra firmware" so the files don't -# need to be installed on the root filesystem. However, we want the license and -# attribution files to be available in the usual place. -%if "%{_cross_arch}" == "x86_64" -BuildRequires: %{_cross_os}microcode -Requires: %{_cross_os}microcode-licenses -%endif - -# Pull in expected modules and development files. -Requires: %{name}-modules = %{version}-%{release} -Requires: %{name}-devel = %{version}-%{release} - -%global kernel_sourcedir %{_cross_usrsrc}/kernels -%global kernel_libdir %{_cross_libdir}/modules/%{version} - -%description -%{summary}. - -%package devel -Summary: Configured Linux kernel source for module building - -%description devel -%{summary}. - -%package archive -Summary: Archived Linux kernel source for module building - -%description archive -%{summary}. - -%package modules -Summary: Modules for the Linux kernel - -%description modules -%{summary}. - -%package headers -Summary: Header files for the Linux kernel for use by glibc - -%description headers -%{summary}. - -%prep -rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" -tar -xof linux-%{version}.tar; rm linux-%{version}.tar -%setup -TDn linux-%{version} -# Patches from the Source0 SRPM -for patch in ../*.patch; do - patch -p1 <"$patch" -done -# Patches listed in this spec (Patch0001...) 
-%autopatch -p1 - -%if "%{_cross_arch}" == "x86_64" -microcode="$(find %{_cross_libdir}/firmware -type f -path '*/*-ucode/*' -printf '%%P ')" -cat < ../config-microcode -CONFIG_EXTRA_FIRMWARE="${microcode}" -CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" -EOF -%endif - -export ARCH="%{_cross_karch}" -export CROSS_COMPILE="%{_cross_target}-" - -KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ -scripts/kconfig/merge_config.sh \ - ../config-%{_cross_arch} \ -%if "%{_cross_arch}" == "x86_64" - ../config-microcode \ -%endif - %{SOURCE100} -rm -f ../config-* ../*.patch - -%global kmake \ -make -s\\\ - ARCH="%{_cross_karch}"\\\ - CROSS_COMPILE="%{_cross_target}-"\\\ - INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}"\\\ - INSTALL_MOD_PATH="%{buildroot}%{_cross_prefix}"\\\ - INSTALL_MOD_STRIP=1\\\ -%{nil} - -%build -%kmake mrproper -%kmake %{_cross_vendor}_defconfig -%kmake %{?_smp_mflags} %{_cross_kimage} -%kmake %{?_smp_mflags} modules - -%install -%kmake %{?_smp_mflags} headers_install -%kmake %{?_smp_mflags} modules_install - -install -d %{buildroot}/boot -install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz -install -m 0644 .config %{buildroot}/boot/config - -find %{buildroot}%{_cross_prefix} \ - \( -name .install -o -name .check -o \ - -name ..install.cmd -o -name ..check.cmd \) -delete - -# For out-of-tree kmod builds, we need to support the following targets: -# make scripts -> make prepare -> make modules -# -# This requires enough of the kernel tree to build host programs under the -# "scripts" and "tools" directories. - -# Any existing ELF objects will not work properly if we're cross-compiling for -# a different architecture, so get rid of them to avoid confusing errors. -find arch scripts tools -type f -executable \ - -exec sh -c "head -c4 {} | grep -q ELF && rm {}" \; - -# We don't need to include these files. -find -type f \( -name \*.cmd -o -name \*.gitignore \) -delete - -# Avoid an OpenSSL dependency by stubbing out options for module signing and -# trusted keyrings, so `sign-file` and `extract-cert` won't be built. External -# kernel modules do not have access to the keys they would need to make use of -# these tools. -sed -i \ - -e 's,$(CONFIG_MODULE_SIG_FORMAT),n,g' \ - -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ - scripts/Makefile - -# Restrict permissions on System.map. -chmod 600 System.map - -( - find * \ - -type f \ - \( -name Build\* -o -name Kbuild\* -o -name Kconfig\* -o -name Makefile\* \) \ - -print - - find arch/%{_cross_karch}/ \ - -type f \ - \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ - -print - - find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print - echo arch/%{_cross_karch}/kernel/asm-offsets.s - echo lib/vdso/gettimeofday.c - - for d in \ - arch/%{_cross_karch}/tools \ - arch/%{_cross_karch}/kernel/vdso ; do - [ -d "${d}" ] && find "${d}/" -type f -print - done - - find include -type f -print - find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print - - find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! 
-name \*.o -print - echo tools/build/fixdep.c - find tools/lib/subcmd -type f -print - find tools/lib/{ctype,string,str_error_r}.c - - echo kernel/bounds.c - echo kernel/time/timeconst.bc - echo security/selinux/include/classmap.h - echo security/selinux/include/initial_sid_to_string.h - - echo .config - echo Module.symvers - echo System.map -) | sort -u > kernel_devel_files - -# Create squashfs of kernel-devel files (ie. /usr/src/kernels/). -# -# -no-exports: -# The filesystem does not need to be exported via NFS. -# -# -all-root: -# Make all files owned by root rather than the build user. -# -# -comp zstd: -# zstd offers compression ratios like xz and decompression speeds like lz4. -SQUASHFS_OPTS="-no-exports -all-root -comp zstd" -mkdir -p src_squashfs/%{version} -tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} -mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} - -# Create a tarball of the same files, for use outside the running system. -# In theory we could extract these files with `unsquashfs`, but we do not want -# to require it to be installed on the build host, and it errors out when run -# inside Docker unless the limit for open files is lowered. -tar cf kernel-devel.tar src_squashfs/%{version} --transform='s|src_squashfs/%{version}|kernel-devel|' -xz -T0 kernel-devel.tar - -install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs -install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -install -d %{buildroot}%{kernel_sourcedir} - -# Replace the incorrect links from modules_install. These will be bound -# into a host container (and unused in the host) so they must not point -# to %{_cross_usrsrc} (eg. /x86_64-bottlerocket-linux-gnu/sys-root/...) 
-rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source -ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build -ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source - -%files -%license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note -%{_cross_attribution_file} -/boot/vmlinuz -/boot/config - -%files modules -%dir %{_cross_libdir}/modules -%{_cross_libdir}/modules/* - -%files headers -%dir %{_cross_includedir}/asm -%dir %{_cross_includedir}/asm-generic -%dir %{_cross_includedir}/drm -%dir %{_cross_includedir}/linux -%dir %{_cross_includedir}/misc -%dir %{_cross_includedir}/mtd -%dir %{_cross_includedir}/rdma -%dir %{_cross_includedir}/scsi -%dir %{_cross_includedir}/sound -%dir %{_cross_includedir}/video -%dir %{_cross_includedir}/xen -%{_cross_includedir}/asm/* -%{_cross_includedir}/asm-generic/* -%{_cross_includedir}/drm/* -%{_cross_includedir}/linux/* -%{_cross_includedir}/misc/* -%{_cross_includedir}/mtd/* -%{_cross_includedir}/rdma/* -%{_cross_includedir}/scsi/* -%{_cross_includedir}/sound/* -%{_cross_includedir}/video/* -%{_cross_includedir}/xen/* - -%files devel -%dir %{kernel_sourcedir} -%{_cross_datadir}/bottlerocket/kernel-devel.squashfs - -%files archive -%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz - -%changelog diff --git a/packages/kernel-5.4/latest-srpm-url.sh b/packages/kernel-5.4/latest-srpm-url.sh deleted file mode 100755 index 5e9e4591..00000000 --- a/packages/kernel-5.4/latest-srpm-url.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker run --rm amazonlinux:2 sh -c 'amazon-linux-extras enable kernel-5.4 >/dev/null && yum install -q -y yum-utils && yumdownloader -q --source --urls kernel | grep ^http' diff --git a/packages/kernel-5.4/pkg.rs b/packages/kernel-5.4/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/kernel-5.4/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used From 14aa606227a2bd04fb9d9b11ce98dbdab5710126 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 2 Aug 2022 11:05:35 -0500 Subject: [PATCH 0713/1356] Format issue field in PR template We have had several instances where the issue associated with a PR has not been automatically closed. Engineers then need to know to track down the issue after the merge to manually close out the issue. Part of this appears to be due to the PR not having the format expected to properly link the PR to the Issue [0]. This updates our pull request template to format this placeholder string to help guide the user towards using the necessary format to link and close the issue. [0] https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword Signed-off-by: Sean McGinnis --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ef6d9a11..2daff887 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -6,7 +6,7 @@ Tips: **Issue number:** - +Closes # **Description of changes:** From 4ca68d5044172135fec1f67e7c506d646dd953af Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 9 Aug 2022 14:52:00 +0000 Subject: [PATCH 0714/1356] kernel: Disable drivers for USB-attached network interfaces There is no need to support USB-attached network interfaces in the cloud, and such devices are equally unlikely to be encountered in servers. 
Make sure not to build any drivers for them as they would just be dead weight. Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket | 3 +++ packages/kernel-5.15/config-bottlerocket | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 3fa5c9ae..203bc9c5 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -130,3 +130,6 @@ CONFIG_EXT4_USE_FOR_EXT2=y # CONFIG_RDS is not set # CONFIG_RFKILL is not set # CONFIG_TIPC is not set + +# Disable USB-attached network interfaces, unused in the cloud and on server-grade hardware. +# CONFIG_USB_NET_DRIVERS is not set diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index de01ebf1..7802f8e4 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -131,3 +131,6 @@ CONFIG_EXT4_USE_FOR_EXT2=y # CONFIG_RDS is not set # CONFIG_RFKILL is not set # CONFIG_TIPC is not set + +# Disable USB-attached network interfaces, unused in the cloud and on server-grade hardware. +# CONFIG_USB_NET_DRIVERS is not set From 01c43e2d1a7572b50ed5edf0a756711cec7118d1 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Thu, 21 Jul 2022 21:12:47 +0000 Subject: [PATCH 0715/1356] testsys: migration support for aws-k8s variants --- tools/Cargo.lock | 414 ++++++++++++++++++++++++++++- tools/testsys/Cargo.toml | 2 + tools/testsys/src/aws_resources.rs | 281 ++++++++++++++++++-- tools/testsys/src/run.rs | 74 +++++- 4 files changed, 742 insertions(+), 29 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a6ce1c5b..c807e4d8 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -119,6 +119,290 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "aws-config" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11a8c971b0cb0484fc9436a291a44503b95141edc36ce7a6af6b6d7a06a02ab0" +dependencies = [ + "aws-http", + "aws-sdk-sso", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "hex", + "http", + "hyper", + "ring", + "tokio", + "tower", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-endpoint" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bc956f415dda77215372e5bc751a2463d1f9a1ec34edf3edc6c0ff67e5c8e43" +dependencies = [ + "aws-smithy-http", + "aws-types", + "http", + "regex", + "tracing", +] + +[[package]] +name = "aws-http" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a0d98a1d606aa24554e604f220878db4aa3b525b72f88798524497cc3867fc6" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "aws-types", + "bytes", + "http", + "http-body", + "lazy_static", + "percent-encoding", + "pin-project-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ec2" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b6c04720f5846edb80aa8c4dda848b77efdf99597f1ae48e12ea6b1ad1d3ce" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-query", + 
"aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "http", + "tokio-stream", + "tower", +] + +[[package]] +name = "aws-sdk-sso" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baa0c66fab12976065403cf4cafacffe76afa91d0da335d195af379d4223d235" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "http", + "tokio-stream", + "tower", +] + +[[package]] +name = "aws-sdk-sts" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "048037cdfd7f42fb29b5f969c7f639b4b7eac00e8f911e4eac4f89fb7b3a0500" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-query", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "http", + "tower", +] + +[[package]] +name = "aws-sig-auth" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8386fc0d218dbf2011f65bd8300d21ba98603fd150b962f61239be8b02d1fc6" +dependencies = [ + "aws-sigv4", + "aws-smithy-http", + "aws-types", + "http", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd866926c2c4978210bcb01d7d1b431c794f0c23ca9ee1e420204b018836b5fb" +dependencies = [ + "aws-smithy-http", + "form_urlencoded", + "hex", + "http", + "once_cell", + "percent-encoding", + "regex", + "ring", + "time 0.3.11", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb59cfdd21143006c01b9ca4dc4a9190b8c50c2ef831f9eb36f54f69efa42f1" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", + "tokio-stream", +] + +[[package]] +name = "aws-smithy-client" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44243329ba8618474c3b7f396de281f175ae172dd515b3d35648671a3cf51871" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-types", + "bytes", + "fastrand", + "http", + "http-body", + "hyper", + "hyper-rustls 0.22.1", + "lazy_static", + "pin-project-lite", + "tokio", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-http" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fba78f69a5bbe7ac1826389304c67b789032d813574e78f9a2d450634277f833" +dependencies = [ + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http", + "http-body", + "hyper", + "once_cell", + "percent-encoding", + "pin-project-lite", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "aws-smithy-http-tower" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff8a512d68350561e901626baa08af9491cfbd54596201b84b4da846a59e4da3" +dependencies = [ + "aws-smithy-http", + "bytes", + "http", + "http-body", + "pin-project-lite", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b7633698853aae80bd8b26866531420138eca91ea4620735d20b0537c93c2e" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name 
= "aws-smithy-query" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a94b5a8cc94a85ccbff89eb7bc80dc135ede02847a73d68c04ac2a3e4cf6b7" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-types" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d230d281653de22fb0e9c7c74d18d724a39d7148e2165b1e760060064c4967c0" +dependencies = [ + "itoa", + "num-integer", + "ryu", + "time 0.3.11", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aacaf6c0fa549ebe5d9daa96233b8635965721367ee7c69effc8d8078842df3" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb54f097516352475a0159c9355f8b4737c54044538a4d9aca4d376ef2361ccc" +dependencies = [ + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-types", + "http", + "rustc_version", + "tracing", + "zeroize", +] + [[package]] name = "backtrace" version = "0.3.66" @@ -230,6 +514,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" +[[package]] +name = "bytes-utils" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1934a3ef9cac8efde4966a92781e77713e1ba329f1d42e446c7d7eba340d8ef1" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "cargo-readme" version = "3.2.0" @@ -461,6 +755,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "ct-logs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" +dependencies = [ + "sct 0.6.1", +] + [[package]] name = "darling" version = "0.14.1" @@ -915,6 +1218,23 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "ct-logs", + "futures-util", + "hyper", + "log", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", + "tokio", + "tokio-rustls 0.22.0", + "webpki 0.21.4", +] + [[package]] name = "hyper-rustls" version = "0.23.0" @@ -924,10 +1244,10 @@ dependencies = [ "http", "hyper", "log", - "rustls", - "rustls-native-certs", + "rustls 0.20.6", + "rustls-native-certs 0.6.2", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", ] [[package]] @@ -1836,7 +2156,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.0", "ipnet", "js-sys", "lazy_static", @@ -1844,13 +2164,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.20.6", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tower-service", "url", "wasm-bindgen", @@ -1902,7 +2222,7 @@ dependencies = [ "futures", "http", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.0", "lazy_static", "log", "rusoto_credential", @@ -2058,6 +2378,19 @@ dependencies = [ "semver", ] +[[package]] +name = "rustls" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +dependencies = [ + "base64", + "log", + 
"ring", + "sct 0.6.1", + "webpki 0.21.4", +] + [[package]] name = "rustls" version = "0.20.6" @@ -2066,8 +2399,20 @@ checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", - "sct", - "webpki", + "sct 0.7.0", + "webpki 0.22.0", +] + +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls 0.19.1", + "schannel", + "security-framework", ] [[package]] @@ -2146,6 +2491,16 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sct" version = "0.7.0" @@ -2527,6 +2882,8 @@ name = "testsys" version = "0.1.0" dependencies = [ "anyhow", + "aws-config", + "aws-sdk-ec2", "bottlerocket-types", "bottlerocket-variant", "clap 3.2.15", @@ -2686,15 +3043,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls 0.19.1", + "tokio", + "webpki 0.21.4", +] + [[package]] name = "tokio-rustls" version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls", + "rustls 0.20.6", "tokio", - "webpki", + "webpki 0.22.0", ] [[package]] @@ -3003,6 +3371,12 @@ dependencies = [ "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" + [[package]] name = "utf-8" version = "0.7.6" @@ -3136,6 +3510,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "webpki" version = "0.22.0" @@ -3152,7 +3536,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ - "webpki", + "webpki 0.22.0", ] [[package]] @@ -3244,6 +3628,12 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +[[package]] +name = "xmlparser" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "114ba2b24d2167ef6d67d7d04c8cc86522b87f490025f39f0303b7db5bf5e3d8" + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 84701cf9..653e59c0 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -8,6 +8,8 @@ publish = false [dependencies] anyhow = "1.0" +aws-config = "0.46" +aws-sdk-ec2 = "0.16" bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = 
"021e8d6", version = "0.1"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 4537cb29..52342509 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -1,10 +1,12 @@ use crate::run::{TestType, TestsysImages}; use anyhow::{anyhow, Context, Result}; use bottlerocket_types::agent_config::{ - ClusterType, CreationPolicy, Ec2Config, EksClusterConfig, K8sVersion, SonobuoyConfig, - SonobuoyMode, + ClusterType, CreationPolicy, Ec2Config, EksClusterConfig, K8sVersion, MigrationConfig, + SonobuoyConfig, SonobuoyMode, TufRepoConfig, }; +use aws_sdk_ec2::model::{Filter, Image}; +use aws_sdk_ec2::Region; use bottlerocket_variant::Variant; use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; use k8s_openapi::serde_json::Value; @@ -15,6 +17,7 @@ use model::{ TestSpec, }; use std::collections::BTreeMap; +use std::convert::identity; pub(crate) struct AwsK8s { pub(crate) arch: String, @@ -26,11 +29,17 @@ pub(crate) struct AwsK8s { pub(crate) secrets: Option>, pub(crate) kube_conformance_image: Option, pub(crate) target_cluster_name: Option, + pub(crate) tuf_repo: Option, + pub(crate) starting_version: Option, + pub(crate) migrate_starting_commit: Option, + pub(crate) starting_image_id: Option, + pub(crate) migrate_to_version: Option, + pub(crate) capabilities: Option>, } impl AwsK8s { /// Create the necessary test and resource crds for the specified test type. - pub(crate) fn create_crds( + pub(crate) async fn create_crds( &self, test: TestType, testsys_images: &TestsysImages, @@ -40,6 +49,7 @@ impl AwsK8s { self.sonobuoy_test_crds(testsys_images, SonobuoyMode::CertifiedConformance) } TestType::Quick => self.sonobuoy_test_crds(testsys_images, SonobuoyMode::Quick), + TestType::Migration => self.migration_test_crds(testsys_images).await, } } @@ -49,13 +59,77 @@ impl AwsK8s { sonobuoy_mode: SonobuoyMode, ) -> Result> { let crds = vec![ - self.eks_crd("", testsys_images)?, - self.ec2_crd("", testsys_images)?, - self.sonobuoy_crd("", "-test", sonobuoy_mode, None, testsys_images)?, + self.eks_crd(testsys_images)?, + self.ec2_crd(testsys_images, None)?, + self.sonobuoy_crd("-test", sonobuoy_mode, None, testsys_images)?, ]; Ok(crds) } + /// Creates `Test` crds for migration testing. + async fn migration_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + let ami = self + .starting_image_id + .as_ref() + .unwrap_or( + &get_ami_id( + format!( + "bottlerocket-{}-{}-{}-{}", + self.variant, self.arch, self.starting_version.as_ref().context("The starting version must be provided for migration testing")?, self.migrate_starting_commit.as_ref().context("The commit for the starting version must be provided if the starting image id is not")? 
+ ), & self.arch, + self.region.to_string(), + ) + .await?, + ) + .to_string(); + let eks = self.eks_crd(testsys_images)?; + let ec2 = self.ec2_crd(testsys_images, Some(ami))?; + let mut depends_on = Vec::new(); + // Start with a `quick` test to make sure instances launched properly + let initial = self.sonobuoy_crd("-1-initial", SonobuoyMode::Quick, None, testsys_images)?; + depends_on.push(initial.name().context("Crd missing name")?); + // Migrate instances to the target version + let start_migrate = self.migration_crd( + format!("{}-2-migrate", self.cluster_name()), + MigrationVersion::Migrated, + Some(depends_on.clone()), + testsys_images, + )?; + // A `quick` test to validate the migration + depends_on.push(start_migrate.name().context("Crd missing name")?); + let migrated = self.sonobuoy_crd( + "-3-migrated", + SonobuoyMode::Quick, + Some(depends_on.clone()), + testsys_images, + )?; + // Migrate instances to the starting version + depends_on.push(migrated.name().context("Crd missing name")?); + let end_migrate = self.migration_crd( + format!("{}-4-migrate", self.cluster_name()), + MigrationVersion::Starting, + Some(depends_on.clone()), + testsys_images, + )?; + // A final quick test to validate the migration back to the starting version + depends_on.push(end_migrate.name().context("Crd missing name")?); + let last = self.sonobuoy_crd( + "-5-final", + SonobuoyMode::Quick, + Some(depends_on.clone()), + testsys_images, + )?; + Ok(vec![ + eks, + ec2, + initial, + start_migrate, + migrated, + end_migrate, + last, + ]) + } + /// Labels help filter test results with `testsys status`. fn labels(&self) -> BTreeMap { btreemap! { @@ -73,13 +147,13 @@ impl AwsK8s { } /// Bottlerocket cluster naming convention. - fn cluster_name(&self, suffix: &str) -> String { + fn cluster_name(&self) -> String { self.target_cluster_name .clone() - .unwrap_or_else(|| format!("{}-{}{}", self.kube_arch(), self.kube_variant(), suffix)) + .unwrap_or_else(|| format!("{}-{}", self.kube_arch(), self.kube_variant())) } - fn eks_crd(&self, cluster_suffix: &str, testsys_images: &TestsysImages) -> Result { + fn eks_crd(&self, testsys_images: &TestsysImages) -> Result { let cluster_version = K8sVersion::parse( Variant::new(&self.variant) .context("The provided variant cannot be interpreted.")? 
@@ -87,7 +161,7 @@ impl AwsK8s { .context("aws-k8s variant is missing k8s version")?, ) .map_err(|e| anyhow!(e))?; - let cluster_name = self.cluster_name(cluster_suffix); + let cluster_name = self.cluster_name(); let eks_crd = Resource { metadata: ObjectMeta { name: Some(cluster_name.clone()), @@ -125,10 +199,10 @@ impl AwsK8s { Ok(Crd::Resource(eks_crd)) } - fn ec2_crd(&self, cluster_suffix: &str, testsys_images: &TestsysImages) -> Result { - let cluster_name = self.cluster_name(cluster_suffix); + fn ec2_crd(&self, testsys_images: &TestsysImages, override_ami: Option) -> Result { + let cluster_name = self.cluster_name(); let mut ec2_config = Ec2Config { - node_ami: self.ami.clone(), + node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), instance_count: Some(2), instance_type: self.instance_type.clone(), cluster_name: format!("${{{}.clusterName}}", cluster_name), @@ -179,13 +253,12 @@ impl AwsK8s { fn sonobuoy_crd( &self, - cluster_suffix: &str, test_name_suffix: &str, sonobuoy_mode: SonobuoyMode, depends_on: Option>, testsys_images: &TestsysImages, ) -> Result { - let cluster_name = self.cluster_name(cluster_suffix); + let cluster_name = self.cluster_name(); let ec2_resource_name = format!("{}-instances", cluster_name); let test_name = format!("{}{}", cluster_name, test_name_suffix); let sonobuoy = Test { @@ -227,3 +300,181 @@ impl AwsK8s { Ok(Crd::Test(sonobuoy)) } } + +/// In order to easily create migration tests for `aws-k8s` variants we need to implement +/// `Migration` for it. +impl Migration for AwsK8s { + fn migration_config(&self) -> Result { + Ok(MigrationsConfig { + tuf_repo: self + .tuf_repo + .as_ref() + .context("Tuf repo metadata is required for upgrade downgrade testing.")? + .clone(), + starting_version: self + .starting_version + .as_ref() + .context("You must provide a starting version for upgrade downgrade testing.")? + .clone(), + migrate_to_version: self + .migrate_to_version + .as_ref() + .context("You must provide a target version for upgrade downgrade testing.")? + .clone(), + region: self.region.to_string(), + secrets: self.secrets.clone(), + capabilities: self.capabilities.clone(), + assume_role: self.assume_role.clone(), + }) + } + + fn instance_provider(&self) -> String { + let cluster_name = self.cluster_name(); + format!("{}-instances", cluster_name) + } + + fn migration_labels(&self) -> BTreeMap { + btreemap! { + "testsys/arch".to_string() => self.arch.to_string(), + "testsys/variant".to_string() => self.variant.to_string(), + "testsys/flavor".to_string() => "updown".to_string(), + } + } +} + +/// An enum to differentiate between upgrade and downgrade tests. +enum MigrationVersion { + ///`MigrationVersion::Starting` will create a migration to the starting version. + Starting, + ///`MigrationVersion::Migrated` will create a migration to the target version. + Migrated, +} + +/// A configuration containing all information needed to create a migration test for a given +/// variant. +struct MigrationsConfig { + tuf_repo: TufRepoConfig, + starting_version: String, + migrate_to_version: String, + region: String, + secrets: Option>, + capabilities: Option>, + assume_role: Option, +} + +/// Migration is a trait that should be implemented for all traits that use upgrade/downgrade +/// testing. It provides the infrastructure to easily create migration tests. +trait Migration { + /// Create a migration config that is used to create migration tests. + fn migration_config(&self) -> Result; + + /// Create the labels that should be used for the migration tests. 
+ fn migration_labels(&self) -> BTreeMap; + + /// Return the name of the instance provider that the migration agents should use to get the + /// instance ids. + fn instance_provider(&self) -> String; + + /// Create a migration test for a given arch/variant. + fn migration_crd( + &self, + test_name: String, + migration_version: MigrationVersion, + depends_on: Option>, + testsys_images: &TestsysImages, + ) -> Result { + // Get the migration configuration for the given type. + let migration = self.migration_config()?; + + // Determine which version we are migrating to. + let version = match migration_version { + MigrationVersion::Starting => migration.starting_version, + MigrationVersion::Migrated => migration.migrate_to_version, + }; + + // Create the migration test crd. + let mut migration_config = MigrationConfig { + aws_region: migration.region, + instance_ids: Default::default(), + migrate_to_version: version, + tuf_repo: Some(migration.tuf_repo.clone()), + assume_role: migration.assume_role.clone(), + } + .into_map() + .context("Unable to convert migration config to map")?; + migration_config.insert( + "instanceIds".to_string(), + Value::String(format!("${{{}.ids}}", self.instance_provider())), + ); + Ok(Crd::Test(Test { + metadata: ObjectMeta { + name: Some(test_name), + namespace: Some(NAMESPACE.into()), + labels: Some(self.migration_labels()), + ..Default::default() + }, + spec: TestSpec { + resources: vec![self.instance_provider()], + depends_on, + retries: None, + agent: Agent { + name: "migration-test-agent".to_string(), + image: testsys_images.migration_test.to_string(), + pull_secret: testsys_images.secret.clone(), + keep_running: true, + timeout: None, + configuration: Some(migration_config), + secrets: migration.secrets.clone(), + capabilities: migration.capabilities, + }, + }, + status: None, + })) + } +} + +/// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). +pub(crate) async fn get_ami_id(name: S1, arch: S2, region: S3) -> Result +where + S1: Into, + S2: Into, + S3: Into, +{ + let config = aws_config::from_env() + .region(Region::new(region.into())) + .load() + .await; + let ec2_client = aws_sdk_ec2::Client::new(&config); + let describe_images = ec2_client + .describe_images() + .owners("self") + .filters(Filter::builder().name("name").values(name).build()) + .filters( + Filter::builder() + .name("image-type") + .values("machine") + .build(), + ) + .filters(Filter::builder().name("architecture").values(arch).build()) + .filters( + Filter::builder() + .name("virtualization-type") + .values("hvm") + .build(), + ) + .send() + .await? 
+ .images; + let images: Vec<&Image> = describe_images + .iter() + .flat_map(|image| identity(image)) + .collect(); + if images.len() > 1 { + return Err(anyhow!("Multiple images were found")); + }; + if let Some(image) = images.last().as_ref() { + Ok(image.image_id().context("No image id for AMI")?.to_string()) + } else { + Err(anyhow!("No images were found")) + } +} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index cc881670..043846ac 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -1,5 +1,6 @@ use crate::aws_resources::AwsK8s; use anyhow::{anyhow, ensure, Context, Result}; +use bottlerocket_types::agent_config::TufRepoConfig; use bottlerocket_variant::Variant; use clap::Parser; use log::{debug, info}; @@ -30,6 +31,10 @@ pub(crate) struct Run { #[clap(long, env = "PUBLISH_INFRA_CONFIG_PATH", parse(from_os_str))] infra_config_path: PathBuf, + /// Use this named repo infrastructure from Infra.toml for upgrade/downgrade testing. + #[clap(long, env = "PUBLISH_REPO", default_value = "default")] + repo: String, + /// The path to `amis.json` #[clap(long, env = "AMI_INPUT")] ami_input: String, @@ -68,6 +73,32 @@ pub(crate) struct Run { #[clap(flatten)] agent_images: TestsysImages, + + // Migrations + /// Override the starting image used for migrations. The image will be pulled from available + /// amis in the users account if no override is provided. + #[clap(long, env = "TESTSYS_STARTING_IMAGE_ID")] + starting_image_id: Option, + + /// The starting version for migrations. This is required for all migrations tests. + /// This is the version that will be created and migrated to `migration-target-version`. + #[clap(long, env = "TESTSYS_STARTING_VERSION")] + migration_starting_version: Option, + + /// The commit id of the starting version for migrations. This is required for all migrations + /// tests unless `starting-image-id` is provided. This is the version that will be created and + /// migrated to `migration-target-version`. + #[clap( + long, + env = "TESTSYS_STARTING_COMMIT", + conflicts_with = "starting-image-id" + )] + migration_starting_commit: Option, + + /// The target version for migrations. This is required for all migration tests. This is the + /// version that will be migrated to. + #[clap(long, env = "BUILDSYS_VERSION_IMAGE")] + migration_target_version: Option, } impl Run { @@ -99,6 +130,26 @@ impl Run { .context("No region was provided and no regions found in infra config")? 
}; + let repo_config = infra_config + .repo + .unwrap_or_default() + .get(&self.repo) + .and_then(|repo| { + if let (Some(metadata_base_url), Some(targets_url)) = + (&repo.metadata_base_url, &repo.targets_url) + { + Some(TufRepoConfig { + metadata_url: format!( + "{}{}/{}", + metadata_base_url, &self.variant, &self.arch + ), + targets_url: targets_url.to_string(), + }) + } else { + None + } + }); + match variant.family() { "aws-k8s" => { debug!("Variant is in 'aws-k8s' family"); @@ -114,9 +165,17 @@ impl Run { secrets, kube_conformance_image: self.kube_conformance_image, target_cluster_name: self.target_cluster_name, + tuf_repo: repo_config, + starting_version: self.migration_starting_version, + starting_image_id: self.starting_image_id, + migrate_to_version: self.migration_target_version, + capabilities: None, + migrate_starting_commit: self.migration_starting_commit, }; debug!("Creating crds for aws-k8s testing"); - let crds = aws_k8s.create_crds(self.test_flavor, &self.agent_images)?; + let crds = aws_k8s + .create_crds(self.test_flavor, &self.agent_images) + .await?; debug!("Adding crds to testsys cluster"); for crd in crds { let crd = client @@ -170,6 +229,10 @@ pub(crate) enum TestType { /// variance this will run sonobuoy in "quick" mode. For ECS variants, this will run a simple /// ECS task. Quick, + /// Migration testing ensures that all bottlerocket migrations work as expected. Instances will + /// be created at the starting version, migrated to the target version and back to the starting + /// version with validation testing. + Migration, } derive_fromstr_from_deserialize!(TestType); @@ -177,7 +240,6 @@ derive_fromstr_from_deserialize!(TestType); #[derive(Clone, Debug, Deserialize)] pub(crate) struct Image { pub(crate) id: String, - // This is used to deserialize amis.json } #[derive(Debug, Parser)] @@ -206,6 +268,14 @@ pub(crate) struct TestsysImages { )] pub(crate) sonobuoy_test: String, + /// Migration test agent uri. If not provided the latest released test agent will be used. + #[clap( + long = "migration-test-agent-image", + env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/migration-test-agent:v0.0.1" + )] + pub(crate) migration_test: String, + /// Images pull secret. This is the name of a Kubernetes secret that will be used to /// pull the container image from a private registry. For example, if you created a pull secret /// with `kubectl create secret docker-registry regcred` then you would pass From 59f77db7e176d69ced8e0139aabf02d15613f135 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Fri, 12 Aug 2022 21:17:12 +0000 Subject: [PATCH 0716/1356] Update to latest version of tough --- tools/Cargo.lock | 50 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c807e4d8..78c2ac97 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -26,6 +26,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -553,15 +562,17 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "3f725f340c3854e3cb3ab736dc21f0cca183303acea3b3ffec30f141503ac8eb" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", "serde", "time 0.1.44", + "wasm-bindgen", "winapi", ] @@ -859,9 +870,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d07a982d1fb29db01e5a59b1918e03da4df7297eaeee7686ac45542fd4e59c8" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" [[package]] name = "either" @@ -1275,6 +1286,19 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf7d67cf4a22adc5be66e75ebdf769b3f2ea032041437a7061f97a63dad4b" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "js-sys", + "wasm-bindgen", + "winapi", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2555,9 +2579,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.140" +version = "1.0.143" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" +checksum = "53e8e5d5b70924f74ff5c6d64d9a5acd91422117c60f48c4e07855238a254553" dependencies = [ "serde_derive", ] @@ -2574,9 +2598,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.140" +version = "1.0.143" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" +checksum = "d3d8e8de557aee63c26b85b947f5e59b690d0454c753f3adeb5cd7835ab88391" dependencies = [ "proc-macro2", "quote", @@ -2596,9 +2620,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" dependencies = [ "indexmap", "itoa", @@ -3119,9 +3143,9 @@ checksum = "aa7c7f42dea4b1b99439786f5633aeb9c14c1b53f75e282803c2ec2ad545873c" [[package]] name = "tough" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a0aa977ae8d619536dfcf7c27848bc280030ee18d358fa5c3174ad094e189c" +checksum = "70a537c6b4307f5401e82a0196e97aaab9599e9c0f880e168eafb176abbac63d" dependencies = [ 
"chrono", "dyn-clone", From 18ca31d0b3a851d0da1fac3a6c7b2685326e1ba4 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 11 Aug 2022 00:20:16 +0000 Subject: [PATCH 0717/1356] buildsys: fix clippy warnings Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 10 +++++----- tools/buildsys/src/cache.rs | 4 ++-- tools/buildsys/src/cache/error.rs | 1 + tools/buildsys/src/main.rs | 8 +++++--- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index eb960eca..6e739700 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -103,7 +103,7 @@ impl PackageBuilder { arch = arch, ); - build(BuildType::Package, &package, &arch, args, &tag, &output_dir)?; + build(BuildType::Package, package, &arch, args, &tag, &output_dir)?; Ok(Self) } @@ -231,10 +231,10 @@ fn build( let nocache = rand::thread_rng().gen::(); // Create a directory for tracking outputs before we move them into position. - let build_dir = create_build_dir(&kind, &what, &arch)?; + let build_dir = create_build_dir(&kind, what, arch)?; // Clean up any previous outputs we have tracked. - clean_build_files(&build_dir, &output_dir)?; + clean_build_files(&build_dir, output_dir)?; let target = match kind { BuildType::Package => "package", @@ -296,7 +296,7 @@ fn build( docker(&rmi, Retry::No)?; // Copy artifacts to the expected directory and write markers to track them. - copy_build_files(&build_dir, &output_dir)?; + copy_build_files(&build_dir, output_dir)?; Ok(()) } @@ -453,7 +453,7 @@ where .min_depth(1) .max_depth(1) .into_iter() - .filter_entry(move |e| filter(e)) + .filter_entry(filter) .flat_map(|e| e.context(error::DirectoryWalkSnafu)) .map(|e| e.into_path()) } diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 3e924935..bd746eb2 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -29,7 +29,7 @@ impl LookasideCache { pub(crate) fn fetch(files: &[manifest::ExternalFile]) -> Result { for f in files { let url_file_name = Self::extract_file_name(&f.url)?; - let path = &f.path.as_ref().unwrap_or_else(|| &url_file_name); + let path = &f.path.as_ref().unwrap_or(&url_file_name); ensure!( path.components().count() == 1, error::ExternalFileNameSnafu { path } @@ -50,7 +50,7 @@ impl LookasideCache { let tmp = PathBuf::from(format!(".{}", name)); // first check the lookaside cache - let url = format!("{}/{}/{}/{}", LOOKASIDE_CACHE.to_string(), name, hash, name); + let url = format!("{}/{}/{}/{}", LOOKASIDE_CACHE, name, hash, name); match Self::fetch_file(&url, &tmp, hash) { Ok(_) => { fs::rename(&tmp, path) diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs index 4617ad9e..ec8e1ccb 100644 --- a/tools/buildsys/src/cache/error.rs +++ b/tools/buildsys/src/cache/error.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] +#[allow(clippy::enum_variant_names)] pub(crate) enum Error { #[snafu(display("Bad file name '{}'", path.display()))] ExternalFileName { path: PathBuf }, diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 7b6cad11..6e100e9e 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -108,6 +108,8 @@ fn main() { } fn run() -> Result<()> { + // Not actually redundant for a diverging function. 
+ #[allow(clippy::redundant_closure)] let command_str = std::env::args().nth(1).unwrap_or_else(|| usage()); let command = serde_plain::from_str::(&command_str).unwrap_or_else(|_| usage()); match command { @@ -140,7 +142,7 @@ fn build_package() -> Result<()> { } if let Some(files) = manifest.external_files() { - LookasideCache::fetch(&files).context(error::ExternalFileFetchSnafu)?; + LookasideCache::fetch(files).context(error::ExternalFileFetchSnafu)?; } if let Some(groups) = manifest.source_groups() { @@ -196,7 +198,7 @@ fn build_variant() -> Result<()> { let kernel_parameters = manifest.kernel_parameters(); let grub_features = manifest.grub_features(); VariantBuilder::build( - &packages, + packages, image_format, image_layout, kernel_parameters, @@ -222,7 +224,7 @@ fn supported_arch(manifest: &ManifestInfo) -> Result<()> { error::UnsupportedArchSnafu { arch: &arch, supported_arches: supported_arches - .into_iter() + .iter() .map(|a| a.to_string()) .collect::>() } From ad3fb4d55cfac3896ab9a34841d933b9bdefecc0 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 12 Aug 2022 17:04:10 +0000 Subject: [PATCH 0718/1356] pubsys: fix clippy warnings Signed-off-by: Ben Cressey --- tools/pubsys-config/src/lib.rs | 14 +++++++------- tools/pubsys-config/src/vmware.rs | 4 ++-- tools/pubsys/src/aws/ami/mod.rs | 6 +++--- tools/pubsys/src/aws/ami/register.rs | 2 +- tools/pubsys/src/aws/ami/wait.rs | 2 +- tools/pubsys/src/aws/client.rs | 2 +- tools/pubsys/src/aws/promote_ssm/mod.rs | 6 +++--- tools/pubsys/src/aws/publish_ami/mod.rs | 8 +++++--- tools/pubsys/src/aws/ssm/mod.rs | 11 ++++++----- tools/pubsys/src/aws/ssm/ssm.rs | 5 +++-- tools/pubsys/src/main.rs | 18 +++++++++--------- tools/pubsys/src/repo.rs | 10 +++++----- tools/pubsys/src/repo/check_expirations/mod.rs | 7 +++++-- tools/pubsys/src/repo/refresh_repo/mod.rs | 11 +++++++---- tools/pubsys/src/repo/validate_repo/mod.rs | 12 ++++++++---- tools/pubsys/src/vmware/govc.rs | 2 +- tools/pubsys/src/vmware/upload_ova/mod.rs | 5 ++--- 17 files changed, 69 insertions(+), 56 deletions(-) diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 7c660c7a..11994bb7 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -15,7 +15,7 @@ use std::path::{Path, PathBuf}; use url::Url; /// Configuration needed to load and create repos -#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct InfraConfig { // Repo subcommand config @@ -105,7 +105,7 @@ impl InfraConfig { } /// S3-specific TUF infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] pub struct S3Config { pub region: Option, #[serde(default)] @@ -116,7 +116,7 @@ pub struct S3Config { } /// AWS-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct AwsConfig { #[serde(default)] @@ -130,7 +130,7 @@ pub struct AwsConfig { } /// AWS region-specific configuration -#[derive(Debug, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct AwsRegionConfig { pub role: Option, @@ -141,7 +141,7 @@ pub struct AwsRegionConfig { // These variant names are lowercase because they have to match the text in Infra.toml, and it's // more 
common for TOML config to be lowercase. #[allow(non_camel_case_types)] -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub enum SigningKeyConfig { file { @@ -158,7 +158,7 @@ pub enum SigningKeyConfig { } /// AWS region-specific configuration -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] //#[serde(deny_unknown_fields)] pub struct KMSKeyConfig { #[serde(default)] @@ -199,7 +199,7 @@ impl TryFrom for Url { } /// Represents a Bottlerocket repo's location and the metadata needed to update the repo -#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct RepoConfig { pub root_role_url: Option, diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs index e5d43256..a5046096 100644 --- a/tools/pubsys-config/src/vmware.rs +++ b/tools/pubsys-config/src/vmware.rs @@ -27,7 +27,7 @@ const GOVC_RESOURCE_POOL: &str = "GOVC_RESOURCE_POOL"; const GOVC_FOLDER: &str = "GOVC_FOLDER"; /// VMware-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct VmwareConfig { #[serde(default)] @@ -42,7 +42,7 @@ pub struct VmwareConfig { /// Fields are optional here because this struct is used to gather environment variables, common /// config, and datacenter-specific configuration, each of which may not have the complete set of /// fields. It is used to build a complete datacenter configuration (hence the "Builder" name). -#[derive(Debug, Deserialize, Serialize, PartialEq)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct DatacenterBuilder { pub vsphere_url: Option, diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 5407dbb0..67bd6193 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -97,7 +97,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> .context(error::ConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); - let aws = infra_config.aws.unwrap_or_else(|| Default::default()); + let aws = infra_config.aws.unwrap_or_default(); // If the user gave an override list of regions, use that, otherwise use what's in the config. 
let mut regions = if !ami_args.regions.is_empty() { @@ -280,7 +280,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let mut ec2_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { let ec2_client = - build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { + build_client::(region, &base_region, &aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; @@ -429,7 +429,7 @@ async fn get_account_ids( let mut sts_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { let sts_client = - build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { + build_client::(region, base_region, aws).context(error::ClientSnafu { client_type: "STS", region: region.name(), })?; diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 50d78f7e..11433ff3 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -154,7 +154,7 @@ pub(crate) async fn register_image( ) .await; - if let Err(_) = register_result { + if register_result.is_err() { for snapshot_id in cleanup_snapshot_ids { let delete_request = DeleteSnapshotRequest { snapshot_id: snapshot_id.clone(), diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 77fb23b8..3b98be08 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -41,7 +41,7 @@ pub(crate) async fn wait_for_ami( // Use a new client each time so we have more confidence that different endpoints can see // the new AMI. let ec2_client = - build_client::(®ion, &sts_region, &aws).context(error::ClientSnafu { + build_client::(region, sts_region, aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 042883e2..2b35f20c 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -67,7 +67,7 @@ pub(crate) fn build_client( let maybe_regional_role = aws.region.get(region.name()).and_then(|r| r.role.clone()); let assume_roles = aws.role.iter().chain(maybe_regional_role.iter()).cloned(); let provider = build_provider( - &sts_region, + sts_region, assume_roles.clone(), base_provider(&aws.profile)?, )?; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index e2c29e65..576ce595 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -57,8 +57,8 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { .context(error::ConfigSnafu)?; trace!("Parsed infra config: {:#?}", infra_config); - let aws = infra_config.aws.unwrap_or_else(Default::default); - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); + let aws = infra_config.aws.unwrap_or_default(); + let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); // If the user gave an override list of regions, use that, otherwise use what's in the config. 
let regions = if !promote_args.regions.is_empty() { @@ -81,7 +81,7 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { let mut ssm_clients = HashMap::with_capacity(regions.len()); for region in ®ions { let ssm_client = - build_client::(region, &base_region, &aws).context(error::ClientSnafu { + build_client::(region, base_region, &aws).context(error::ClientSnafu { client_type: "SSM", region: region.name(), })?; diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 9035dd79..c5986bdf 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -90,7 +90,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { .context(error::ConfigSnafu)?; trace!("Using infra config: {:?}", infra_config); - let aws = infra_config.aws.unwrap_or_else(Default::default); + let aws = infra_config.aws.unwrap_or_default(); // If the user gave an override list of regions, use that, otherwise use what's in the config. let regions = if !publish_args.regions.is_empty() { @@ -138,7 +138,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let mut ec2_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { let ec2_client = - build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { + build_client::(region, &base_region, &aws).context(error::ClientSnafu { client_type: "EC2", region: region.name(), })?; @@ -150,7 +150,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { info!("Waiting for AMIs to be available..."); let mut wait_requests = Vec::with_capacity(amis.len()); for (region, image) in &amis { - let wait_future = wait_for_ami(&image.id, ®ion, &base_region, "available", 1, &aws); + let wait_future = wait_for_ami(&image.id, region, &base_region, "available", 1, &aws); // Store the region and ID so we can include it in errors let info_future = ready((region.clone(), image.id.clone())); wait_requests.push(join(info_future, wait_future)); @@ -372,6 +372,8 @@ pub(crate) async fn modify_regional_snapshots( // Send requests in parallel and wait for responses, collecting results into a list. let request_stream = stream::iter(requests).buffer_unordered(4); + + #[allow(clippy::type_complexity)] let responses: Vec<((Region, Vec), Result<()>)> = request_stream.collect().await; // Count up successes and failures so we can give a clear total in the final error message. diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 38786bf4..5390caa6 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -1,6 +1,7 @@ //! The ssm module owns the 'ssm' subcommand and controls the process of setting SSM parameters //! based on current build information +#[allow(clippy::module_inception)] pub(crate) mod ssm; pub(crate) mod template; @@ -60,8 +61,8 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) .context(error::ConfigSnafu)?; trace!("Parsed infra config: {:#?}", infra_config); - let aws = infra_config.aws.unwrap_or_else(Default::default); - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or_else(|| ""); + let aws = infra_config.aws.unwrap_or_default(); + let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); // If the user gave an override list of regions, use that, otherwise use what's in the config. 
let regions = if !ssm_args.regions.is_empty() { @@ -77,12 +78,12 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { ); let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegionSnafu)?; - let amis = parse_ami_input(®ions, &ssm_args, &aws)?; + let amis = parse_ami_input(®ions, ssm_args, &aws)?; let mut ssm_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { let ssm_client = - build_client::(®ion, &base_region, &aws).context(error::ClientSnafu { + build_client::(region, &base_region, &aws).context(error::ClientSnafu { client_type: "SSM", region: region.name(), })?; @@ -237,7 +238,7 @@ fn parse_ami_input( .with_context(|| error::UnknownRegionsSnafu { regions: vec![name.clone()], })?; - let region = region_from_string(&name, &aws).context(error::ParseRegionSnafu)?; + let region = region_from_string(name, aws).context(error::ParseRegionSnafu)?; amis.insert(region, image); } diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 91676e2c..3c5fe0f2 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -55,6 +55,7 @@ where // Send requests in parallel and wait for responses, collecting results into a list. let request_stream = stream::iter(requests).buffer_unordered(4); + #[allow(clippy::type_complexity)] let responses: Vec<( (Region, usize), std::result::Result>, @@ -197,7 +198,7 @@ pub(crate) async fn set_parameters( // Remove contexts from the list with drain; they get added back in if we retry the // request. for context in contexts.drain(..) { - let ssm_client = &ssm_clients[&context.region]; + let ssm_client = &ssm_clients[context.region]; let put_request = PutParameterRequest { name: context.name.to_string(), value: context.value.to_string(), @@ -301,7 +302,7 @@ pub(crate) async fn validate_parameters( ) -> Result<()> { // Fetch the given parameter names let expected_parameter_names: Vec<&SsmKey> = expected_parameters.keys().collect(); - let updated_parameters = get_parameters(&expected_parameter_names, &ssm_clients).await?; + let updated_parameters = get_parameters(&expected_parameter_names, ssm_clients).await?; // Walk through and check each value let mut success = true; diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 8f47f7fd..b810aaf9 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -43,21 +43,21 @@ fn run() -> Result<()> { SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; match args.subcommand { - SubCommand::Repo(ref repo_args) => repo::run(&args, &repo_args).context(error::RepoSnafu), + SubCommand::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), SubCommand::ValidateRepo(ref validate_repo_args) => { - repo::validate_repo::run(&args, &validate_repo_args).context(error::ValidateRepoSnafu) + repo::validate_repo::run(&args, validate_repo_args).context(error::ValidateRepoSnafu) } SubCommand::CheckRepoExpirations(ref check_expirations_args) => { - repo::check_expirations::run(&args, &check_expirations_args) + repo::check_expirations::run(&args, check_expirations_args) .context(error::CheckExpirationsSnafu) } SubCommand::RefreshRepo(ref refresh_repo_args) => { - repo::refresh_repo::run(&args, &refresh_repo_args).context(error::RefreshRepoSnafu) + repo::refresh_repo::run(&args, refresh_repo_args).context(error::RefreshRepoSnafu) } SubCommand::Ami(ref ami_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { - 
aws::ami::run(&args, &ami_args) + aws::ami::run(&args, ami_args) .await .context(error::AmiSnafu) }) @@ -65,7 +65,7 @@ fn run() -> Result<()> { SubCommand::PublishAmi(ref publish_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { - aws::publish_ami::run(&args, &publish_args) + aws::publish_ami::run(&args, publish_args) .await .context(error::PublishAmiSnafu) }) @@ -73,7 +73,7 @@ fn run() -> Result<()> { SubCommand::Ssm(ref ssm_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { - aws::ssm::run(&args, &ssm_args) + aws::ssm::run(&args, ssm_args) .await .context(error::SsmSnafu) }) @@ -81,13 +81,13 @@ fn run() -> Result<()> { SubCommand::PromoteSsm(ref promote_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { - aws::promote_ssm::run(&args, &promote_args) + aws::promote_ssm::run(&args, promote_args) .await .context(error::PromoteSsmSnafu) }) } SubCommand::UploadOva(ref upload_args) => { - vmware::upload_ova::run(&args, &upload_args).context(error::UploadOvaSnafu) + vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) } } } diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 771bd5ee..620385b0 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -413,7 +413,7 @@ fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result Result<()> { }; // Build a repo editor and manifest, from an existing repo if available, otherwise fresh - let maybe_urls = repo_urls(&repo_config, &repo_args.variant, &repo_args.arch)?; + let maybe_urls = repo_urls(repo_config, &repo_args.variant, &repo_args.arch)?; let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() { info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(&repo_args.root_role_path, &metadata_url, &targets_url)? { + match load_editor_and_manifest(&repo_args.root_role_path, metadata_url, targets_url)? { Some((editor, manifest)) => (editor, manifest), None => { warn!( @@ -508,7 +508,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { }; // Add update information to manifest - update_manifest(&repo_args, &mut manifest)?; + update_manifest(repo_args, &mut manifest)?; // Write manifest to tempfile so it can be copied in as target later let manifest_path = NamedTempFile::new() .context(error::TempFileSnafu)? @@ -526,7 +526,7 @@ pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { ]); let all_targets = copy_targets.iter().chain(link_targets.clone()); - update_editor(&repo_args, &mut editor, all_targets, &manifest_path)?; + update_editor(repo_args, &mut editor, all_targets, &manifest_path)?; // Sign repo =^..^= =^..^= =^..^= =^..^= diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index af2940fd..792e0232 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -146,7 +146,7 @@ pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> })?; let repo_urls = repo_urls( - &repo_config, + repo_config, &check_expirations_args.variant, &check_expirations_args.arch, )? 
@@ -171,7 +171,10 @@ mod error { #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(context(false), display("{}", source))] - Repo { source: crate::repo::Error }, + Repo { + #[snafu(source(from(crate::repo::Error, Box::new)))] + source: Box, + }, #[snafu(display("Found expiring/expired metadata in '{}'", metadata_url))] RepoExpirations { metadata_url: Url }, diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 35b3d99a..be0ebbd5 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -72,7 +72,7 @@ fn refresh_repo( // If the given metadata directory exists, throw an error. We don't want to overwrite a user's // existing repository. ensure!( - !Path::exists(&metadata_out_dir), + !Path::exists(metadata_out_dir), repo_error::RepoExistsSnafu { path: metadata_out_dir } @@ -102,7 +102,7 @@ fn refresh_repo( info!("Loaded TUF repo: {}", metadata_url); // Refresh the expiration dates of all non-root metadata files - set_expirations(&mut repo_editor, &expiration, *EXPIRATION_START_TIME)?; + set_expirations(&mut repo_editor, expiration, *EXPIRATION_START_TIME)?; // Refresh the versions of all non-root metadata files set_versions(&mut repo_editor)?; @@ -172,7 +172,7 @@ pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<() .context(repo_error::ConfigSnafu)?; let repo_urls = repo_urls( - &repo_config, + repo_config, &refresh_repo_args.variant, &refresh_repo_args.arch, )? @@ -203,7 +203,10 @@ mod error { #[snafu(visibility(pub(super)))] pub(crate) enum Error { #[snafu(context(false), display("{}", source))] - Repo { source: crate::repo::Error }, + Repo { + #[snafu(source(from(crate::repo::Error, Box::new)))] + source: Box, + }, #[snafu(display("Failed to refresh & re-sign metadata for: {:#?}", list_of_urls))] RepoRefresh { list_of_urls: Vec }, diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index edec8e18..30536c77 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -56,10 +56,10 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { // create the channels through which our download results will be passed let (tx, rx) = mpsc::channel(); - for target in targets.keys().cloned() { + for target in targets.keys() { let tx = tx.clone(); let mut reader = repo - .read_target(&target) + .read_target(target) .with_context(|_| repo_error::ReadTargetSnafu { target: target.raw(), })? @@ -67,6 +67,7 @@ fn retrieve_targets(repo: &Repository) -> Result<(), Error> { target: target.raw(), })?; info!("Downloading target: {}", target.raw()); + let target = target.clone(); thread_pool.spawn(move || { tx.send({ // tough's `Read` implementation validates the target as it's being downloaded @@ -138,7 +139,7 @@ pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result< })?; let repo_urls = repo_urls( - &repo_config, + repo_config, &validate_repo_args.variant, &validate_repo_args.arch, )? 
@@ -164,7 +165,10 @@ mod error { InvalidPercentage { percentage: u8 }, #[snafu(context(false), display("{}", source))] - Repo { source: crate::repo::Error }, + Repo { + #[snafu(source(from(crate::repo::Error, Box::new)))] + source: Box, + }, #[snafu(display("Failed to download and write target '{}': {}", target, source))] TargetDownload { target: String, source: io::Error }, diff --git a/tools/pubsys/src/vmware/govc.rs b/tools/pubsys/src/vmware/govc.rs index f576fd6c..b44d6700 100644 --- a/tools/pubsys/src/vmware/govc.rs +++ b/tools/pubsys/src/vmware/govc.rs @@ -81,7 +81,7 @@ impl Govc { "import.ova", &format!("-options={}", import_spec_container_path), "-name", - &name, + name, ova_container_path, ]; diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs index 8226005c..49fc096f 100644 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -174,9 +174,8 @@ where mark_as_template, }; - Ok(tt - .render(SPEC_TEMPLATE_NAME, &context) - .context(error::RenderTemplateSnafu)?) + tt.render(SPEC_TEMPLATE_NAME, &context) + .context(error::RenderTemplateSnafu) } mod error { From df3f61e9a61e9ae09016df120f3cc29851760f94 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 12 Aug 2022 17:04:24 +0000 Subject: [PATCH 0719/1356] infrasys: fix clippy warnings Signed-off-by: Ben Cressey --- tools/infrasys/src/main.rs | 8 ++++---- tools/infrasys/src/s3.rs | 2 +- tools/pubsys-config/src/lib.rs | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 4550cc32..83cf4c34 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -124,7 +124,7 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { // Upload root.json. info!("Uploading root.json to S3 bucket..."); s3::upload_file( - &repo_info.s3_region, + repo_info.s3_region, &bucket_name, &repo_info.prefix, root_role_path, @@ -262,14 +262,14 @@ async fn create_repo_infrastructure( // Create S3 bucket info!("Creating S3 bucket..."); let (s3_stack_arn, bucket_name, bucket_rdn) = - s3::create_s3_bucket(&repo_info.s3_region, &repo_info.s3_stack_name).await?; + s3::create_s3_bucket(repo_info.s3_region, &repo_info.s3_stack_name).await?; // Add Bucket Policy to newly created bucket s3::add_bucket_policy( - &repo_info.s3_region, + repo_info.s3_region, &bucket_name, &repo_info.prefix, - &repo_info.vpce_id, + repo_info.vpce_id, ) .await?; diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs index ee998aa3..be464523 100644 --- a/tools/infrasys/src/s3.rs +++ b/tools/infrasys/src/s3.rs @@ -66,7 +66,7 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, })?; // Grab the StackOutputs to get the Bucketname and BucketURL - let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; + let output_array = shared::get_stack_outputs(&cfn_client, stack_name, region).await?; let bucket_name = output_array[0] .output_value .as_ref() diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 11994bb7..70345680 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -178,7 +178,7 @@ impl TryFrom for Url { // We don't support passing profiles to tough in the name of the key/parameter, so for // KMS and SSM we prepend a slash if there isn't one present. SigningKeyConfig::kms { key_id, .. 
} => { - let mut key_id = key_id.unwrap_or_else(Default::default); + let mut key_id = key_id.unwrap_or_default(); key_id = if key_id.starts_with('/') { key_id.to_string() } else { @@ -188,7 +188,7 @@ impl TryFrom for Url { } SigningKeyConfig::ssm { parameter } => { let parameter = if parameter.starts_with('/') { - parameter.to_string() + parameter } else { format!("/{}", parameter) }; From c355408572991d78d76938d51f768f7044a7ab87 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 12 Aug 2022 17:01:08 +0000 Subject: [PATCH 0720/1356] actions-workflow: run lint checks Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3b7115fe..5fb7d00c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -117,6 +117,7 @@ jobs: EOF - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt + - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-lints - run: | cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ -e BUILDSYS_ARCH=${{ matrix.arch }} \ From 9eb0bad0357d6aa8eb13fbdde28016ba61f477ce Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 15 Aug 2022 10:53:58 -0500 Subject: [PATCH 0721/1356] Remove redundant closure Clippy warnings about a redundant closure. The closure just makes a call to a function that would take the passed argument anyway, so we should remove the closure and just provide the function directly. Signed-off-by: Sean McGinnis --- tools/testsys/src/aws_resources.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 52342509..183286fb 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -17,7 +17,6 @@ use model::{ TestSpec, }; use std::collections::BTreeMap; -use std::convert::identity; pub(crate) struct AwsK8s { pub(crate) arch: String, @@ -465,10 +464,7 @@ where .send() .await? .images; - let images: Vec<&Image> = describe_images - .iter() - .flat_map(|image| identity(image)) - .collect(); + let images: Vec<&Image> = describe_images.iter().flatten().collect(); if images.len() > 1 { return Err(anyhow!("Multiple images were found")); }; From ab31ec3b4cf9eb0f78f329b199a32de15e3852a0 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Thu, 18 Aug 2022 14:58:39 +0000 Subject: [PATCH 0722/1356] docs: Update note about presence of boot settings The settings model includes the boot settings on all bare metal variants and all k8s-1.23 variants. Future variants will pick up the setting as well. Signed-off-by: Markus Boehme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 25b0067a..8b16869d 100644 --- a/README.md +++ b/README.md @@ -699,7 +699,7 @@ Here are the metrics settings: #### Boot-related settings -*Please note that boot settings only exist for bare-metal variants at the moment* +*Please note that boot settings currently only exist for the bare metal variants and \*-k8s-1.23 variants. 
Boot settings will be added to any future variant introduced after Bottlerocket v1.8.0.* Specifying either of the following settings will generate a kernel boot config file to be loaded on subsequent boots: From 778436f68f298e04953c4176a02e00125b1e083f Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 29 Aug 2022 10:41:01 -0500 Subject: [PATCH 0723/1356] Update community meeting information This makes the README details about our community meetings a little more general so we don't need to remember to update it before and after each event. Signed-off-by: Sean McGinnis --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8b16869d..25fab618 100644 --- a/README.md +++ b/README.md @@ -26,10 +26,14 @@ Some notable features include: There are many ways to take part in the Bottlerocket community: -- [Join on Meetup](https://www.meetup.com/bottlerocket-community/) to hear about the latest Bottlerocket (virtual/in-person) events and community meetings. The [next community meeting is August 24, 2022](https://www.meetup.com/bottlerocket-community/events/287425423/). +- [Join us on Meetup](https://www.meetup.com/bottlerocket-community/) to hear about the latest Bottlerocket (virtual/in-person) events and community meetings. + Community meetings are typically every other week. + + Details can be found under the [Events section on Meetup](https://www.meetup.com/bottlerocket-community/events/), and you will receive email notifications if you become a member of the Meetup group. (It's free to join!) + - [Start or join a discussion](https://github.com/bottlerocket-os/bottlerocket/discussions) if you have questions about Bottlerocket. - If you're interested in contributing, thank you! -Please see our [contributor's guide](CONTRIBUTING.md). + Please see our [contributor's guide](CONTRIBUTING.md). ## Contact us From 8ab6c70ba1224ff45eef5542d26ba5b1b21709e3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 24 Aug 2022 14:22:23 +0000 Subject: [PATCH 0724/1356] kernel: config: reorder Intel networking config options Reorder intels networking configuration so that they are all in proximity to each other and in the same order as their definition in the Kconfig file for easier comparability. No functional change. 
Signed-off-by: Leonard Foerster --- .../kernel-5.10/config-bottlerocket-metal | 20 +++++++++---------- .../kernel-5.15/config-bottlerocket-metal | 20 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index e88c418f..7d400a4a 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -7,15 +7,6 @@ CONFIG_SATA_AHCI=y CONFIG_ATA=y CONFIG_ATA_PIIX=y -# Intel network support -CONFIG_IGB=m -CONFIG_IGBVF=m -CONFIG_NET_VENDOR_INTEL=y -CONFIG_IGB_HWMON=y -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y - # Broadcom network support CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y @@ -27,11 +18,20 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m CONFIG_INFINIBAND_USNIC=m +# Intel network support +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m + # Intel 10G network support CONFIG_IXGB=m CONFIG_IXGBE=m -CONFIG_IXGBE_DCB=y CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y CONFIG_IXGBEVF=m # Mellanox network support diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index e88c418f..7d400a4a 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -7,15 +7,6 @@ CONFIG_SATA_AHCI=y CONFIG_ATA=y CONFIG_ATA_PIIX=y -# Intel network support -CONFIG_IGB=m -CONFIG_IGBVF=m -CONFIG_NET_VENDOR_INTEL=y -CONFIG_IGB_HWMON=y -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y - # Broadcom network support CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y @@ -27,11 +18,20 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m CONFIG_INFINIBAND_USNIC=m +# Intel network support +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m + # Intel 10G network support CONFIG_IXGB=m CONFIG_IXGBE=m -CONFIG_IXGBE_DCB=y CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y CONFIG_IXGBEVF=m # Mellanox network support From ec08f855241d40326def7e57789949b00cc6fdd2 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:17:41 +0000 Subject: [PATCH 0725/1356] kernel: net: add further Intel 10G+ network drivers The main drivers we are interested in here are I40E, ICE, and FM10K. 
Explicitly setting the resulting options as follows: * Disable DCB for I40E, we likely do not need DCB * Disable IB support for I40E (and ICE on 5.15): *5.10: INFINIBAND_I40IW *5.15: INFINIBAND_IRDMA * Enable PLDMFW firmware update format through PLDM needed by ICE Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 6 ++++++ packages/kernel-5.15/config-bottlerocket-metal | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 7d400a4a..68377758 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -28,11 +28,17 @@ CONFIG_IGB_HWMON=y CONFIG_IGBVF=m # Intel 10G network support +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +# CONFIG_INFINIBAND_I40IW is not set +CONFIG_ICE=m +CONFIG_PLDMFW=y CONFIG_IXGB=m CONFIG_IXGBE=m CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y CONFIG_IXGBEVF=m +CONFIG_FM10K=m # Mellanox network support CONFIG_MLXFW=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 7d400a4a..a0365679 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -28,11 +28,17 @@ CONFIG_IGB_HWMON=y CONFIG_IGBVF=m # Intel 10G network support +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_ICE=m +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_PLDMFW=y CONFIG_IXGB=m CONFIG_IXGBE=m CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y CONFIG_IXGBEVF=m +CONFIG_FM10K=m # Mellanox network support CONFIG_MLXFW=m From e24822a5631c269bacd64cea3a12864eaef480dd Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:19:46 +0000 Subject: [PATCH 0726/1356] kernel: net: Add AMD 10G+ network drivers We are interested in AMD_XGBE. Explicitly disable DCE for that driver as we likely do not need DCB. In addition this driver will select option AMD_XGBE_HAVE_ECC on X86 platforms. As this config fragment is used on both x86 and aarch64 platforms we can not explicitly set it. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 5 +++++ packages/kernel-5.15/config-bottlerocket-metal | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 68377758..d3823082 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -7,6 +7,11 @@ CONFIG_SATA_AHCI=y CONFIG_ATA=y CONFIG_ATA_PIIX=y +# AMD network support +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set + # Broadcom network support CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index a0365679..d6364b58 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -7,6 +7,11 @@ CONFIG_SATA_AHCI=y CONFIG_ATA=y CONFIG_ATA_PIIX=y +# AMD network support +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set + # Broadcom network support CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y From 872af82cf77a341d353fc26f4424360e1ad09609 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:21:44 +0000 Subject: [PATCH 0727/1356] kernel: net: add further Broadcom 10G+ network drivers Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 2 ++ packages/kernel-5.15/config-bottlerocket-metal | 2 ++ 2 files changed, 4 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index d3823082..64f157f9 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -16,6 +16,8 @@ CONFIG_AMD_XGBE=m CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y CONFIG_BNXT=m # Cisco UCS network support diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index d6364b58..d94ad4ef 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -16,6 +16,8 @@ CONFIG_AMD_XGBE=m CONFIG_NET_VENDOR_BROADCOM=y CONFIG_TIGON3_HWMON=y CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y CONFIG_BNXT=m # Cisco UCS network support From eafab1c39491e7225e485224e1d839ad88cfae32 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:22:44 +0000 Subject: [PATCH 0728/1356] kernel: net: add Chelsio 10G+ network drivers For now disable support for DCB for these devices as well as the Infiniband support, Crypto offloading capabilities, and the ISCSI target driver. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 9 +++++++++ packages/kernel-5.15/config-bottlerocket-metal | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 64f157f9..fa9dd698 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -20,6 +20,15 @@ CONFIG_BNX2X=m CONFIG_BNX2X_SRIOV=y CONFIG_BNXT=m +# Chelsio network support +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +# CONFIG_CHELSIO_T4_DCB is not set +# CONFIG_CHELSIO_INLINE_CRYPTO is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_ISCSI_TARGET_CXGB4 is not set + # Cisco UCS network support CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index d94ad4ef..1ab19e59 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -20,6 +20,15 @@ CONFIG_BNX2X=m CONFIG_BNX2X_SRIOV=y CONFIG_BNXT=m +# Chelsio network support +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +# CONFIG_CHELSIO_T4_DCB is not set +# CONFIG_CHELSIO_INLINE_CRYPTO is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_ISCSI_TARGET_CXGB4 is not set + # Cisco UCS network support CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m From efb81ff87dcfa1bf12d045e0b9d3b8038682af30 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:24:57 +0000 Subject: [PATCH 0729/1356] kernel: net: add Emulex 10G+ network drivers In order for the Emulex driver to be of use we additionally need to add the chipset drivers for BE2, BE3, Lancer and Skyhawk chipsets. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 9 +++++++++ packages/kernel-5.15/config-bottlerocket-metal | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index fa9dd698..8eb7629f 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -34,6 +34,15 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m CONFIG_INFINIBAND_USNIC=m +# Emulex network support +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y + # Intel network support CONFIG_NET_VENDOR_INTEL=y CONFIG_E1000=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 1ab19e59..ca571710 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -34,6 +34,15 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m CONFIG_INFINIBAND_USNIC=m +# Emulex network support +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_BE2NET_HWMON=y + # Intel network support CONFIG_NET_VENDOR_INTEL=y CONFIG_E1000=m From e511a1d735c48208a17b537d8e7584958a20142b Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:26:19 +0000 Subject: [PATCH 0730/1356] kernel: net: add Huawei 10G+ network drivers Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 4 ++++ packages/kernel-5.15/config-bottlerocket-metal | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 8eb7629f..a28ef3d1 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -43,6 +43,10 @@ CONFIG_BE2NET_HWMON=y CONFIG_BE2NET_LANCER=y CONFIG_BE2NET_SKYHAWK=y +# Huawei network support +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m + # Intel network support CONFIG_NET_VENDOR_INTEL=y CONFIG_E1000=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index ca571710..d702f101 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -43,6 +43,10 @@ CONFIG_BE2NET_LANCER=y CONFIG_BE2NET_SKYHAWK=y CONFIG_BE2NET_HWMON=y +# Huawei network support +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m + # Intel network support CONFIG_NET_VENDOR_INTEL=y CONFIG_E1000=m From b4700a8e9159600db57b3efd5999f2d42d15a468 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:27:34 +0000 Subject: [PATCH 0731/1356] kernel: net: add Myricom 10G+ network drivers Explicitly set the resulting DCA support for that driver to its default value. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 5 +++++ packages/kernel-5.15/config-bottlerocket-metal | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index a28ef3d1..a8e56579 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -77,6 +77,11 @@ CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX5_CORE_EN=y CONFIG_NET_SWITCHDEV=y +# Myricom network support +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index d702f101..4ea0464b 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -77,6 +77,11 @@ CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX5_CORE_EN=y CONFIG_NET_SWITCHDEV=y +# Myricom network support +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m From e5dd7a945b823ce9d20ae3b40330ac1411af4569 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:28:32 +0000 Subject: [PATCH 0732/1356] kernel: net: add Pensando 10G+ network drivers Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 4 ++++ packages/kernel-5.15/config-bottlerocket-metal | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index a8e56579..7d11995f 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -82,6 +82,10 @@ CONFIG_NET_VENDOR_MYRI=y CONFIG_MYRI10GE=m CONFIG_MYRI10GE_DCA=y +# Pensando network support +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 4ea0464b..3774956e 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -82,6 +82,10 @@ CONFIG_NET_VENDOR_MYRI=y CONFIG_MYRI10GE=m CONFIG_MYRI10GE_DCA=y +# Pensando network support +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m From 0c09532029f87a2516b858abf60b9624b445dc83 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 23 Aug 2022 13:29:11 +0000 Subject: [PATCH 0733/1356] kernel: net: add Solarflare 10G+ network drivers The main drivers here are SFC and SFC_FALCON. In addition configure the options for these drivers to support HWMON (SFC_MCDI_MON) and SRIOV. Disable SFC_MCDI_LOGGING as that is not needed (debug logging for communication between driver and firmware). 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 8 ++++++++ packages/kernel-5.15/config-bottlerocket-metal | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 7d11995f..1d9e1bb2 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -86,6 +86,14 @@ CONFIG_MYRI10GE_DCA=y CONFIG_NET_VENDOR_PENSANDO=y CONFIG_IONIC=m +# Solarflare network support +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_SRIOV=y +# CONFIG_SFC_MCDI_LOGGING is not set +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_FALCON=m + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 3774956e..da7ac7f2 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -86,6 +86,14 @@ CONFIG_MYRI10GE_DCA=y CONFIG_NET_VENDOR_PENSANDO=y CONFIG_IONIC=m +# Solarflare network support +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_SRIOV=y +# CONFIG_SFC_MCDI_LOGGING is not set +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_FALCON=m + # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m From 1d7255acc65a996bd44cd03d08fbc758051ea50b Mon Sep 17 00:00:00 2001 From: ecpullen Date: Thu, 14 Jul 2022 15:57:52 +0000 Subject: [PATCH 0734/1356] testsys: `cargo make test` for ecs variants Adds support for ecs variants testing. Everything works the same way as aws-k8s variants except ECS clusters are created instead of EKS, and there is not supported conformance testing for ecs variants. --- tools/testsys/src/aws_resources.rs | 318 ++++++++++++++++++++++++++++- tools/testsys/src/run.rs | 80 ++++++-- 2 files changed, 376 insertions(+), 22 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 183286fb..0d0de417 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -1,8 +1,8 @@ use crate::run::{TestType, TestsysImages}; use anyhow::{anyhow, Context, Result}; use bottlerocket_types::agent_config::{ - ClusterType, CreationPolicy, Ec2Config, EksClusterConfig, K8sVersion, MigrationConfig, - SonobuoyConfig, SonobuoyMode, TufRepoConfig, + ClusterType, CreationPolicy, Ec2Config, EcsClusterConfig, EcsTestConfig, EksClusterConfig, + K8sVersion, MigrationConfig, SonobuoyConfig, SonobuoyMode, TufRepoConfig, }; use aws_sdk_ec2::model::{Filter, Image}; @@ -341,6 +341,320 @@ impl Migration for AwsK8s { } } +/// All information required to test ECS variants of Bottlerocket are captured in the `AwsEcs` +/// struct for migration testing, either `starting_version` and `migration_starting_commit`, or +/// `starting_image_id` must be set. TestSys supports `quick` and `migration` testing on ECS +/// variants. +pub(crate) struct AwsEcs { + /// The architecture to test (`x86_64`,`aarch64') + pub(crate) arch: String, + /// The variant to test (`aws-ecs-1`) + pub(crate) variant: String, + /// The region testing should be performed in + pub(crate) region: String, + /// The role that should be assumed by the agents + pub(crate) assume_role: Option, + /// The desired instance type + pub(crate) instance_type: Option, + /// The ami that should be used for quick testing + pub(crate) ami: String, + /// Secrets that should be used by the agents + pub(crate) secrets: Option>, + /// The name of the target ECS cluster. 
If no cluster is provided, `-` will be + /// used + pub(crate) target_cluster_name: Option, + + // Migrations + /// The TUF repos for migration testing. If no TUF repos are used, the default Bottlerocket + /// repos will be used + pub(crate) tuf_repo: Option, + /// The starting version for migration testing + pub(crate) starting_version: Option, + /// The AMI id of the starting version for migration testing + pub(crate) starting_image_id: Option, + /// The short commit SHA of the starting version + pub(crate) migrate_starting_commit: Option, + /// The target version for Bottlerocket migrations + pub(crate) migrate_to_version: Option, + /// Additional capabilities that need to be enabled on the agent's pods + pub(crate) capabilities: Option>, +} + +impl AwsEcs { + /// Create the necessary test and resource crds for the specified test type. + pub(crate) async fn create_crds( + &self, + test: TestType, + testsys_images: &TestsysImages, + ) -> Result> { + match test { + TestType::Conformance => { + return Err(anyhow!( + "Conformance testing for ECS variants is not supported." + )) + } + TestType::Quick => self.ecs_test_crds(testsys_images), + TestType::Migration => self.migration_test_crds(testsys_images).await, + } + } + + fn ecs_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + let crds = vec![ + self.ecs_crd(testsys_images)?, + self.ec2_crd(testsys_images, None)?, + self.ecs_test_crd("-test", None, testsys_images)?, + ]; + Ok(crds) + } + + async fn migration_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + let ami = self + .starting_image_id + .as_ref() + .unwrap_or( + &get_ami_id( + format!( + "bottlerocket-{}-{}-{}-{}", + self.variant, + self.arch, + self.starting_version.as_ref().context("The starting version must be provided for migration testing")?, + self.migrate_starting_commit.as_ref().context("The commit for the starting version must be provided if the starting image id is not")? + ), & self.arch, + self.region.to_string(), + ) + .await?, + ) + .to_string(); + let ecs = self.ecs_crd(testsys_images)?; + let ec2 = self.ec2_crd(testsys_images, Some(ami))?; + let mut depends_on = Vec::new(); + let initial = self.ecs_test_crd("-1-initial", None, testsys_images)?; + depends_on.push(initial.name().context("Crd missing name")?); + let start_migrate = self.migration_crd( + format!("{}-2-migrate", self.cluster_name()), + MigrationVersion::Migrated, + Some(depends_on.clone()), + testsys_images, + )?; + depends_on.push(start_migrate.name().context("Crd missing name")?); + let migrated = + self.ecs_test_crd("-3-migrated", Some(depends_on.clone()), testsys_images)?; + depends_on.push(migrated.name().context("Crd missing name")?); + let end_migrate = self.migration_crd( + format!("{}-4-migrate", self.cluster_name()), + MigrationVersion::Starting, + Some(depends_on.clone()), + testsys_images, + )?; + depends_on.push(end_migrate.name().context("Crd missing name")?); + let last = self.ecs_test_crd("-5-final", Some(depends_on.clone()), testsys_images)?; + Ok(vec![ + ecs, + ec2, + initial, + start_migrate, + migrated, + end_migrate, + last, + ]) + } + + /// Labels help filter test results with `testsys status`. + fn labels(&self) -> BTreeMap { + btreemap! 
{ + "testsys/arch".to_string() => self.arch.to_string(), + "testsys/variant".to_string() => self.variant.to_string(), + } + } + + fn kube_arch(&self) -> String { + self.arch.replace('_', "-") + } + + fn kube_variant(&self) -> String { + self.variant.replace('.', "") + } + + /// Bottlerocket cluster naming convention (-, for aws-ecs-1 on x86_64, x86-64-aws-ecs-1). + fn cluster_name(&self) -> String { + self.target_cluster_name + .clone() + .unwrap_or_else(|| format!("{}-{}", self.kube_arch(), self.kube_variant())) + } + + fn ecs_crd(&self, testsys_images: &TestsysImages) -> Result { + let cluster_name = self.cluster_name(); + let ecs_crd = Resource { + metadata: ObjectMeta { + name: Some(cluster_name.clone()), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: ResourceSpec { + depends_on: None, + agent: Agent { + name: "ecs-provider".to_string(), + image: testsys_images.ecs_resource.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: false, + timeout: None, + configuration: Some( + EcsClusterConfig { + cluster_name, + region: Some(self.region.clone()), + assume_role: self.assume_role.clone(), + vpc: None, + } + .into_map() + .context("Unable to convert ECS config to map")?, + ), + secrets: self.secrets.clone(), + capabilities: None, + }, + destruction_policy: DestructionPolicy::Never, + }, + status: None, + }; + Ok(Crd::Resource(ecs_crd)) + } + + fn ec2_crd(&self, testsys_images: &TestsysImages, override_ami: Option) -> Result { + let cluster_name = self.cluster_name(); + let ec2_config = Ec2Config { + node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), + instance_count: Some(2), + instance_type: self.instance_type.clone(), + cluster_name: format!("${{{}.clusterName}}", cluster_name), + region: format!("${{{}.region}}", cluster_name), + instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), + subnet_id: format!("${{{}.publicSubnetId}}", cluster_name), + cluster_type: ClusterType::Ecs, + endpoint: None, + certificate: None, + cluster_dns_ip: None, + security_groups: vec![], + assume_role: self.assume_role.clone(), + } + .into_map() + .context("Unable to create EC2 config")?; + + let ec2_resource = Resource { + metadata: ObjectMeta { + name: Some(format!("{}-instances", cluster_name)), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: ResourceSpec { + depends_on: Some(vec![cluster_name]), + agent: Agent { + name: "ec2-provider".to_string(), + image: testsys_images.ec2_resource.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: false, + timeout: None, + configuration: Some(ec2_config), + secrets: self.secrets.clone(), + capabilities: None, + }, + destruction_policy: DestructionPolicy::OnDeletion, + }, + status: None, + }; + Ok(Crd::Resource(ec2_resource)) + } + + fn ecs_test_crd( + &self, + test_name_suffix: &str, + depends_on: Option>, + testsys_images: &TestsysImages, + ) -> Result { + let cluster_name = self.cluster_name(); + let ec2_resource_name = format!("{}-instances", cluster_name); + let test_name = format!("{}{}", cluster_name, test_name_suffix); + let ecs_test = Test { + metadata: ObjectMeta { + name: Some(test_name), + namespace: Some(NAMESPACE.into()), + labels: Some(self.labels()), + ..Default::default() + }, + spec: TestSpec { + resources: vec![ec2_resource_name, cluster_name.to_string()], + depends_on, + retries: Some(5), + agent: Agent { + name: "ecs-test-agent".to_string(), + image: 
testsys_images.ecs_test.clone(), + pull_secret: testsys_images.secret.clone(), + keep_running: true, + timeout: None, + configuration: Some( + EcsTestConfig { + assume_role: self.assume_role.clone(), + region: Some(self.region.clone()), + cluster_name: cluster_name.clone(), + task_count: 1, + subnet: format!("${{{}.publicSubnetId}}", cluster_name), + task_definition_name_and_revision: None, + } + .into_map() + .context("Unable to convert sonobuoy config to `Map`")?, + ), + secrets: self.secrets.clone(), + capabilities: None, + }, + }, + status: None, + }; + + Ok(Crd::Test(ecs_test)) + } +} + +/// In order to easily create migration tests for `aws-ecs` variants we need to implement +/// `Migration` for it. +impl Migration for AwsEcs { + fn migration_config(&self) -> Result { + Ok(MigrationsConfig { + tuf_repo: self + .tuf_repo + .as_ref() + .context("Tuf repo metadata is required for upgrade downgrade testing.")? + .clone(), + starting_version: self + .starting_version + .as_ref() + .context("You must provide a starting version for upgrade downgrade testing.")? + .clone(), + migrate_to_version: self + .migrate_to_version + .as_ref() + .context("You must provide a target version for upgrade downgrade testing.")? + .clone(), + region: self.region.to_string(), + secrets: self.secrets.clone(), + capabilities: self.capabilities.clone(), + assume_role: self.assume_role.clone(), + }) + } + + fn instance_provider(&self) -> String { + let cluster_name = self.cluster_name(); + format!("{}-instances", cluster_name) + } + + fn migration_labels(&self) -> BTreeMap { + btreemap! { + "testsys/arch".to_string() => self.arch.to_string(), + "testsys/variant".to_string() => self.variant.to_string(), + "testsys/flavor".to_string() => "updown".to_string(), + } + } +} + /// An enum to differentiate between upgrade and downgrade tests. enum MigrationVersion { ///`MigrationVersion::Starting` will create a migration to the starting version. diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 043846ac..e40e1a0a 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -1,4 +1,4 @@ -use crate::aws_resources::AwsK8s; +use crate::aws_resources::{AwsEcs, AwsK8s}; use anyhow::{anyhow, ensure, Context, Result}; use bottlerocket_types::agent_config::TufRepoConfig; use bottlerocket_variant::Variant; @@ -45,7 +45,7 @@ pub(crate) struct Run { #[clap(long, env = "TESTSYS_TARGET_REGION")] target_region: Option, - /// The name of the cluster for resource agents (eks resource agent, ecs resource agent). Note: + /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). Note: /// This is not the name of the `testsys cluster` this is the name of the cluster that tests /// should be run on. If no cluster name is provided, the bottlerocket cluster /// naming convention `-` will be used. @@ -64,7 +64,7 @@ pub(crate) struct Run { /// Specify the instance type that should be used. This is only applicable for aws-* variants. /// It can be omitted for non-aws variants and can be omitted to use default instance types. 
- #[clap(long)] + #[clap(long, env = "TESTSYS_INSTANCE_TYPE")] instance_type: Option, /// Add secrets to the testsys agents (`--secret aws-credentials=my-secret`) @@ -150,7 +150,7 @@ impl Run { } }); - match variant.family() { + let crds = match variant.family() { "aws-k8s" => { debug!("Variant is in 'aws-k8s' family"); let bottlerocket_ami = ami(&self.ami_input, ®ion)?; @@ -173,19 +173,34 @@ impl Run { migrate_starting_commit: self.migration_starting_commit, }; debug!("Creating crds for aws-k8s testing"); - let crds = aws_k8s + aws_k8s .create_crds(self.test_flavor, &self.agent_images) - .await?; - debug!("Adding crds to testsys cluster"); - for crd in crds { - let crd = client - .create_object(crd) - .await - .context("Unable to create object")?; - if let Some(name) = crd.name() { - info!("Successfully added '{}'", name) - }; - } + .await? + } + "aws-ecs" => { + debug!("Variant is in 'aws-ecs' family"); + let bottlerocket_ami = ami(&self.ami_input, ®ion)?; + debug!("Using ami '{}'", bottlerocket_ami); + let aws_ecs = AwsEcs { + arch: self.arch, + variant: self.variant, + region, + assume_role: self.assume_role, + instance_type: self.instance_type, + ami: bottlerocket_ami.to_string(), + secrets, + target_cluster_name: self.target_cluster_name, + tuf_repo: repo_config, + starting_version: self.migration_starting_version, + starting_image_id: self.starting_image_id, + migrate_starting_commit: self.migration_starting_commit, + migrate_to_version: self.migration_target_version, + capabilities: None, + }; + debug!("Creating crds for aws-ecs testing"); + aws_ecs + .create_crds(self.test_flavor, &self.agent_images) + .await? } other => { return Err(anyhow!( @@ -195,6 +210,15 @@ impl Run { } }; + debug!("Adding crds to testsys cluster"); + for crd in crds { + let crd = client + .create_object(crd) + .await + .context("Unable to create object")?; + info!("Successfully added '{}'", crd.name().unwrap()); + } + Ok(()) } } @@ -244,7 +268,7 @@ pub(crate) struct Image { #[derive(Debug, Parser)] pub(crate) struct TestsysImages { - /// Eks resource agent uri. If not provided the latest released resource agent will be used. + /// EKS resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "eks-resource-agent-image", env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE", @@ -252,7 +276,15 @@ pub(crate) struct TestsysImages { )] pub(crate) eks_resource: String, - /// Ec2 resource agent uri. If not provided the latest released resource agent will be used. + /// ECS resource agent URI. If not provided the latest released resource agent will be used. + #[clap( + long = "ecs-resource-agent-image", + env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/ecs-resource-agent:v0.0.1" + )] + pub(crate) ecs_resource: String, + + /// EC2 resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "ec2-resource-agent-image", env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE", @@ -260,7 +292,7 @@ pub(crate) struct TestsysImages { )] pub(crate) ec2_resource: String, - /// Sonobuoy test agent uri. If not provided the latest released test agent will be used. + /// Sonobuoy test agent URI. If not provided the latest released test agent will be used. #[clap( long = "sonobuoy-test-agent-image", env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE", @@ -268,7 +300,15 @@ pub(crate) struct TestsysImages { )] pub(crate) sonobuoy_test: String, - /// Migration test agent uri. If not provided the latest released test agent will be used. 
+ /// ECS test agent URI. If not provided the latest released test agent will be used. + #[clap( + long = "ecs-test-agent-image", + env = "TESTSYS_ECS_TEST_AGENT_IMAGE", + default_value = "public.ecr.aws/bottlerocket-test-system/ecs-test-agent:v0.0.1" + )] + pub(crate) ecs_test: String, + + /// Migration test agent URI. If not provided the latest released test agent will be used. #[clap( long = "migration-test-agent-image", env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE", From a3b51d154ccf34008c9891707c679970d160dbf6 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Fri, 26 Aug 2022 19:28:59 +0000 Subject: [PATCH 0735/1356] testsys: Update testsys to checkout `07b9ae8` --- tools/Cargo.lock | 40 +++++++++++++++++++++++++--------------- tools/testsys/Cargo.toml | 6 +++--- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 78c2ac97..1e8d2dc0 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -459,8 +459,8 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.1.0" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=021e8d6#021e8d69b13b7d05e79963a0ff3f1c5c1af10753" +version = "0.0.1" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=07b9ae8#07b9ae8e902623842c334889517973d0c9d82691" dependencies = [ "model", "serde", @@ -1434,9 +1434,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.73.1" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68b954ea9ad888de953fb1488bd8f377c4c78d82d4642efa5925189210b50b7" +checksum = "a527a8001a61d8d470dab27ac650889938760c243903e7cd90faaf7c60a34bdd" dependencies = [ "k8s-openapi", "kube-client", @@ -1446,9 +1446,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.73.1" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150dc7107d9acf4986088f284a0a6dddc5ae37ef1ffdf142f6811dc5998dd58" +checksum = "c0d48f42df4e8342e9f488c4b97e3759d0042c4e7ab1a853cc285adb44409480" dependencies = [ "base64", "bytes", @@ -1484,9 +1484,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.73.1" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8c429676abe6a73b374438d5ca02caaf9ae7a635441253c589b779fa5d0622" +checksum = "91f56027f862fdcad265d2e9616af416a355e28a1c620bb709083494753e070d" dependencies = [ "chrono", "form_urlencoded", @@ -1502,9 +1502,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.73.1" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb405f0d39181acbfdc7c79e3fc095330c9b6465ab50aeb662d762e53b662f1" +checksum = "66d74121eb41af4480052901f31142d8d9bbdf1b7c6b856da43bcb02f5b1b177" dependencies = [ "darling", "proc-macro2", @@ -1617,8 +1617,8 @@ dependencies = [ [[package]] name = "model" -version = "0.1.0" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=021e8d6#021e8d69b13b7d05e79963a0ff3f1c5c1af10753" +version = "0.0.1" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=07b9ae8#07b9ae8e902623842c334889517973d0c9d82691" dependencies = [ "async-recursion", "async-trait", @@ -2882,6 +2882,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "term_size" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e4129646ca0ed8f45d09b929036bafad5377103edd06e50bf574b353d2b08d9" +dependencies = [ + "libc", + 
"winapi", +] + [[package]] name = "termcolor" version = "1.1.3" @@ -2921,7 +2931,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "terminal_size", + "term_size", "tokio", "unescape", ] @@ -3137,9 +3147,9 @@ dependencies = [ [[package]] name = "topological-sort" -version = "0.1.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa7c7f42dea4b1b99439786f5633aeb9c14c1b53f75e282803c2ec2ad545873c" +checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 653e59c0..0da89992 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -10,7 +10,7 @@ publish = false anyhow = "1.0" aws-config = "0.46" aws-sdk-ec2 = "0.16" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "021e8d6", version = "0.1"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "07b9ae8", version = "0.0.1"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } env_logger = "0.9" @@ -18,11 +18,11 @@ futures = "0.3.8" k8s-openapi = { version = "0.15", features = ["v1_20", "api"], default-features = false } log = "0.4" maplit = "1.0.2" -model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "021e8d6", version = "0.1"} +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "07b9ae8", version = "0.0.1"} pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_plain = "1" -terminal_size = "0.1" +term_size = "0.3" tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } unescape = "0.1.0" From 8010f61ddc1b249b43c3ba0fe4d8d4e150ce7ed1 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Fri, 26 Aug 2022 19:29:43 +0000 Subject: [PATCH 0736/1356] testsys: Update status to match `watch` size --- tools/testsys/src/status.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs index 8f6df684..bdf27765 100644 --- a/tools/testsys/src/status.rs +++ b/tools/testsys/src/status.rs @@ -2,7 +2,6 @@ use anyhow::{Context, Result}; use clap::Parser; use log::{debug, info}; use model::test_manager::{SelectionParams, TestManager}; -use terminal_size::{Height, Width}; /// Check the status of testsys objects. #[derive(Debug, Parser)] @@ -45,10 +44,9 @@ impl Status { .context("Could not create string from status.")? 
         );
         } else {
-            let (terminal_size::Width(width), _) =
-                terminal_size::terminal_size().unwrap_or((Width(80), Height(0)));
+            let (width, _) = term_size::dimensions().unwrap_or((80, 0));
             debug!("Window width '{}'", width);
-            println!("{}", status.to_string(width as usize));
+            println!("{:width$}", status.to_string());
         }
         Ok(())
     }

From 170463e3b4f53df23c41b92c6a40a73521fa8219 Mon Sep 17 00:00:00 2001
From: ecpullen
Date: Fri, 26 Aug 2022 19:38:50 +0000
Subject: [PATCH 0737/1356] testsys: add `conflicts_with` field to resources

---
 tools/testsys/src/aws_resources.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs
index 0d0de417..5a358272 100644
--- a/tools/testsys/src/aws_resources.rs
+++ b/tools/testsys/src/aws_resources.rs
@@ -170,6 +170,7 @@ impl AwsK8s {
             },
             spec: ResourceSpec {
                 depends_on: None,
+                conflicts_with: None,
                 agent: Agent {
                     name: "eks-provider".to_string(),
                     image: testsys_images.eks_resource.clone(),
@@ -233,6 +234,7 @@ impl AwsK8s {
             },
             spec: ResourceSpec {
                 depends_on: Some(vec![cluster_name]),
+                conflicts_with: None,
                 agent: Agent {
                     name: "ec2-provider".to_string(),
                     image: testsys_images.ec2_resource.clone(),
@@ -284,6 +286,7 @@ impl AwsK8s {
                             mode: sonobuoy_mode,
                             kubernetes_version: None,
                             kube_conformance_image: self.kube_conformance_image.clone(),
+                            e2e_repo_config_base64: None,
                             assume_role: self.assume_role.clone(),
                         }
                         .into_map()
@@ -493,6 +496,7 @@ impl AwsEcs {
             },
             spec: ResourceSpec {
                 depends_on: None,
+                conflicts_with: None,
                 agent: Agent {
                     name: "ecs-provider".to_string(),
                     image: testsys_images.ecs_resource.clone(),
@@ -548,6 +552,7 @@ impl AwsEcs {
             },
             spec: ResourceSpec {
                 depends_on: Some(vec![cluster_name]),
+                conflicts_with: None,
                 agent: Agent {
                     name: "ec2-provider".to_string(),
                     image: testsys_images.ec2_resource.clone(),

From 35b00bd95884dc32543fab92d7616d177f127945 Mon Sep 17 00:00:00 2001
From: Zac Mrowicki
Date: Tue, 16 Aug 2022 16:32:24 +0000
Subject: [PATCH 0738/1356] README: Add DNS settings documentation

---
 README.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/README.md b/README.md
index 25fab618..8f031ae6 100644
--- a/README.md
+++ b/README.md
@@ -631,6 +631,17 @@ In addition to the container runtime daemons, these credential settings will als
    10.1.1.1 test2.example.com
    ```

+The following allows for custom DNS settings, which are used to generate the `/etc/resolv.conf`.
+If either DNS setting is not populated, the system will use the DHCP lease of the primary interface to gather these settings.
+See the `resolv.conf` [man page](https://man7.org/linux/man-pages/man5/resolv.conf.5.html) for more detail.
+* `settings.dns.name-servers`: An array of IP address strings that represent the desired name server(s).
+* `settings.dns.search-list`: An array of domain strings that represent the desired domain search path(s).
+```
+[settings.dns]
+name-servers = ["1.2.3.4", "5.6.7.8"]
+search-list = ["foo.bar", "baz.foo"]
+```
+
 ##### Proxy settings

 These settings will configure the proxying behavior of the following services:

From 435f9c254f80cb9ffb8470a1a312250247504995 Mon Sep 17 00:00:00 2001
From: John McBride
Date: Thu, 1 Sep 2022 21:15:39 +0000
Subject: [PATCH 0739/1356] buildsys: extend external-files to vendor go modules

Using the `bundle-*` keys on `package.metadata.build-package.external-files`,
buildsys can vendor Go dependencies into a new archive (leaving the pristine
upstream archive intact).
Both archives may then be used to build the package in an isolated environment. toosl/docker-go now also retrieves the `GOSUMDB` environment variable from the host. Co-authored-by: John McBride Co-authored-by: Samuel Karp Signed-off-by: John McBride --- tools/buildsys/src/gomod.rs | 199 ++++++++++++++++++++++++++++++ tools/buildsys/src/gomod/error.rs | 33 +++++ tools/buildsys/src/main.rs | 20 +++ tools/buildsys/src/manifest.rs | 47 +++++++ tools/docker-go | 2 +- 5 files changed, 300 insertions(+), 1 deletion(-) create mode 100644 tools/buildsys/src/gomod.rs create mode 100644 tools/buildsys/src/gomod/error.rs diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs new file mode 100644 index 00000000..8b3d8378 --- /dev/null +++ b/tools/buildsys/src/gomod.rs @@ -0,0 +1,199 @@ +/*! +Packages using the Go programming language may have upstream tar archives that +include only the source code of the project, but not the source code of any +dependencies. The Go programming language promotes the use of "modules" for +dependencies. Projects adopting modules will provide `go.mod` and `go.sum` files. + +This Rust module extends the functionality of `packages.metadata.build-package.external-files` +and provides the ability to retrieve and validate dependencies +declared using Go modules given a tar archive containing a `go.mod` and `go.sum`. + +The location where dependencies are retrieved from are controlled by the +standard environment variables employed by the Go tool: `GOPROXY`, `GOSUMDB`, and +`GOPRIVATE`. These variables are automatically retrieved from the host environment +when the docker-go script is invoked. + + */ + +pub(crate) mod error; +use error::Result; + +use super::manifest; +use duct::cmd; +use snafu::{ensure, OptionExt, ResultExt}; +use std::io::Write; +use std::os::unix::fs::PermissionsExt; +use std::path::{Path, PathBuf}; +use std::{env, fs}; + +pub(crate) struct GoMod; + +const GO_MOD_DOCKER_SCRIPT_NAME: &str = "docker-go-script.sh"; + +// The following bash template script is intended to be run within a container +// using the docker-go tool found in this codebase under `tools/docker-go`. +// +// This script inspects the top level directory found in the package upstream +// archive and uses that as the default Go module path if no explicit module +// path was provided. It will then untar the archive, vendor the Go +// dependencies, create a new archive using the {module-path}/vendor directory +// and name it the output path provided. If no output path was given, it +// defaults to "bundled-{package-file-name}". Finally, it cleans up by removing +// the untar'd source code. The upstream archive remains intact and both tar +// files can then be used during packaging. +// +// This script exists as an in memory template string literal and is populated +// into a temporary file in the package directory itself to enable buildsys to +// be as portable as possible and have no dependecy on runtime paths. Since +// buildsys is executed from the context of many different package directories, +// managing a temporary file via this Rust module prevents having to aquire the +// path of some static script file on the host system. +const GO_MOD_SCRIPT_TMPL: &str = r###"#!/bin/bash + +set -e + +toplevel=$(tar tf __LOCAL_FILE_NAME__ | head -1) +if [ -z __MOD_DIR__ ] ; then + targetdir="${toplevel}" +else + targetdir="__MOD_DIR__" +fi + +tar xf __LOCAL_FILE_NAME__ + +pushd "${targetdir}" + go list -mod=readonly ./... 
>/dev/null && go mod vendor +popd + +tar czf __OUTPUT__ "${targetdir}"/vendor +rm -rf "${targetdir}" +"###; + +impl GoMod { + pub(crate) fn vendor( + root_dir: &Path, + package_dir: &Path, + external_file: &manifest::ExternalFile, + ) -> Result<()> { + let url_file_name = extract_file_name(&external_file.url)?; + let local_file_name = &external_file.path.as_ref().unwrap_or(&url_file_name); + ensure!( + local_file_name.components().count() == 1, + error::InputFileSnafu + ); + + let full_path = package_dir.join(local_file_name); + ensure!( + full_path.is_file(), + error::InputFileBadSnafu { path: full_path } + ); + + // If a module directory was not provided, set as an empty path. + // By default, without a provided module directory, tar will be passed + // the first directory found in the archives as the top level Go module + let default_empty_path = PathBuf::from(""); + let mod_dir = external_file + .bundle_root_path + .as_ref() + .unwrap_or(&default_empty_path); + + // Use a default "bundle-{name-of-file}" if no output path was provided + let default_output_path = + PathBuf::from(format!("bundled-{}", local_file_name.to_string_lossy())); + let output_path_arg = external_file + .bundle_output_path + .as_ref() + .unwrap_or(&default_output_path); + println!( + "cargo:rerun-if-changed={}", + output_path_arg.to_string_lossy() + ); + + // Our SDK and toolchain are picked by the external `cargo make` invocation. + let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu { + var: "BUILDSYS_SDK_IMAGE", + })?; + + let args = DockerGoArgs { + module_path: package_dir, + sdk_image: sdk, + go_mod_cache: &root_dir.join(".gomodcache"), + command: format!("./{}", GO_MOD_DOCKER_SCRIPT_NAME), + }; + + // Create and/or write the temporary script file to the package directory + // using the script template string and placeholder variables + let script_contents = GO_MOD_SCRIPT_TMPL + .replace("__LOCAL_FILE_NAME__", &local_file_name.to_string_lossy()) + .replace("__MOD_DIR__", &mod_dir.to_string_lossy()) + .replace("__OUTPUT__", &default_output_path.to_string_lossy()); + let script_path = format!( + "{}/{}", + package_dir.to_string_lossy(), + GO_MOD_DOCKER_SCRIPT_NAME + ); + { + let mut script_file = fs::File::create(&script_path).unwrap(); + fs::set_permissions(&script_path, fs::Permissions::from_mode(0o777)).unwrap(); + script_file.write_all(script_contents.as_bytes()).unwrap(); + } + + let res = docker_go(root_dir, &args); + fs::remove_file(script_path).unwrap(); + res + } +} + +fn extract_file_name(url: &str) -> Result { + let parsed = reqwest::Url::parse(url).context(error::InputUrlSnafu { url })?; + let name = parsed + .path_segments() + .context(error::InputFileBadSnafu { path: url })? + .last() + .context(error::InputFileBadSnafu { path: url })?; + Ok(name.into()) +} + +struct DockerGoArgs<'a> { + module_path: &'a Path, + sdk_image: String, + go_mod_cache: &'a Path, + command: String, +} + +/// Run `docker-go` with the specified arguments. 
+fn docker_go(root_dir: &Path, dg_args: &DockerGoArgs) -> Result<()> { + let args = vec![ + "--module-path", + dg_args + .module_path + .to_str() + .context(error::InputFileSnafu)?, + "--sdk-image", + &dg_args.sdk_image, + "--go-mod-cache", + dg_args + .go_mod_cache + .to_str() + .context(error::InputFileSnafu)?, + "--command", + &dg_args.command, + ]; + let arg_string = args.join(" "); + let program = root_dir.join("tools/docker-go"); + println!("program: {}", program.to_string_lossy()); + let output = cmd(program, args) + .stderr_to_stdout() + .stdout_capture() + .unchecked() + .run() + .context(error::CommandStartSnafu)?; + + let stdout = String::from_utf8_lossy(&output.stdout); + println!("{}", &stdout); + ensure!( + output.status.success(), + error::DockerExecutionSnafu { args: arg_string } + ); + Ok(()) +} diff --git a/tools/buildsys/src/gomod/error.rs b/tools/buildsys/src/gomod/error.rs new file mode 100644 index 00000000..8a422e25 --- /dev/null +++ b/tools/buildsys/src/gomod/error.rs @@ -0,0 +1,33 @@ +use std::path::PathBuf; + +use snafu::Snafu; + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(super)))] +pub(crate) enum Error { + #[snafu(display("Failed to start command: {}", source))] + CommandStart { source: std::io::Error }, + + #[snafu(display("Failed to execute docker-go script. 'args: {}'", args))] + DockerExecution { args: String }, + + #[snafu(display("Input url is required"))] + InputFile, + + #[snafu(display("Input file {} must be a file", path.display()))] + InputFileBad { path: PathBuf }, + + #[snafu(display("Bad file url '{}': {}", url, source))] + InputUrl { + url: String, + source: url::ParseError, + }, + + #[snafu(display("Missing environment variable '{}'", var))] + Environment { + var: String, + source: std::env::VarError, + }, +} + +pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 6e100e9e..158a2cdb 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -10,10 +10,13 @@ The implementation is closely tied to the top-level Dockerfile. */ mod builder; mod cache; +mod gomod; mod manifest; mod project; mod spec; +use crate::gomod::GoMod; +use crate::manifest::BundleModule; use builder::{PackageBuilder, VariantBuilder}; use cache::LookasideCache; use manifest::{ManifestInfo, SupportedArch}; @@ -43,6 +46,10 @@ mod error { source: super::cache::error::Error, }, + GoMod { + source: super::gomod::error::Error, + }, + ProjectCrawl { source: super::project::error::Error, }, @@ -143,6 +150,19 @@ fn build_package() -> Result<()> { if let Some(files) = manifest.external_files() { LookasideCache::fetch(files).context(error::ExternalFileFetchSnafu)?; + for f in files { + if f.bundle_modules.is_none() { + continue; + } + + for b in f.bundle_modules.as_ref().unwrap() { + match b { + BundleModule::Go => { + GoMod::vendor(&root_dir, &manifest_dir, f).context(error::GoModSnafu)? + } + } + } + } } if let Some(groups) = manifest.source_groups() { diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index a3442826..3973542b 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -33,6 +33,44 @@ url = "https://bar" sha512 = "123456" ``` +The `bundle-*` keys on `external-files` are a group of optional modifiers +and are used to untar an upstream external file archive, vendor any dependent +code, and produce an additional archive with those dependencies. +Only `bundle-modules` is required when bundling an archive's dependences. 
+ +`bundle-modules` is a list of module "paradigms" the external-file should +be vendored through. For example, if a project contains a `go.mod` and `go.sum` +file, adding "go" to the list will vendor the dependencies through go modules. +Currently, only "go" is supported. + +`bundle-root-path` is an optional argument that provides the filepath +within the archive that contains the module. By default, the first top level +directory in the archive is used. So, for example, given a Go project that has +the necessary `go.mod` and `go.sum` files in the archive located at the +filepath `a/b/c`, this `bundle-root-path` value should be "a/b/c". Or, given an +archive with a single directory that contains a Go project that has `go.mod` +and `go.sum` files located in that top level directory, this option may be +omitted since the single top-level directory will authomatically be used. + +`bundle-output-path` is an optional argument that provides the desired path of +the output archive. By default, this will use the name of the existing archive, +but pre-pended with "bundled-". For example, if "my-unique-archive-name.tar.gz" +is entered as the value for `bundle-output-path`, then the output directory +will be named `my-unique-archive-name.tar.gz`. Or, by default, given the name +of some upstream archive is "my-package.tar.gz", the output archive would be +named `bundled-my-package.tar.gz`. This output path may then be referenced +within an RPM spec or when creating a package in order to access the vendored +upstream dependencies during build time. +``` +[[package.metadata.build-package.external-files]] +path = "foo" +url = "https://foo" +sha512 = "abcdef" +bundle-modules = [ "go" ] +bundle-root-path = "path/to/module" +bundle-output-path = "path/to/output.tar.gz" +``` + `package-name` lets you override the package name in Cargo.toml; this is useful if you have a package with "." in its name, for example, which Cargo doesn't allow. This means the directory name and spec file name can use your preferred @@ -346,12 +384,21 @@ impl fmt::Display for GrubFeature { } } +#[derive(Deserialize, Debug)] +#[serde(rename_all = "lowercase")] +pub(crate) enum BundleModule { + Go, +} + #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] pub(crate) struct ExternalFile { pub(crate) path: Option, pub(crate) sha512: String, pub(crate) url: String, + pub(crate) bundle_modules: Option>, + pub(crate) bundle_root_path: Option, + pub(crate) bundle_output_path: Option, } impl fmt::Display for SupportedArch { diff --git a/tools/docker-go b/tools/docker-go index 49a75a84..7935ae3c 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -56,7 +56,7 @@ parse_args "${@}" # Pass through relevant Go variables, from the config or environment. go_env=( ) -for i in GOPROXY GONOPROXY GOPRIVATE ; do +for i in GOPROXY GONOPROXY GOPRIVATE GOSUMDB ; do if command -v go >/dev/null 2>&1 ; then govar="$(go env ${i})" if [ -n "${govar}" ] ; then From 0bef7d506c83e7f6df0050f8b31b86836eab8b16 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 11 Aug 2022 00:54:44 +0000 Subject: [PATCH 0740/1356] build: pass variant-related variables into builds Parse the variant tuple into its components with the standard module for that purpose, and pass each component into the build environment, to avoid the need for ad-hoc parsing in spec files. 
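For illustration, here is a minimal sketch (not part of this change) of the decomposition the new `bottlerocket-variant` helper performs, using only the accessors exercised by the binary added below; the variant name and the commented component values are examples consistent with the documentation elsewhere in this series:

```rust
// Illustrative sketch only. The commented values are what the example
// variant "aws-k8s-1.23" is expected to yield, matching the platform/runtime/
// family examples given in the buildsys manifest documentation.
use bottlerocket_variant::Variant;

fn main() {
    let variant =
        Variant::new(String::from("aws-k8s-1.23")).expect("example variant should parse");
    println!("BUILDSYS_VARIANT_PLATFORM={}", variant.platform()); // aws
    println!("BUILDSYS_VARIANT_RUNTIME={}", variant.runtime()); // k8s
    println!("BUILDSYS_VARIANT_FAMILY={}", variant.family()); // aws-k8s
    println!(
        "BUILDSYS_VARIANT_FLAVOR={}",
        variant.variant_flavor().unwrap_or("''") // '' (no flavor for this variant)
    );
}
```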
Signed-off-by: Ben Cressey --- tools/Cargo.lock | 1 + tools/buildsys/Cargo.toml | 1 + .../src/bin/bottlerocket-variant/main.rs | 52 +++++++++++++++++++ tools/buildsys/src/builder.rs | 29 ++++++----- 4 files changed, 71 insertions(+), 12 deletions(-) create mode 100644 tools/buildsys/src/bin/bottlerocket-variant/main.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 1e8d2dc0..18bc4c29 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -489,6 +489,7 @@ dependencies = [ name = "buildsys" version = "0.1.0" dependencies = [ + "bottlerocket-variant", "duct", "hex", "lazy_static", diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml index d9971fda..daf697c4 100644 --- a/tools/buildsys/Cargo.toml +++ b/tools/buildsys/Cargo.toml @@ -9,6 +9,7 @@ publish = false exclude = ["README.md"] [dependencies] +bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } duct = "0.13.0" hex = "0.4.0" lazy_static = "1.4" diff --git a/tools/buildsys/src/bin/bottlerocket-variant/main.rs b/tools/buildsys/src/bin/bottlerocket-variant/main.rs new file mode 100644 index 00000000..77f9e5e2 --- /dev/null +++ b/tools/buildsys/src/bin/bottlerocket-variant/main.rs @@ -0,0 +1,52 @@ +use bottlerocket_variant::Variant; +use snafu::ResultExt; +use std::{env, process}; + +// Returning a Result from main makes it print a Debug representation of the error, but with Snafu +// we have nice Display representations of the error, so we wrap "main" (run) and print any error. +// https://github.com/shepmaster/snafu/issues/110 +fn main() { + if let Err(e) = run() { + eprintln!("{}", e); + process::exit(1); + } +} + +/// Read `BUILDSYS_VARIANT` from the environment, parse into its components, +/// and emit related environment variables to set. +fn run() -> Result<()> { + let variant = Variant::new(getenv("BUILDSYS_VARIANT")?).context(error::VariantParseSnafu)?; + println!("BUILDSYS_VARIANT_PLATFORM={}", variant.platform()); + println!("BUILDSYS_VARIANT_RUNTIME={}", variant.runtime()); + println!("BUILDSYS_VARIANT_FAMILY={}", variant.family()); + println!( + "BUILDSYS_VARIANT_FLAVOR={}", + variant.variant_flavor().unwrap_or("''") + ); + Ok(()) +} + +/// Retrieve a variable that we expect to be set in the environment. +fn getenv(var: &str) -> Result { + env::var(var).context(error::EnvironmentSnafu { var }) +} + +mod error { + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + pub(super) enum Error { + VariantParse { + source: bottlerocket_variant::error::Error, + }, + + #[snafu(display("Missing environment variable '{}'", var))] + Environment { + var: String, + source: std::env::VarError, + }, + } +} + +type Result = std::result::Result; diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 6e739700..93915226 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -80,22 +80,27 @@ impl PackageBuilder { .context(error::UnsupportedArchSnafu { arch: &arch })? .goarch(); - // We do *not* want to rebuild most packages when the variant changes, because most aren't - // affected; packages that care about variant should "echo cargo:rerun-if-env-changed=VAR" - // themselves in the package's spec file. - let var = "BUILDSYS_VARIANT"; - let variant = env::var(var).context(error::EnvironmentSnafu { var })?; - // Same for repo, which is used to determine the correct root.json, which is only included - // in the os package. 
- let var = "PUBLISH_REPO"; - let repo = env::var(var).context(error::EnvironmentSnafu { var })?; - let mut args = Vec::new(); args.build_arg("PACKAGE", package); args.build_arg("ARCH", &arch); args.build_arg("GOARCH", &goarch); - args.build_arg("VARIANT", variant); - args.build_arg("REPO", repo); + + // Pass certain environment variables into the build environment. These variables aren't + // automatically used to trigger rebuilds when they change, because most packages aren't + // affected. Packages that care should "echo cargo:rerun-if-env-changed=VAR" in their + // build.rs build script. + for (src_env_var, target_env_var) in [ + ("BUILDSYS_VARIANT", "VARIANT"), + ("BUILDSYS_VARIANT_PLATFORM", "VARIANT_PLATFORM"), + ("BUILDSYS_VARIANT_RUNTIME", "VARIANT_RUNTIME"), + ("BUILDSYS_VARIANT_FAMILY", "VARIANT_FAMILY"), + ("BUILDSYS_VARIANT_FLAVOR", "VARIANT_FLAVOR"), + ("PUBLISH_REPO", "REPO"), + ] { + let src_env_val = + env::var(src_env_var).context(error::EnvironmentSnafu { var: src_env_var })?; + args.build_arg(target_env_var, src_env_val); + } let tag = format!( "buildsys-pkg-{package}-{arch}", From 6573f183440dd05549fc0922c6de808335531c7b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 11 Aug 2022 04:17:15 +0000 Subject: [PATCH 0741/1356] buildsys: support scoped variant sensitivity The previous definition of variant sensitivity treated any change to the variant as requiring a rebuild. However, packages might depend on only one component of the variant tuple. For example, the kernel package might conditionally enable some modules for the "aws" platform, and a different set for the "metal" platform. To prevent unnecessary rebuilds, packages are now allowed to specify which component, or group of components, they depend upon. Signed-off-by: Ben Cressey --- tools/buildsys/src/main.rs | 25 +++++++++++++++---- tools/buildsys/src/manifest.rs | 44 ++++++++++++++++++++++++++++++---- 2 files changed, 60 insertions(+), 9 deletions(-) diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 158a2cdb..3c5e4035 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -141,10 +141,27 @@ fn build_package() -> Result<()> { let manifest = ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; - // if manifest has package.metadata.build-package.variant-specific = true, then println rerun-if-env-changed - if let Some(sensitive) = manifest.variant_sensitive() { - if sensitive { - println!("cargo:rerun-if-env-changed=BUILDSYS_VARIANT"); + // If manifest has package.metadata.build-package.variant-sensitive set, then track the + // appropriate environment variable for changes. 
+ if let Some(sensitivity) = manifest.variant_sensitive() { + use manifest::{SensitivityType::*, VariantSensitivity::*}; + fn emit_variant_env(suffix: Option<&str>) { + if let Some(suffix) = suffix { + println!( + "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_{}", + suffix.to_uppercase() + ); + } else { + println!("cargo:rerun-if-env-changed=BUILDSYS_VARIANT"); + } + } + match sensitivity { + Any(false) => (), + Any(true) => emit_variant_env(None), + Specific(Platform) => emit_variant_env(Some("platform")), + Specific(Runtime) => emit_variant_env(Some("runtime")), + Specific(Family) => emit_variant_env(Some("family")), + Specific(Flavor) => emit_variant_env(Some("flavor")), } } diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 3973542b..fa791913 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -82,13 +82,29 @@ package-name = "better.name" `variant-sensitive` lets you specify whether the package should be rebuilt when building a new variant, and defaults to false; set it to true if a package is -using the variant to affect its build process. (Typically this means that it -reads BUILDSYS_VARIANT.) +using the variant to affect its build process. + ``` [package.metadata.build-package] variant-sensitive = true ``` +Some packages might only be sensitive to certain components of the variant +tuple, such as the platform, runtime, or family. The `variant-sensitive` field +can also take a string to indicate the source of the sensitivity. + +``` +[package.metadata.build-package] +# sensitive to platform, like "metal" or "aws" +variant-sensitive = "platform" + +# sensitive to runtime, like "k8s" or "ecs" +variant-sensitive = "runtime" + +# sensitive to family, like "metal-k8s" or "aws-ecs" +variant-sensitive = "family" +``` + `releases-url` is ignored by buildsys, but can be used by packager maintainers to indicate a good URL for checking whether the software has had a new release. ``` @@ -209,8 +225,9 @@ impl ManifestInfo { } /// Convenience method to find whether the package is sensitive to variant changes. - pub(crate) fn variant_sensitive(&self) -> Option { - self.build_package().and_then(|b| b.variant_sensitive) + pub(crate) fn variant_sensitive(&self) -> Option<&VariantSensitivity> { + self.build_package() + .and_then(|b| b.variant_sensitive.as_ref()) } /// Convenience method to return the list of included packages. @@ -283,7 +300,24 @@ pub(crate) struct BuildPackage { pub(crate) package_name: Option, pub(crate) releases_url: Option, pub(crate) source_groups: Option>, - pub(crate) variant_sensitive: Option, + pub(crate) variant_sensitive: Option, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "kebab-case")] +#[serde(untagged)] +pub(crate) enum VariantSensitivity { + Any(bool), + Specific(SensitivityType), +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "kebab-case")] +pub(crate) enum SensitivityType { + Platform, + Runtime, + Family, + Flavor, } #[derive(Deserialize, Debug)] From 166f31d514b073d0cb451a565c81aa7c6e09b895 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 11 Aug 2022 04:25:25 +0000 Subject: [PATCH 0742/1356] kernel: scope variant sensitivity to platform Kernel support for various hardware depends on the target platform, and not other attributes of the variant like the orchestrator agent. 
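The kernel packages below switch from `variant-sensitive = true` to `variant-sensitive = "platform"`. As a minimal sketch (not part of this change) of why both forms parse, the reduced copies of the buildsys manifest types here show the untagged enum accepting either a boolean or a scoped string:

```rust
// Illustrative sketch only: reduced copies of the types added to
// tools/buildsys/src/manifest.rs, fed two example `variant-sensitive` values.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
struct BuildPackage {
    variant_sensitive: Option<VariantSensitivity>,
}

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum VariantSensitivity {
    Any(bool),
    Specific(SensitivityType),
}

#[derive(Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
enum SensitivityType {
    Platform,
    Runtime,
    Family,
    Flavor,
}

fn main() {
    // Old form: any variant change triggers a rebuild.
    let any: BuildPackage = toml::from_str("variant-sensitive = true").unwrap();
    assert!(matches!(
        any.variant_sensitive,
        Some(VariantSensitivity::Any(true))
    ));

    // New form: only the platform component matters, so buildsys emits
    // cargo:rerun-if-env-changed=BUILDSYS_VARIANT_PLATFORM instead.
    let scoped: BuildPackage = toml::from_str(r#"variant-sensitive = "platform""#).unwrap();
    assert!(matches!(
        scoped.variant_sensitive,
        Some(VariantSensitivity::Specific(SensitivityType::Platform))
    ));
}
```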
Signed-off-by: Ben Cressey --- packages/kernel-5.10/Cargo.toml | 2 +- packages/kernel-5.15/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index c257034e..8aaff765 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "build.rs" [package.metadata.build-package] -variant-sensitive = true +variant-sensitive = "platform" package-name = "kernel-5.10" [lib] diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 7a19e38b..21d45121 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "build.rs" [package.metadata.build-package] -variant-sensitive = true +variant-sensitive = "platform" package-name = "kernel-5.15" [lib] From 141713af92915c38eb8a5b96594f901458589561 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 11 Aug 2022 05:00:44 +0000 Subject: [PATCH 0743/1356] kernel: always merge platform-specific config Leverage the newly-added variant platform macro to always include platform-specific kernel configuration files, and ensure these files exist even if empty. Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket-aws | 0 packages/kernel-5.10/config-bottlerocket-vmware | 0 packages/kernel-5.10/kernel-5.10.spec | 9 ++++----- packages/kernel-5.15/config-bottlerocket-aws | 0 packages/kernel-5.15/config-bottlerocket-vmware | 0 packages/kernel-5.15/kernel-5.15.spec | 9 ++++----- 6 files changed, 8 insertions(+), 10 deletions(-) create mode 100644 packages/kernel-5.10/config-bottlerocket-aws create mode 100644 packages/kernel-5.10/config-bottlerocket-vmware create mode 100644 packages/kernel-5.15/config-bottlerocket-aws create mode 100644 packages/kernel-5.15/config-bottlerocket-vmware diff --git a/packages/kernel-5.10/config-bottlerocket-aws b/packages/kernel-5.10/config-bottlerocket-aws new file mode 100644 index 00000000..e69de29b diff --git a/packages/kernel-5.10/config-bottlerocket-vmware b/packages/kernel-5.10/config-bottlerocket-vmware new file mode 100644 index 00000000..e69de29b diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 61f3b848..08f24f69 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,5 +1,4 @@ %global debug_package %{nil} -%global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.10 Version: 5.10.130 @@ -10,7 +9,9 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/04a89d2664b3be51cad04255bde6ff8ee1620a5281b0dc1f2f4707e1e6cfe150/kernel-5.10.130-118.517.amzn2.src.rpm Source100: config-bottlerocket -Source101: config-bottlerocket-metal +Source101: config-bottlerocket-aws +Source102: config-bottlerocket-metal +Source103: config-bottlerocket-vmware # Help out-of-tree module builds run `make prepare` automatically. 
Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -96,9 +97,7 @@ scripts/kconfig/merge_config.sh \ ../config-microcode \ %endif %{SOURCE100} \ -%if %{_is_metal_variant} - %{SOURCE101} -%endif + %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} rm -f ../config-* ../*.patch diff --git a/packages/kernel-5.15/config-bottlerocket-aws b/packages/kernel-5.15/config-bottlerocket-aws new file mode 100644 index 00000000..e69de29b diff --git a/packages/kernel-5.15/config-bottlerocket-vmware b/packages/kernel-5.15/config-bottlerocket-vmware new file mode 100644 index 00000000..e69de29b diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index cc1798ce..e3a08e7d 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,5 +1,4 @@ %global debug_package %{nil} -%global _is_metal_variant %(if echo %{_cross_variant} | grep -Fqw "metal"; then echo 1; else echo 0; fi) Name: %{_cross_os}kernel-5.15 Version: 5.15.54 @@ -10,7 +9,9 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/47fc1797c6cf0a9ee2cb4c2ccba9c73a47c0ff75bdb22bf19e939083029881dc/kernel-5.15.54-25.126.amzn2.src.rpm Source100: config-bottlerocket -Source101: config-bottlerocket-metal +Source101: config-bottlerocket-aws +Source102: config-bottlerocket-metal +Source103: config-bottlerocket-vmware # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -94,9 +95,7 @@ scripts/kconfig/merge_config.sh \ ../config-microcode \ %endif %{SOURCE100} \ -%if %{_is_metal_variant} - %{SOURCE101} -%endif + %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} rm -f ../config-* ../*.patch From 007b07e6ab4e0cbd2803efb7a542826acedd8a91 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 7 Sep 2022 14:37:25 -0700 Subject: [PATCH 0744/1356] buildsys: Use bundle name setting for output The `bundled-output-path` setting for external files had a small error when processing the output bundle for Go project dependencies where it would always use the default value, even when a different value was provided. This fixes the name of the variable used so it uses the provided name instead. This already handles setting this to the default value if the package maintainer has not provided an overridden value. Signed-off-by: Sean McGinnis --- tools/buildsys/src/gomod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index 8b3d8378..8d92b237 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -126,7 +126,7 @@ impl GoMod { let script_contents = GO_MOD_SCRIPT_TMPL .replace("__LOCAL_FILE_NAME__", &local_file_name.to_string_lossy()) .replace("__MOD_DIR__", &mod_dir.to_string_lossy()) - .replace("__OUTPUT__", &default_output_path.to_string_lossy()); + .replace("__OUTPUT__", &output_path_arg.to_string_lossy()); let script_path = format!( "{}/{}", package_dir.to_string_lossy(), From b3fcd300fc866173a07b887b7cb6cdec34730463 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 8 Sep 2022 21:08:15 +0000 Subject: [PATCH 0745/1356] docs: update SHA for new 4.root.json We've rotated signing keys for TUF and have deployed a new root.json for interacting with Bottlerocket's TUF repositories. This updates all the checksums for the new root.json in the lookaside cache. 
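For anyone scripting the same verification, a minimal sketch (not part of this change) of computing the digest that BUILDING.md and Infra.toml.example pin; it assumes a locally downloaded `root.json` and uses the `sha2` and `hex` crates that already appear in the tools workspace:

```rust
// Illustrative sketch only: print the SHA-512 hex digest of a downloaded
// root.json so it can be compared against the pinned value.
use sha2::{Digest, Sha512};
use std::fs;

fn main() -> std::io::Result<()> {
    let bytes = fs::read("root.json")?; // assumed local copy of the root role
    println!("{}", hex::encode(Sha512::digest(&bytes)));
    Ok(())
}
```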
--- BUILDING.md | 2 +- tools/pubsys/Infra.toml.example | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 46698424..8e884d54 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -204,7 +204,7 @@ Next, you need the Bottlerocket root role, which is used by tuftool to verify th This will download and verify the root role itself: ```bash curl -O "https://cache.bottlerocket.aws/root.json" -sha512sum -c <<<"e9b1ea5f9b4f95c9b55edada4238bf00b12845aa98bdd2d3edb63ff82a03ada19444546337ec6d6806cbf329027cf49f7fde31f54d551c5e02acbed7efe75785 root.json" +sha512sum -c <<<"b81af4d8eb86743539fbc4709d33ada7b118d9f929f0c2f6c04e1d41f46241ed80423666d169079d736ab79965b4dd25a5a6db5f01578b397496d49ce11a3aa2 root.json" ``` Next, set your desired parameters, and download the kmod kit: diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 889b317a..4fd5e48e 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -14,7 +14,7 @@ root_role_sha512 = "0123456789abcdef" # For reference, this is the Bottlerocket root role: #root_role_url = "https://cache.bottlerocket.aws/root.json" -#root_role_sha512 = "e9b1ea5f9b4f95c9b55edada4238bf00b12845aa98bdd2d3edb63ff82a03ada19444546337ec6d6806cbf329027cf49f7fde31f54d551c5e02acbed7efe75785" +#root_role_sha512 = "b81af4d8eb86743539fbc4709d33ada7b118d9f929f0c2f6c04e1d41f46241ed80423666d169079d736ab79965b4dd25a5a6db5f01578b397496d49ce11a3aa2" # pubsys assumes a single publication key that signs the snapshot, targets, # and timestamp roles. Here you specify where that key lives so we can sign From 3d972a7ca6b2acdb418dffe081f74eb0e0c89f99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Aug 2022 19:57:14 +0000 Subject: [PATCH 0746/1356] build(deps): bump iana-time-zone from 0.1.44 to 0.1.46 in /tools Bumps [iana-time-zone](https://github.com/strawlab/iana-time-zone) from 0.1.44 to 0.1.46. - [Release notes](https://github.com/strawlab/iana-time-zone/releases) - [Changelog](https://github.com/strawlab/iana-time-zone/blob/main/CHANGELOG.md) - [Commits](https://github.com/strawlab/iana-time-zone/compare/0.1.44...v0.1.46) --- updated-dependencies: - dependency-name: iana-time-zone dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 18bc4c29..a9d09e44 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1289,9 +1289,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.44" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf7d67cf4a22adc5be66e75ebdf769b3f2ea032041437a7061f97a63dad4b" +checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" dependencies = [ "android_system_properties", "core-foundation-sys", From 62ab5731648c5b2e3a5a4f4356a1abb6b29b9708 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 16 Sep 2022 04:53:09 +0000 Subject: [PATCH 0747/1356] buildsys: replace unwraps with structured errors Signed-off-by: Ben Cressey --- tools/buildsys/src/gomod.rs | 15 +++++++++++---- tools/buildsys/src/gomod/error.rs | 24 ++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index 8d92b237..b31e19ba 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -132,14 +132,21 @@ impl GoMod { package_dir.to_string_lossy(), GO_MOD_DOCKER_SCRIPT_NAME ); + + // Drop the reference after writing the file to avoid a "text busy" error + // when attempting to execute it. { - let mut script_file = fs::File::create(&script_path).unwrap(); - fs::set_permissions(&script_path, fs::Permissions::from_mode(0o777)).unwrap(); - script_file.write_all(script_contents.as_bytes()).unwrap(); + let mut script_file = fs::File::create(&script_path) + .context(error::CreateFileSnafu { path: &script_path })?; + fs::set_permissions(&script_path, fs::Permissions::from_mode(0o777)) + .context(error::SetFilePermissionsSnafu { path: &script_path })?; + script_file + .write_all(script_contents.as_bytes()) + .context(error::WriteFileSnafu { path: &script_path })?; } let res = docker_go(root_dir, &args); - fs::remove_file(script_path).unwrap(); + fs::remove_file(&script_path).context(error::RemoveFileSnafu { path: &script_path })?; res } } diff --git a/tools/buildsys/src/gomod/error.rs b/tools/buildsys/src/gomod/error.rs index 8a422e25..64d736d3 100644 --- a/tools/buildsys/src/gomod/error.rs +++ b/tools/buildsys/src/gomod/error.rs @@ -28,6 +28,30 @@ pub(crate) enum Error { var: String, source: std::env::VarError, }, + + #[snafu(display("Failed to create '{}': {}", path.display(), source))] + CreateFile { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to set permissions on '{}': {}", path.display(), source))] + SetFilePermissions { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to write contents to '{}': {}", path.display(), source))] + WriteFile { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to remove '{}': {}", path.display(), source))] + RemoveFile { + path: PathBuf, + source: std::io::Error, + }, } pub(super) type Result = std::result::Result; From 980e7f4747ae141e706222174acd50cbeb540b8c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 16 Sep 2022 05:24:46 +0000 Subject: [PATCH 0748/1356] buildsys: sync file timestamps for module bundle Otherwise `cargo` will detect that the file is newer than the output log, and always rebuild. 
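The change itself is a single `touch -r` added to the vendoring script; as a minimal sketch (not part of this change) of the same idea in Rust, assuming the `filetime` crate were available (it is not a buildsys dependency):

```rust
// Illustrative sketch only: copy the reference archive's timestamps onto the
// bundled output, mirroring `touch -r reference output`, so cargo does not
// see the output as newer than its build script log.
use filetime::{set_file_times, FileTime};
use std::{fs, io};

fn sync_timestamps(reference: &str, target: &str) -> io::Result<()> {
    let meta = fs::metadata(reference)?;
    set_file_times(
        target,
        FileTime::from_last_access_time(&meta),
        FileTime::from_last_modification_time(&meta),
    )
}
```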
Signed-off-by: Ben Cressey --- tools/buildsys/src/gomod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index b31e19ba..71c85d93 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -67,6 +67,7 @@ popd tar czf __OUTPUT__ "${targetdir}"/vendor rm -rf "${targetdir}" +touch -r __LOCAL_FILE_NAME__ __OUTPUT__ "###; impl GoMod { From 6c6f34dc2b6b37b4739ddb4342add926502fd7d5 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 19 Aug 2022 22:53:16 +0000 Subject: [PATCH 0749/1356] tools: add diff-kernel-config to identify kernel config changes With multiple kernel versions being supported for two architectures in cloud and metal flavors it can be tedious and error-prone to get the full picture on the effects changes have on the resulting kernel configurations. The new diff-kernel-config tool helps automate this task. diff-kernel-config compares kernel configurations before and after a series of commits to the Bottlerocket repository. For this, it runs several full kernel builds that cover all possible combinations of kernel version, architecture, and cloud/metal flavor that could be shipped in any of the official variants. Combinations not used in any variant are skipped. diff-kernel-config produces kernel config diffs broken down by combination in its output directory, alongside a comprehensive report of all changes and a summary that can aid the code review process. Signed-off-by: Markus Boehme --- tools/diff-kernel-config | 260 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 260 insertions(+) create mode 100755 tools/diff-kernel-config diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config new file mode 100755 index 00000000..3be78fba --- /dev/null +++ b/tools/diff-kernel-config @@ -0,0 +1,260 @@ +#!/usr/bin/env bash + +# +# Common error handling +# + +exit_trap_cmds=() + +on_exit() { + exit_trap_cmds+=( "$1" ) +} + +run_exit_trap_cmds() { + for cmd in "${exit_trap_cmds[@]}"; do + eval "${cmd}" + done +} + +trap run_exit_trap_cmds EXIT + +bail() { + if [[ $# -gt 0 ]]; then + >&2 echo "Error: $*" + fi + exit 1 +} + +usage() { + cat <&2 usage + bail "$1" +} + + +# +# Parse arguments +# + +kernel_versions=() + +while [[ $# -gt 0 ]]; do + case $1 in + -a|--after) + shift; gitrev_after_arg=$1 ;; + -b|--before) + shift; gitrev_before_arg=$1 ;; + -k|--kernel) + shift; kernel_versions+=( "$1" ) ;; + -o|--output-dir) + shift; output_dir=$1 ;; + -h|--help) + usage; exit 0 ;; + *) + usage_error "Invalid option '$1'" ;; + esac + shift +done + +if [[ ${#kernel_versions[@]} -eq 0 ]]; then + kernel_versions=( 5.10 5.15 ) +else + for kver in "${kernel_versions[@]}"; do + case ${kver} in + 5.10) continue ;; + 5.15) continue ;; + *) bail "Unknown kernel version '${kver}'" ;; + esac + done +fi +readonly kernel_versions + +[[ -n ${output_dir} ]] || usage_error 'require -o|--output-dir' +[[ -e ${output_dir} ]] && bail "Output directory '${output_dir}' exists already, not touching it" +readonly output_dir + +# Validate and resolve the given before and after Git revisions. Resolving +# them now prevents relative references from moving around after the first +# checkout. 
+[[ -n ${gitrev_before_arg} ]] || usage_error 'require -b|--before' +[[ -n ${gitrev_after_arg} ]] || usage_error 'require -a|--after' +gitrev_before=$(git rev-parse --verify --end-of-options "${gitrev_before_arg}^{commit}") +gitrev_after=$(git rev-parse --verify --end-of-options "${gitrev_after_arg}^{commit}") +[[ -n ${gitrev_before} ]] || bail "Invalid Git revision '${gitrev_before_arg}'" +[[ -n ${gitrev_after} ]] || bail "Invalid Git revision '${gitrev_after_arg}'" +readonly gitrev_before +readonly gitrev_after + + +# +# Prepare working tree +# + +# We'll check out the before and after states to compare. For that the working +# tree and the index need to be clean. +if [[ -n $(git status --porcelain --untracked-files=no) ]]; then + bail 'The working tree or index of the repository are not clean. ' \ + 'Consider running "git stash" to temporarily stow away your changes.' +fi + +# Restore current repository state whenever we exit (either a checked out +# branch or the current detached head state). +gitrev_original=$(git rev-parse --abbrev-ref HEAD) +if [[ -z ${gitrev_original} ]]; then + gitrev_original=$(git rev-parse HEAD) || bail 'Cannot determine current repository HEAD.' +fi +readonly gitrev_original +on_exit "git checkout --quiet '${gitrev_original}'" + + +# +# Iterate over all viable build configurations in before and after states +# + +mkdir -p "${output_dir}" || bail "Failed to create output directory '${output_dir}'" + +for state in before after; do + + gitrev_var=gitrev_${state} + git checkout --quiet "${!gitrev_var}" || bail "Cannot check out '${!gitrev_var}'." + + for arch in aarch64 x86_64; do + + for kver in "${kernel_versions[@]}"; do + + variants=() + case ${kver} in + 5.10) + variants+=( 'aws-k8s-1.23' ) + if [[ ${arch} = x86_64 ]]; then + variants+=( 'metal-k8s-1.23' ) + fi + ;; + 5.15) + variants+=( 'aws-dev' 'metal-dev' ) + ;; + *) + bail "No known variants build kernel ${kver}." + ;; + esac + + for variant in "${variants[@]}"; do + + debug_id="state=${state} arch=${arch} variant=${variant}" + + # + # Run build + # + + cargo make \ + -e BUILDSYS_ARCH="${arch}" \ + -e BUILDSYS_VARIANT="${variant}" \ + -e PACKAGE="kernel-${kver/./_}" \ + build-package \ + || bail "Build failed for ${debug_id}" + + # + # Find kernel RPM + # + + shopt -s nullglob + kernel_rpms=( + ./build/rpms/bottlerocket-"${arch}"-*kernel-"${kver}"-"${kver}".*.rpm + ) + shopt -u nullglob + + case ${#kernel_rpms[@]} in + 0) bail "No kernel RPM found for ${debug_id}" ;; + 1) kernel_rpm=${kernel_rpms[0]} ;; + *) bail "More than one kernel RPM found for ${debug_id}" ;; + esac + + + # + # Extract kernel config + # + + config_path=./${output_dir}/config-${arch}-${kver}-${variant}-${state} + rpm2cpio "${kernel_rpm}" \ + | cpio --quiet --extract --to-stdout ./boot/config >"${config_path}" + [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" + + done # variant + + done # kver + + done # arch + +done # state + + +# +# Post-process the collected pairs of "before" and "after" configs (generate diffs, a report, a summary) +# + +# Get the helpful diffconfig script from the kernel source tree. We package it +# in the kernel-archive RPM from where it can be extracted. Here we extract the +# latest version of the script, but any kernel version and arch will do. 
+latest_kver=$(printf '%s\n' "${kernel_versions[@]}" | sort -V | tail -n1) +latest_archive_rpms=( ./build/rpms/bottlerocket-aarch64-kernel-"${latest_kver}"-archive-*.rpm ) +diffconfig=$(mktemp --suffix -bottlerocket-diffconfig) +on_exit "rm '${diffconfig}'" +rpm2cpio "${latest_archive_rpms[0]}" \ + | cpio --quiet --extract --to-stdout \ + | tar --xz --extract --to-stdout kernel-devel/scripts/diffconfig >"${diffconfig}" +[[ -s ${diffconfig} ]] || bail "Failed to extract diffconfig tool from '${latest_archive_rpms[0]}'." +chmod +x "${diffconfig}" + +# Diff the before and after states for each collected pair +for config_before in "${output_dir}"/config-*-before; do + config_after=${config_before/before/after} + config_diff=${config_before/before/diff} + "${diffconfig}" "${config_before}" "${config_after}" >"${config_diff}" \ + || bail "Failed to diff '${config_before}' and '${config_after}'" +done + +# Generate diff summary +echo +for config_diff in "${output_dir}"/config-*-diff; do + config_base=${config_diff##*/} + awk " + /^-/ { removed += 1 } + /^+/ { added += 1 } + / -> / { changed += 1 } + END { printf \"${config_base}:\t%3d removed, %3d added, %3d changed\n\", removed, added, changed } + " "${config_diff}" +done | sort -V +echo + +# Generate combined report of changes +head -v -n 999999 "${output_dir}"/*-diff >"${output_dir}"/diff-report +echo "A full report has been placed in '${output_dir}/diff-report'" From 785ed8946df8c73369a76d51bcd21b9bfbd52fd5 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 7 Sep 2022 14:49:25 +0000 Subject: [PATCH 0750/1356] kernel: add bare bones readme with advice for testing config changes The diff-kernel-config tool may not be easily discoverable in the tools/ directory of the repository. Create minimal readme files in the kernel packages that point out its existence. The tool's usage information will answer any questions about how to produce the desired config change report. Signed-off-by: Markus Boehme --- packages/kernel-5.10/README.md | 16 ++++++++++++++++ packages/kernel-5.15/README.md | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 packages/kernel-5.10/README.md create mode 100644 packages/kernel-5.15/README.md diff --git a/packages/kernel-5.10/README.md b/packages/kernel-5.10/README.md new file mode 100644 index 00000000..6d05729e --- /dev/null +++ b/packages/kernel-5.10/README.md @@ -0,0 +1,16 @@ +# kernel-5.10 + +This package contains the Bottlerocket Linux kernel of the 5.10 series. + + +## Testing of Configuration Changes + +Bottlerocket kernels are built in multiple flavors (e.g. cloud, bare metal) and for multiple architectures (e.g. aarch64, x86_64). +The kernel configuration for any of those combinations might change independently of the others. +Please use `tools/diff-kernel-config` from the main Bottlerocket repository to ensure the configuration for any of the combinations does not change inadvertently. +Changes that can have an effect on the resulting kernel configuration include: + +* explicit kernel configuration changes +* package updates/kernel rebases + +Reviewers on a pull request potentially changing the kernel configuration will appreciate having the report produced by `diff-kernel-config` included in the PR description. diff --git a/packages/kernel-5.15/README.md b/packages/kernel-5.15/README.md new file mode 100644 index 00000000..4cd4f58d --- /dev/null +++ b/packages/kernel-5.15/README.md @@ -0,0 +1,16 @@ +# kernel-5.15 + +This package contains the Bottlerocket Linux kernel of the 5.15 series. 
+ + +## Testing of Configuration Changes + +Bottlerocket kernels are built in multiple flavors (e.g. cloud, bare metal) and for multiple architectures (e.g. aarch64, x86_64). +The kernel configuration for any of those combinations might change independently of the others. +Please use `tools/diff-kernel-config` from the main Bottlerocket repository to ensure the configuration for any of the combinations does not change inadvertently. +Changes that can have an effect on the resulting kernel configuration include: + +* explicit kernel configuration changes +* package updates/kernel rebases + +Reviewers on a pull request potentially changing the kernel configuration will appreciate having the report produced by `diff-kernel-config` included in the PR description. From a1e8338a8b097fe2aef593338da93cf68c28520f Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 20 Sep 2022 11:36:23 -0500 Subject: [PATCH 0751/1356] lint: Remove unnecessary return With the 1.63.0 versions of rustc/cargo, there is a clippy error due to `return` being used when it is not needed. This removes the return statement to make the linter happy. Signed-off-by: Sean McGinnis --- tools/testsys/src/aws_resources.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 5a358272..097179cd 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -391,11 +391,9 @@ impl AwsEcs { testsys_images: &TestsysImages, ) -> Result> { match test { - TestType::Conformance => { - return Err(anyhow!( - "Conformance testing for ECS variants is not supported." - )) - } + TestType::Conformance => Err(anyhow!( + "Conformance testing for ECS variants is not supported." + )), TestType::Quick => self.ecs_test_crds(testsys_images), TestType::Migration => self.migration_test_crds(testsys_images).await, } From bdee0b4e24b0281890a100412b69476eb470725a Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 20 Sep 2022 20:43:39 +0000 Subject: [PATCH 0752/1356] pubsys: update to AWS SDK Rust --- tools/Cargo.lock | 196 ++++++++++----------- tools/deny.toml | 13 +- tools/pubsys-config/src/lib.rs | 1 - tools/pubsys/Cargo.toml | 25 +-- tools/pubsys/src/aws/ami/mod.rs | 220 ++++++++++-------------- tools/pubsys/src/aws/ami/register.rs | 153 ++++++++-------- tools/pubsys/src/aws/ami/snapshot.rs | 1 + tools/pubsys/src/aws/ami/wait.rs | 80 ++++----- tools/pubsys/src/aws/client.rs | 188 ++++++-------------- tools/pubsys/src/aws/mod.rs | 32 +--- tools/pubsys/src/aws/promote_ssm/mod.rs | 35 ++-- tools/pubsys/src/aws/publish_ami/mod.rs | 176 ++++++++----------- tools/pubsys/src/aws/ssm/mod.rs | 58 ++----- tools/pubsys/src/aws/ssm/ssm.rs | 81 ++++----- tools/pubsys/src/aws/ssm/template.rs | 4 +- tools/pubsys/src/main.rs | 20 ++- tools/pubsys/src/repo.rs | 41 +++-- 17 files changed, 549 insertions(+), 775 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a9d09e44..9a0f9c02 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -102,9 +102,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -186,6 +186,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-ebs" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702e6f505ce8d61f0bef4d2b2747f156dcbd6ba23b2a870ad9aa868830f026c5" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http", + "tokio-stream", + "tower", +] + [[package]] name = "aws-sdk-ec2" version = "0.16.0" @@ -210,6 +233,51 @@ dependencies = [ "tower", ] +[[package]] +name = "aws-sdk-kms" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fee45083be6f062676aaeab0fe16c931a4e188d2ddce6f2c8d17399c014dc81" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "http", + "tokio-stream", + "tower", +] + +[[package]] +name = "aws-sdk-ssm" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d41b62dcea814079c2f6540c465b6d45210b20a4e30efdcd24cc5dcf4aec" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http", + "tokio-stream", + "tower", +] + [[package]] name = "aws-sdk-sso" version = "0.16.0" @@ -633,22 +701,22 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415be2bbdb84dd0d33de3099c74b9f96bd78157fe54c2073683c2b4c4811463d" +checksum = "111badfcdc635ecbffc73bfa46ad0a83ae0c41bc01e09c4045ca3ffb1046c5c5" dependencies = [ "argh", "async-trait", + "aws-config", + "aws-sdk-ebs", + "aws-sdk-ec2", + "aws-smithy-http", + "aws-types", "base64", 
"bytes", "futures", "indicatif", "nix", - "rusoto_core", - "rusoto_credential", - "rusoto_ebs", - "rusoto_ec2", - "rusoto_signature", "sha2 0.10.2", "snafu", "tempfile", @@ -1673,7 +1741,6 @@ dependencies = [ "bitflags", "cfg-if", "libc", - "memoffset", ] [[package]] @@ -1984,11 +2051,21 @@ name = "pubsys" version = "0.1.0" dependencies = [ "async-trait", + "aws-config", + "aws-sdk-ebs", + "aws-sdk-ec2", + "aws-sdk-kms", + "aws-sdk-ssm", + "aws-sdk-sts", + "aws-smithy-types", + "aws-types", "chrono", "clap 3.2.15", "coldsnap", "duct", + "env_logger", "futures", + "http", "indicatif", "lazy_static", "log", @@ -1997,18 +2074,9 @@ dependencies = [ "pubsys-config", "rayon", "reqwest", - "rusoto_core", - "rusoto_credential", - "rusoto_ebs", - "rusoto_ec2", - "rusoto_kms", - "rusoto_signature", - "rusoto_ssm", - "rusoto_sts", "semver", "serde", "serde_json", - "simplelog", "snafu", "structopt", "tempfile", @@ -2277,49 +2345,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rusoto_ebs" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3ab3b70d6e2b9e8550bc50a42fd03e5cf43b1146b3a2a4f73fae867c08787b2" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "rusoto_ec2" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666c2f36b125e43229892f1a0d81ad28c0d0231d3b8b00ab0e8120975d6138ca" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde_urlencoded", - "xml-rs", -] - -[[package]] -name = "rusoto_kms" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e1fc19cfcfd9f6b2f96e36d5b0dddda9004d2cbfc2d17543e3b9f10cc38fce8" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde", - "serde_json", -] - [[package]] name = "rusoto_s3" version = "0.48.0" @@ -2359,35 +2384,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rusoto_ssm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166034bb4835e1e6a7ac1cc659c9798e751cd75d7244f37beeaa12f2bbdda30b" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde", - "serde_json", -] - -[[package]] -name = "rusoto_sts" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1643f49aa67cb7cb895ebac5a2ff3f991c6dbdc58ad98b28158cd5706aecd1d" -dependencies = [ - "async-trait", - "bytes", - "chrono", - "futures", - "rusoto_core", - "serde_urlencoded", - "xml-rs", -] - [[package]] name = "rustc-demangle" version = "0.1.21" @@ -3181,15 +3177,14 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.3.6" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baafc5e2a7f5207043f0a7e50f5c5163571c751c6a2d61a642bb8d3acdcc9659" +checksum = "52e17edfb12e2c08c9ac3fe6ebb43fafc82fa9ffbfffbe50030b7623f8f42f34" dependencies = [ + "aws-config", + "aws-sdk-kms", "pem", "ring", - "rusoto_core", - "rusoto_credential", - "rusoto_kms", "snafu", "tokio", "tough", @@ -3197,13 +3192,12 @@ dependencies = [ [[package]] name = "tough-ssm" -version = "0.6.6" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a8be927f383be49de8e032532b72e82e7129f248c742bfa47247435bc7cdfb" +checksum = "71e423321963b68b425bc844c01a16bfcab3b8300ab768dad46992201026421e" dependencies = [ - "rusoto_core", - 
"rusoto_credential", - "rusoto_ssm", + "aws-config", + "aws-sdk-ssm", "serde", "serde_json", "snafu", diff --git a/tools/deny.toml b/tools/deny.toml index b1549246..65210ca1 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -58,14 +58,17 @@ multiple-versions = "deny" wildcards = "deny" skip-tree = [ - # temporarily using a different version of snafu - { name = "parse-datetime", version = "0.1.0" }, - # rusoto_signature uses an older version of sha2 { name = "rusoto_signature" }, - # reqwest uses an older rustls-pemfile - { name = "reqwest", version = "0.11.10" }, + # argh_derive pulls in an older version of heck + { name = "argh_derive", version = "0.1.8" }, + + # structopt pulls in an older version of clap + { name = "structopt", version = "0.3.26" }, + + # aws-smithy-client uses an older hyper-rustls + { name = "aws-smithy-client", version = "0.46.0" }, ] [sources] diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs index 70345680..7feccd31 100644 --- a/tools/pubsys-config/src/lib.rs +++ b/tools/pubsys-config/src/lib.rs @@ -134,7 +134,6 @@ pub struct AwsConfig { #[serde(deny_unknown_fields)] pub struct AwsRegionConfig { pub role: Option, - pub endpoint: Option, } /// Location of signing keys diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 8e301c44..c8298e71 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -8,12 +8,22 @@ publish = false [dependencies] async-trait = "0.1.53" +aws-config = "0.46.0" +aws-sdk-ebs = "0.16.0" +aws-sdk-ec2 = "0.16.0" +aws-sdk-kms = "0.16.0" +aws-sdk-ssm = "0.16.0" +aws-sdk-sts = "0.16.0" +aws-smithy-types = "0.46.0" +aws-types = "0.46.0" chrono = "0.4" clap = "3.1" -coldsnap = { version = "0.3", default-features = false, features = ["rusoto-rustls"]} +coldsnap = { version = "0.4", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13.0" +env_logger = "0.9" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } futures = "0.3.5" +http = "0.2.8" indicatif = "0.16.0" lazy_static = "1.4" log = "0.4" @@ -22,15 +32,6 @@ parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. 
reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } -rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_credential = "0.48.0" -rusoto_ebs = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_ec2 = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_kms = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_signature = "0.48.0" -rusoto_ssm = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_sts = { version = "0.48.0", default-features = false, features = ["rustls"] } -simplelog = "0.12" snafu = "0.7" semver = "1.0" serde = { version = "1.0", features = ["derive"] } @@ -41,8 +42,8 @@ tokio = { version = "1", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" tough = { version = "0.12", features = ["http"] } -tough-kms = "0.3" -tough-ssm = "0.6" +tough-kms = "0.4" +tough-ssm = "0.7" update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1.0" } url = { version = "2.1.0", features = ["serde"] } tempfile = "3.1" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 67bd6193..6742eb39 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -6,19 +6,22 @@ mod snapshot; pub(crate) mod wait; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; -use crate::aws::{client::build_client, parse_arch, region_from_string}; +use crate::aws::{client::build_client_config, parse_arch, region_from_string}; use crate::Args; +use aws_sdk_ebs::Client as EbsClient; +use aws_sdk_ec2::error::CopyImageError; +use aws_sdk_ec2::model::{ArchitectureValues, OperationType}; +use aws_sdk_ec2::output::CopyImageOutput; +use aws_sdk_ec2::types::SdkError; +use aws_sdk_ec2::{Client as Ec2Client, Region}; +use aws_sdk_sts::error::GetCallerIdentityError; +use aws_sdk_sts::output::GetCallerIdentityOutput; +use aws_sdk_sts::Client as StsClient; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; use log::{error, info, trace, warn}; -use pubsys_config::{AwsConfig, InfraConfig}; +use pubsys_config::{AwsConfig as PubsysAwsConfig, InfraConfig}; use register::{get_ami_id, register_image, RegisteredIds}; -use rusoto_core::{Region, RusotoError}; -use rusoto_ebs::EbsClient; -use rusoto_ec2::{CopyImageError, CopyImageRequest, CopyImageResult, Ec2, Ec2Client}; -use rusoto_sts::{ - GetCallerIdentityError, GetCallerIdentityRequest, GetCallerIdentityResponse, Sts, StsClient, -}; use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; @@ -41,15 +44,15 @@ pub(crate) struct AmiArgs { /// Desired root volume size in gibibytes #[structopt(long)] - root_volume_size: Option, + root_volume_size: Option, /// Desired data volume size in gibibytes #[structopt(long)] - data_volume_size: Option, + data_volume_size: Option, /// The architecture of the machine image #[structopt(short = "a", long, parse(try_from_str = parse_arch))] - arch: String, + arch: ArchitectureValues, /// The desired AMI name #[structopt(short = "n", long)] @@ -106,8 +109,8 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> aws.regions.clone().into() } .into_iter() - .map(|name| region_from_string(&name, &aws).context(error::ParseRegionSnafu)) - .collect::>>()?; + .map(|name| region_from_string(&name)) + .collect::>(); 
ensure!( !regions.is_empty(), @@ -120,46 +123,37 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let base_region = regions.remove(0); // Build EBS client for snapshot management, and EC2 client for registration - let base_ebs_client = build_client::(&base_region, &base_region, &aws).context( - error::ClientSnafu { - client_type: "EBS", - region: base_region.name(), - }, - )?; - let base_ec2_client = build_client::(&base_region, &base_region, &aws).context( - error::ClientSnafu { - client_type: "EC2", - region: base_region.name(), - }, - )?; + let client_config = build_client_config(&base_region, &base_region, &aws).await; + + let base_ebs_client = EbsClient::new(&client_config); + + let base_ec2_client = Ec2Client::new(&client_config); // Check if the AMI already exists, in which case we can use the existing ID, otherwise we // register a new one. let maybe_id = get_ami_id( &ami_args.name, &ami_args.arch, - base_region.name(), + &base_region, &base_ec2_client, ) .await .context(error::GetAmiIdSnafu { name: &ami_args.name, - arch: &ami_args.arch, - region: base_region.name(), + arch: ami_args.arch.as_ref(), + region: base_region.as_ref(), })?; let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { warn!( "Found '{}' already registered in {}: {}", - ami_args.name, - base_region.name(), - found_id + ami_args.name, base_region, found_id ); let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) .await .context(error::GetSnapshotsSnafu { image_id: &found_id, - region: base_region.name(), + region: base_region.as_ref(), })?; let found_ids = RegisteredIds { image_id: found_id, @@ -167,29 +161,22 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> }; (found_ids, true) } else { - let new_ids = register_image( - ami_args, - base_region.name(), - base_ebs_client, - &base_ec2_client, - ) - .await - .context(error::RegisterImageSnafu { - name: &ami_args.name, - arch: &ami_args.arch, - region: base_region.name(), - })?; + let new_ids = register_image(ami_args, &base_region, base_ebs_client, &base_ec2_client) + .await + .context(error::RegisterImageSnafu { + name: &ami_args.name, + arch: ami_args.arch.as_ref(), + region: base_region.as_ref(), + })?; info!( "Registered AMI '{}' in {}: {}", - ami_args.name, - base_region.name(), - new_ids.image_id + ami_args.name, base_region, new_ids.image_id ); (new_ids, false) }; amis.insert( - base_region.name().to_string(), + base_region.as_ref().to_string(), Image::new(&ids_of_image.image_id, &ami_args.name), ); @@ -211,7 +198,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> .await .context(error::WaitAmiSnafu { id: &ids_of_image.image_id, - region: base_region.name(), + region: base_region.as_ref(), })?; // For every other region, initiate copy-image calls. @@ -223,18 +210,14 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // Get the account ID used in the base region; we don't need to grant to it so we can remove it // from the list. 
- let base_sts_client = build_client::(&base_region, &base_region, &aws).context( - error::ClientSnafu { - client_type: "STS", - region: base_region.name(), + let client_config = build_client_config(&base_region, &base_region, &aws).await; + let base_sts_client = StsClient::new(&client_config); + + let response = base_sts_client.get_caller_identity().send().await.context( + error::GetCallerIdentitySnafu { + region: base_region.as_ref(), }, )?; - let response = base_sts_client - .get_caller_identity(GetCallerIdentityRequest {}) - .await - .context(error::GetCallerIdentitySnafu { - region: base_region.name(), - })?; let base_account_id = response.account.context(error::MissingInResponseSnafu { request_type: "GetCallerIdentity", missing: "account", @@ -249,7 +232,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> modify_snapshots( Some(account_id_vec.clone()), None, - "add", + &OperationType::Add, &ids_of_image.snapshot_ids, &base_ec2_client, &base_region, @@ -257,21 +240,20 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> .await .context(error::GrantAccessSnafu { thing: "snapshots", - region: base_region.name(), + region: base_region.as_ref(), })?; modify_image( Some(account_id_vec.clone()), None, - "add", + &OperationType::Add, &ids_of_image.image_id, &base_ec2_client, - &base_region, ) .await - .context(error::GrantAccessSnafu { + .context(error::GrantImageAccessSnafu { thing: "image", - region: base_region.name(), + region: base_region.as_ref(), })?; } @@ -279,11 +261,8 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // clients because they're used in a future and need to live until the future is resolved. let mut ec2_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { - let ec2_client = - build_client::(region, &base_region, &aws).context(error::ClientSnafu { - client_type: "EC2", - region: region.name(), - })?; + let client_config = build_client_config(region, &base_region, &aws).await; + let ec2_client = Ec2Client::new(&client_config); ec2_clients.insert(region.clone(), ec2_client); } @@ -292,7 +271,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let mut get_requests = Vec::with_capacity(regions.len()); for region in regions.iter() { let ec2_client = &ec2_clients[region]; - let get_request = get_ami_id(&ami_args.name, &ami_args.arch, region.name(), ec2_client); + let get_request = get_ami_id(&ami_args.name, &ami_args.arch, region, ec2_client); let info_future = ready(region.clone()); get_requests.push(join(info_future, get_request)); } @@ -305,41 +284,33 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> for (region, get_response) in get_responses { let get_response = get_response.context(error::GetAmiIdSnafu { name: &ami_args.name, - arch: &ami_args.arch, - region: region.name(), + arch: ami_args.arch.as_ref(), + region: region.as_ref(), })?; if let Some(id) = get_response { info!( "Found '{}' already registered in {}: {}", - ami_args.name, - region.name(), - id + ami_args.name, region, id ); - amis.insert(region.name().to_string(), Image::new(&id, &ami_args.name)); + amis.insert(region.as_ref().to_string(), Image::new(&id, &ami_args.name)); continue; } let ec2_client = &ec2_clients[®ion]; - let copy_request = CopyImageRequest { - description: ami_args.description.clone(), - name: ami_args.name.clone(), - source_image_id: ids_of_image.image_id.clone(), - source_region: base_region.name().to_string(), - ..Default::default() - }; - let copy_future = ec2_client.copy_image(copy_request); + let 
base_region = base_region.to_owned(); + let copy_future = ec2_client + .copy_image() + .set_description(ami_args.description.clone()) + .set_name(Some(ami_args.name.clone())) + .set_source_image_id(Some(ids_of_image.image_id.clone())) + .set_source_region(Some(base_region.as_ref().to_string())) + .send(); - let base_region_name = base_region.name(); // Store the region so we can output it to the user let region_future = ready(region.clone()); // Let the user know the copy is starting, when this future goes to run - let message_future = lazy(move |_| { - info!( - "Starting copy from {} to {}", - base_region_name, - region.name() - ) - }); + let message_future = + lazy(move |_| info!("Starting copy from {} to {}", base_region, region)); copy_requests.push(message_future.then(|_| join(region_future, copy_future))); } @@ -357,7 +328,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> // Run through the stream and collect results into a list. let copy_responses: Vec<( Region, - std::result::Result>, + std::result::Result>, )> = request_stream.collect().await; // Report on successes and errors; don't fail immediately if we see an error so we can report @@ -369,26 +340,23 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> if let Some(image_id) = success.image_id { info!( "Registered AMI '{}' in {}: {}", - ami_args.name, - region.name(), - image_id, + ami_args.name, region, image_id, ); amis.insert( - region.name().to_string(), + region.as_ref().to_string(), Image::new(&image_id, &ami_args.name), ); } else { saw_error = true; error!( "Registered AMI '{}' in {} but didn't receive an AMI ID!", - ami_args.name, - region.name(), + ami_args.name, region, ); } } Err(e) => { saw_error = true; - error!("Copy to {} failed: {}", region.name(), e); + error!("Copy to {} failed: {}", region, e); } } } @@ -420,7 +388,7 @@ impl Image { async fn get_account_ids( regions: &[Region], base_region: &Region, - aws: &AwsConfig, + pubsys_aws_config: &PubsysAwsConfig, ) -> Result> { let mut grant_accounts = HashSet::new(); @@ -428,18 +396,15 @@ async fn get_account_ids( // live until the future is resolved. let mut sts_clients = HashMap::with_capacity(regions.len()); for region in regions.iter() { - let sts_client = - build_client::(region, base_region, aws).context(error::ClientSnafu { - client_type: "STS", - region: region.name(), - })?; + let client_config = build_client_config(region, base_region, pubsys_aws_config).await; + let sts_client = StsClient::new(&client_config); sts_clients.insert(region.clone(), sts_client); } let mut requests = Vec::with_capacity(regions.len()); for region in regions.iter() { let sts_client = &sts_clients[region]; - let response_future = sts_client.get_caller_identity(GetCallerIdentityRequest {}); + let response_future = sts_client.get_caller_identity().send(); // Store the region so we can include it in any errors let region_future = ready(region.clone()); @@ -450,12 +415,12 @@ async fn get_account_ids( // Run through the stream and collect results into a list. 
let responses: Vec<( Region, - std::result::Result>, + std::result::Result>, )> = request_stream.collect().await; for (region, response) in responses { let response = response.context(error::GetCallerIdentitySnafu { - region: region.name(), + region: region.as_ref(), })?; let account_id = response.account.context(error::MissingInResponseSnafu { request_type: "GetCallerIdentity", @@ -469,9 +434,10 @@ async fn get_account_ids( } mod error { - use crate::aws::{self, ami, publish_ami}; - use rusoto_core::RusotoError; - use rusoto_sts::GetCallerIdentityError; + use crate::aws::{ami, publish_ami}; + use aws_sdk_ec2::error::ModifyImageAttributeError; + use aws_sdk_ec2::types::SdkError; + use aws_sdk_sts::error::GetCallerIdentityError; use snafu::Snafu; use std::path::PathBuf; @@ -481,17 +447,8 @@ mod error { #[snafu(display("Some AMIs failed to copy, see above"))] AmiCopy, - #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] - Client { - client_type: String, - region: String, - source: aws::client::Error, - }, - #[snafu(display("Error reading config: {}", source))] - Config { - source: pubsys_config::Error, - }, + Config { source: pubsys_config::Error }, #[snafu(display("Failed to create file '{}': {}", path.display(), source))] FileCreate { @@ -510,7 +467,7 @@ mod error { #[snafu(display("Error getting account ID in {}: {}", region, source))] GetCallerIdentity { region: String, - source: RusotoError, + source: SdkError, }, #[snafu(display( @@ -532,21 +489,22 @@ mod error { source: publish_ami::Error, }, - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { - missing: String, + #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))] + GrantImageAccess { + thing: String, + region: String, + source: SdkError, }, + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, + #[snafu(display("Response to {} was missing {}", request_type, missing))] MissingInResponse { request_type: String, missing: String, }, - ParseRegion { - source: crate::aws::Error, - }, - #[snafu(display("Error registering {} {} in {}: {}", arch, name, region, source))] RegisterImage { name: String, diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 11433ff3..ace09424 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -1,11 +1,11 @@ use super::{snapshot::snapshot_from_image, AmiArgs}; +use aws_sdk_ebs::Client as EbsClient; +use aws_sdk_ec2::model::{ + ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType, +}; +use aws_sdk_ec2::{Client as Ec2Client, Region}; use coldsnap::{SnapshotUploader, SnapshotWaiter}; use log::{debug, info, warn}; -use rusoto_ebs::EbsClient; -use rusoto_ec2::{ - BlockDeviceMapping, DeleteSnapshotRequest, DescribeImagesRequest, EbsBlockDevice, Ec2, - Ec2Client, Filter, RegisterImageRequest, -}; use snafu::{ensure, OptionExt, ResultExt}; const ROOT_DEVICE_NAME: &str = "/dev/xvda"; @@ -27,7 +27,7 @@ pub(crate) struct RegisteredIds { /// they can be cleaned up on failure if desired. 
async fn _register_image( ami_args: &AmiArgs, - region: &str, + region: &Region, ebs_client: EbsClient, ec2_client: &Ec2Client, cleanup_snapshot_ids: &mut Vec, @@ -39,7 +39,7 @@ async fn _register_image( .await .context(error::SnapshotSnafu { path: &ami_args.root_image, - region, + region: region.as_ref(), })?; cleanup_snapshot_ids.push(root_snapshot.clone()); @@ -49,7 +49,7 @@ async fn _register_image( .await .context(error::SnapshotSnafu { path: &ami_args.root_image, - region, + region: region.as_ref(), })?; cleanup_snapshot_ids.push(snapshot.clone()); data_snapshot = Some(snapshot); @@ -74,17 +74,17 @@ async fn _register_image( } // Prepare parameters for AMI registration request - let root_bdm = BlockDeviceMapping { - device_name: Some(ROOT_DEVICE_NAME.to_string()), - ebs: Some(EbsBlockDevice { - delete_on_termination: Some(true), - snapshot_id: Some(root_snapshot.clone()), - volume_type: Some(VOLUME_TYPE.to_string()), - volume_size: ami_args.root_volume_size, - ..Default::default() - }), - ..Default::default() - }; + let root_bdm = BlockDeviceMapping::builder() + .set_device_name(Some(ROOT_DEVICE_NAME.to_string())) + .set_ebs(Some( + EbsBlockDevice::builder() + .set_delete_on_termination(Some(true)) + .set_snapshot_id(Some(root_snapshot.clone())) + .set_volume_type(Some(VolumeType::from(VOLUME_TYPE))) + .set_volume_size(ami_args.root_volume_size) + .build(), + )) + .build(); let mut data_bdm = None; if let Some(ref data_snapshot) = data_snapshot { @@ -102,27 +102,28 @@ async fn _register_image( block_device_mappings.push(data_bdm); } - let register_request = RegisterImageRequest { - architecture: Some(ami_args.arch.clone()), - block_device_mappings: Some(block_device_mappings), - description: ami_args.description.clone(), - ena_support: Some(ENA), - name: ami_args.name.clone(), - root_device_name: Some(ROOT_DEVICE_NAME.to_string()), - sriov_net_support: Some(SRIOV.to_string()), - virtualization_type: Some(VIRT_TYPE.to_string()), - ..Default::default() - }; - info!("Making register image call in {}", region); let register_response = ec2_client - .register_image(register_request) + .register_image() + .set_architecture(Some(ami_args.arch.clone())) + .set_block_device_mappings(Some(block_device_mappings)) + .set_description(ami_args.description.clone()) + .set_ena_support(Some(ENA)) + .set_name(Some(ami_args.name.clone())) + .set_root_device_name(Some(ROOT_DEVICE_NAME.to_string())) + .set_sriov_net_support(Some(SRIOV.to_string())) + .set_virtualization_type(Some(VIRT_TYPE.to_string())) + .send() .await - .context(error::RegisterImageSnafu { region })?; + .context(error::RegisterImageSnafu { + region: region.as_ref(), + })?; let image_id = register_response .image_id - .context(error::MissingImageIdSnafu { region })?; + .context(error::MissingImageIdSnafu { + region: region.as_ref(), + })?; let mut snapshot_ids = vec![root_snapshot]; if let Some(data_snapshot) = data_snapshot { @@ -139,7 +140,7 @@ async fn _register_image( /// mapping. Deletes snapshots on failure. 
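A note on the model-type change above, before the `register_image` wrapper that follows: the SDK's request and model types are built through builders rather than struct literals with `..Default::default()`, and the `set_*` setters take `Option`s, so existing `Option`-typed arguments (`description`, `root_volume_size`, and so on) pass through unchanged. A hedged sketch of the same block-device-mapping shape, with illustrative values:

```rust
use aws_sdk_ec2::model::{BlockDeviceMapping, EbsBlockDevice, VolumeType};

// Sketch only; device name, volume type, and sizes are illustrative.
fn example_bdm(snapshot_id: String, volume_size: Option<i32>) -> BlockDeviceMapping {
    BlockDeviceMapping::builder()
        .set_device_name(Some("/dev/xvda".to_string()))
        .set_ebs(Some(
            EbsBlockDevice::builder()
                .set_delete_on_termination(Some(true))
                .set_snapshot_id(Some(snapshot_id))
                .set_volume_type(Some(VolumeType::from("gp2")))
                // The setter takes an Option, so Option-typed args need no unwrapping.
                .set_volume_size(volume_size)
                .build(),
        ))
        .build()
}
```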
pub(crate) async fn register_image( ami_args: &AmiArgs, - region: &str, + region: &Region, ebs_client: EbsClient, ec2_client: &Ec2Client, ) -> Result { @@ -156,11 +157,12 @@ pub(crate) async fn register_image( if register_result.is_err() { for snapshot_id in cleanup_snapshot_ids { - let delete_request = DeleteSnapshotRequest { - snapshot_id: snapshot_id.clone(), - ..Default::default() - }; - if let Err(e) = ec2_client.delete_snapshot(delete_request).await { + if let Err(e) = ec2_client + .delete_snapshot() + .set_snapshot_id(Some(snapshot_id.clone())) + .send() + .await + { warn!( "While cleaning up, failed to delete snapshot {}: {}", snapshot_id, e @@ -172,42 +174,41 @@ pub(crate) async fn register_image( } /// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). -pub(crate) async fn get_ami_id( - name: S1, - arch: S2, - region: &str, +pub(crate) async fn get_ami_id( + name: S, + arch: &ArchitectureValues, + region: &Region, ec2_client: &Ec2Client, ) -> Result> where - S1: Into, - S2: Into, + S: Into, { - let describe_request = DescribeImagesRequest { - owners: Some(vec!["self".to_string()]), - filters: Some(vec![ - Filter { - name: Some("name".to_string()), - values: Some(vec![name.into()]), - }, - Filter { - name: Some("architecture".to_string()), - values: Some(vec![arch.into()]), - }, - Filter { - name: Some("image-type".to_string()), - values: Some(vec!["machine".to_string()]), - }, - Filter { - name: Some("virtualization-type".to_string()), - values: Some(vec![VIRT_TYPE.to_string()]), - }, - ]), - ..Default::default() - }; let describe_response = ec2_client - .describe_images(describe_request) + .describe_images() + .set_owners(Some(vec!["self".to_string()])) + .set_filters(Some(vec![ + Filter::builder() + .set_name(Some("name".to_string())) + .set_values(Some(vec![name.into()])) + .build(), + Filter::builder() + .set_name(Some("architecture".to_string())) + .set_values(Some(vec![arch.as_ref().to_string()])) + .build(), + Filter::builder() + .set_name(Some("image-type".to_string())) + .set_values(Some(vec!["machine".to_string()])) + .build(), + Filter::builder() + .set_name(Some("virtualization-type".to_string())) + .set_values(Some(vec![VIRT_TYPE.to_string()])) + .build(), + ])) + .send() .await - .context(error::DescribeImagesSnafu { region })?; + .context(error::DescribeImagesSnafu { + region: region.as_ref(), + })?; if let Some(mut images) = describe_response.images { if images.is_empty() { return Ok(None); @@ -224,9 +225,9 @@ where let image = images.remove(0); // If there is an image but we couldn't find the ID of it, fail rather than returning None, // which would indicate no image. 
- let id = image - .image_id - .context(error::MissingImageIdSnafu { region })?; + let id = image.image_id.context(error::MissingImageIdSnafu { + region: region.as_ref(), + })?; Ok(Some(id)) } else { Ok(None) @@ -235,6 +236,8 @@ where mod error { use crate::aws::ami; + use aws_sdk_ec2::error::{DescribeImagesError, RegisterImageError}; + use aws_sdk_ec2::types::SdkError; use snafu::Snafu; use std::path::PathBuf; @@ -244,7 +247,7 @@ mod error { #[snafu(display("Failed to describe images in {}: {}", region, source))] DescribeImages { region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display("Image response in {} did not include image ID", region))] @@ -256,7 +259,7 @@ mod error { #[snafu(display("Failed to register image in {}: {}", region, source))] RegisterImage { region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display("Failed to upload snapshot from {} in {}: {}", path.display(),region, source))] diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs index 04a9d0d6..b26249d8 100644 --- a/tools/pubsys/src/aws/ami/snapshot.rs +++ b/tools/pubsys/src/aws/ami/snapshot.rs @@ -46,6 +46,7 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] + #[allow(clippy::large_enum_variant)] pub(crate) enum Error { #[snafu(display("Invalid image path '{}'", path.display()))] InvalidImagePath { path: PathBuf }, diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index 3b98be08..f9ec8d4b 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -1,8 +1,8 @@ -use crate::aws::client::build_client; +use crate::aws::client::build_client_config; +use aws_sdk_ec2::model::ImageState; +use aws_sdk_ec2::{Client as Ec2Client, Region}; use log::info; -use pubsys_config::AwsConfig; -use rusoto_core::Region; -use rusoto_ec2::{DescribeImagesRequest, Ec2, Ec2Client}; +use pubsys_config::AwsConfig as PubsysAwsConfig; use snafu::{ensure, ResultExt}; use std::thread::sleep; use std::time::Duration; @@ -15,7 +15,7 @@ pub(crate) async fn wait_for_ami( sts_region: &Region, state: &str, successes_required: u8, - aws: &AwsConfig, + pubsys_aws_config: &PubsysAwsConfig, ) -> Result<()> { let mut successes = 0; let max_attempts = 90; @@ -30,39 +30,36 @@ pub(crate) async fn wait_for_ami( error::MaxAttemptsSnafu { id, max_attempts, - region: region.name() + region: region.as_ref(), } ); - let describe_request = DescribeImagesRequest { - image_ids: Some(vec![id.to_string()]), - ..Default::default() - }; // Use a new client each time so we have more confidence that different endpoints can see // the new AMI. - let ec2_client = - build_client::(region, sts_region, aws).context(error::ClientSnafu { - client_type: "EC2", - region: region.name(), + let client_config = build_client_config(region, sts_region, pubsys_aws_config).await; + let ec2_client = Ec2Client::new(&client_config); + let describe_response = ec2_client + .describe_images() + .set_image_ids(Some(vec![id.to_string()])) + .send() + .await + .context(error::DescribeImagesSnafu { + region: region.as_ref(), })?; - let describe_response = ec2_client.describe_images(describe_request).await.context( - error::DescribeImagesSnafu { - region: region.name(), - }, - )?; + // The response contains an Option>, so we have to check that we got a // list at all, and then that the list contains the ID in question. 
if let Some(images) = describe_response.images { let mut saw_it = false; for image in images { - if let Some(ref found_id) = image.image_id { - if let Some(ref found_state) = image.state { - if id == found_id && state == found_state { + if let Some(found_id) = image.image_id { + if let Some(found_state) = image.state { + if id == found_id && ImageState::from(state) == found_state { // Success; check if we have enough to declare victory. saw_it = true; successes += 1; if successes >= successes_required { - info!("Found {} {} in {}", id, state, region.name()); + info!("Found {} {} in {}", id, state, region); return Ok(()); } break; @@ -70,16 +67,18 @@ pub(crate) async fn wait_for_ami( // If the state shows us the AMI failed, we know we'll never hit the // desired state. (Unless they desired "error", which will be caught // above.) - ensure!( - !["invalid", "deregistered", "failed", "error"] - .iter() - .any(|e| e == found_state), - error::StateSnafu { + match &found_state { + ImageState::Invalid + | ImageState::Deregistered + | ImageState::Failed + | ImageState::Error => error::StateSnafu { id, - state: found_state, - region: region.name() + state: found_state.as_ref(), + region: region.as_ref(), } - ); + .fail(), + _ => Ok(()), + }?; } } } @@ -95,11 +94,7 @@ pub(crate) async fn wait_for_ami( if attempts % 5 == 1 { info!( "Waiting for {} in {} to be {}... (attempt {} of {})", - id, - region.name(), - state, - attempts, - max_attempts + id, region, state, attempts, max_attempts ); } sleep(Duration::from_secs(seconds_between_attempts)); @@ -107,23 +102,18 @@ pub(crate) async fn wait_for_ami( } mod error { - use crate::aws; + use aws_sdk_ec2::error::DescribeImagesError; + use aws_sdk_ec2::types::SdkError; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] + #[allow(clippy::large_enum_variant)] pub(crate) enum Error { - #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] - Client { - client_type: String, - region: String, - source: aws::client::Error, - }, - #[snafu(display("Failed to describe images in {}: {}", region, source))] DescribeImages { region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display( diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs index 2b35f20c..9bc5a40e 100644 --- a/tools/pubsys/src/aws/client.rs +++ b/tools/pubsys/src/aws/client.rs @@ -1,92 +1,36 @@ -use async_trait::async_trait; -use pubsys_config::AwsConfig; -use rusoto_core::{request::DispatchSignedRequest, HttpClient, Region}; -use rusoto_credential::{ - AutoRefreshingProvider, AwsCredentials, CredentialsError, DefaultCredentialsProvider, - ProfileProvider, ProvideAwsCredentials, -}; -use rusoto_ebs::EbsClient; -use rusoto_ec2::Ec2Client; -use rusoto_ssm::SsmClient; -use rusoto_sts::{StsAssumeRoleSessionCredentialsProvider, StsClient}; -use snafu::ResultExt; +use aws_config::default_provider::credentials::default_provider; +use aws_config::profile::ProfileFileCredentialsProvider; +use aws_config::sts::AssumeRoleProvider; +use aws_config::SdkConfig; +use aws_types::credentials::SharedCredentialsProvider; +use aws_types::region::Region; +use pubsys_config::AwsConfig as PubsysAwsConfig; -pub(crate) trait NewWith { - fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self - where - P: ProvideAwsCredentials + Send + Sync + 'static, - D: DispatchSignedRequest + Send + Sync + 'static; -} - -impl NewWith for EbsClient { - fn new_with(request_dispatcher: D, credentials_provider: P, 
region: Region) -> Self - where - P: ProvideAwsCredentials + Send + Sync + 'static, - D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } -} - -impl NewWith for Ec2Client { - fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self - where - P: ProvideAwsCredentials + Send + Sync + 'static, - D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } -} - -impl NewWith for SsmClient { - fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self - where - P: ProvideAwsCredentials + Send + Sync + 'static, - D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } -} - -impl NewWith for StsClient { - fn new_with(request_dispatcher: D, credentials_provider: P, region: Region) -> Self - where - P: ProvideAwsCredentials + Send + Sync + 'static, - D: DispatchSignedRequest + Send + Sync + 'static, - { - Self::new_with(request_dispatcher, credentials_provider, region) - } -} - -/// Create a rusoto client of the given type using the given region and configuration. -pub(crate) fn build_client( +/// Create an AWS client config using the given regions and pubsys config. +pub(crate) async fn build_client_config( region: &Region, sts_region: &Region, - aws: &AwsConfig, -) -> Result { - let maybe_regional_role = aws.region.get(region.name()).and_then(|r| r.role.clone()); - let assume_roles = aws.role.iter().chain(maybe_regional_role.iter()).cloned(); - let provider = build_provider( - sts_region, - assume_roles.clone(), - base_provider(&aws.profile)?, - )?; - Ok(T::new_with( - rusoto_core::HttpClient::new().context(error::HttpClientSnafu)?, - provider, - region.clone(), - )) -} + pubsys_aws_config: &PubsysAwsConfig, +) -> SdkConfig { + let maybe_profile = pubsys_aws_config.profile.clone(); + let maybe_role = pubsys_aws_config.role.clone(); + let maybe_regional_role = pubsys_aws_config + .region + .get(region.as_ref()) + .and_then(|r| r.role.clone()); + let base_provider = base_provider(&maybe_profile).await; -/// Wrapper for trait object that implements ProvideAwsCredentials to simplify return values. -/// Might be able to remove if rusoto implements ProvideAwsCredentials for -/// Box. -struct CredentialsProvider(Box); -#[async_trait] -impl ProvideAwsCredentials for CredentialsProvider { - async fn credentials(&self) -> std::result::Result { - self.0.credentials().await - } + let config = match (&maybe_role, &maybe_regional_role) { + (None, None) => aws_config::from_env().credentials_provider(base_provider), + _ => { + let assume_roles = maybe_role.iter().chain(maybe_regional_role.iter()).cloned(); + let provider = + build_provider(sts_region, assume_roles.clone(), base_provider.clone()).await; + aws_config::from_env().credentials_provider(provider) + } + }; + + config.region(region.clone()).load().await } /// Chains credentials providers to assume the given roles in order. @@ -94,68 +38,34 @@ impl ProvideAwsCredentials for CredentialsProvider { /// credentials, not the region in which you want to talk to a service endpoint like EC2. This is /// needed because you may be assuming a role in an opt-in region from an account that has not /// opted-in to that region, and you need to get session credentials from an STS endpoint in a -/// region to which you have access in the base account. -fn build_provider
<P>
( +/// region to which you have access in the base account +async fn build_provider( sts_region: &Region, assume_roles: impl Iterator, - base_provider: P, -) -> Result -where - P: ProvideAwsCredentials + Send + Sync + 'static, -{ - let mut provider = CredentialsProvider(Box::new(base_provider)); + base_provider: SharedCredentialsProvider, +) -> SharedCredentialsProvider { + let mut provider = base_provider; for assume_role in assume_roles { - let sts = StsClient::new_with( - HttpClient::new().context(error::HttpClientSnafu)?, - provider, - sts_region.clone(), - ); - let expiring_provider = StsAssumeRoleSessionCredentialsProvider::new( - sts, - assume_role, - "pubsys".to_string(), // session name - None, // external ID - None, // session duration - None, // scope down policy - None, // MFA serial - ); - provider = CredentialsProvider(Box::new( - AutoRefreshingProvider::new(expiring_provider).context(error::ProviderSnafu)?, - )); + provider = SharedCredentialsProvider::new( + AssumeRoleProvider::builder(assume_role) + .region(sts_region.clone()) + .session_name("pubsys") + .build(provider.clone()), + ) } - Ok(provider) + provider } -/// If the user specified a profile, have rusoto use that, otherwise use Rusoto's default +/// If the user specified a profile, use that, otherwise use the default /// credentials mechanisms. -fn base_provider(maybe_profile: &Option) -> Result { +async fn base_provider(maybe_profile: &Option) -> SharedCredentialsProvider { if let Some(profile) = maybe_profile { - let mut p = ProfileProvider::new().context(error::ProviderSnafu)?; - p.set_profile(profile); - Ok(CredentialsProvider(Box::new(p))) + SharedCredentialsProvider::new( + ProfileFileCredentialsProvider::builder() + .profile_name(profile) + .build(), + ) } else { - Ok(CredentialsProvider(Box::new( - DefaultCredentialsProvider::new().context(error::ProviderSnafu)?, - ))) - } -} - -pub(crate) mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to create HTTP client: {}", source))] - HttpClient { - source: rusoto_core::request::TlsError, - }, - - #[snafu(display("Failed to create AWS credentials provider: {}", source))] - Provider { - source: rusoto_credential::CredentialsError, - }, + SharedCredentialsProvider::new(default_provider().await) } } -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index 954a3b36..defc6a01 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -1,6 +1,5 @@ -use pubsys_config::AwsConfig; -use rusoto_core::Region; -use snafu::ResultExt; +use aws_sdk_ec2::model::ArchitectureValues; +use aws_sdk_ec2::Region; #[macro_use] pub(crate) mod client; @@ -10,24 +9,16 @@ pub(crate) mod promote_ssm; pub(crate) mod publish_ami; pub(crate) mod ssm; -/// Builds a Region from the given region name, and uses the custom endpoint from the AWS config, -/// if specified in aws.region.REGION.endpoint. -fn region_from_string(name: &str, aws: &AwsConfig) -> Result { - let maybe_endpoint = aws.region.get(name).and_then(|r| r.endpoint.clone()); - Ok(match maybe_endpoint { - Some(endpoint) => Region::Custom { - name: name.to_string(), - endpoint, - }, - None => name.parse().context(error::ParseRegionSnafu { name })?, - }) +/// Builds a Region from the given region name. 
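The provider chaining above can be composed by hand as well; the sketch below shows its shape (the role ARN and region are illustrative, and the real code also layers in any per-region role from Infra.toml). The small `region_from_string` helper is defined just below it.

```rust
use aws_config::default_provider::credentials::default_provider;
use aws_config::sts::AssumeRoleProvider;
use aws_types::credentials::SharedCredentialsProvider;
use aws_types::region::Region;

// Sketch only: base credentials from the default chain, one AssumeRoleProvider
// layered on top, with an illustrative role ARN and STS region.
async fn example_provider() -> SharedCredentialsProvider {
    let base = SharedCredentialsProvider::new(default_provider().await);
    SharedCredentialsProvider::new(
        AssumeRoleProvider::builder("arn:aws:iam::123456789012:role/example")
            .region(Region::new("us-west-2"))
            .session_name("pubsys")
            .build(base),
    )
}
```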
+fn region_from_string(name: &str) -> Region { + Region::new(name.to_owned()) } /// Parses the given string as an architecture, mapping values to the ones used in EC2. -pub(crate) fn parse_arch(input: &str) -> Result { +pub(crate) fn parse_arch(input: &str) -> Result { match input { - "x86_64" | "amd64" => Ok("x86_64".to_string()), - "arm64" | "aarch64" => Ok("arm64".to_string()), + "x86_64" | "amd64" => Ok(ArchitectureValues::X8664), + "arm64" | "aarch64" => Ok(ArchitectureValues::Arm64), _ => error::ParseArchSnafu { input, msg: "unknown architecture", @@ -44,13 +35,6 @@ mod error { pub(crate) enum Error { #[snafu(display("Failed to parse arch '{}': {}", input, msg))] ParseArch { input: String, msg: String }, - - #[snafu(display("Failed to parse region '{}': {}", name, source))] - ParseRegion { - name: String, - source: rusoto_signature::region::ParseRegionError, - }, } } -pub(crate) use error::Error; type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 576ce595..2863ee84 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -1,14 +1,14 @@ //! The promote_ssm module owns the 'promote-ssm' subcommand and controls the process of copying //! SSM parameters from one version to another -use crate::aws::client::build_client; +use crate::aws::client::build_client_config; use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; use crate::aws::{parse_arch, region_from_string}; use crate::Args; +use aws_sdk_ec2::model::ArchitectureValues; +use aws_sdk_ssm::{Client as SsmClient, Region}; use log::{info, trace}; use pubsys_config::InfraConfig; -use rusoto_core::Region; -use rusoto_ssm::SsmClient; use snafu::{ensure, ResultExt}; use std::collections::HashMap; use std::path::PathBuf; @@ -20,7 +20,7 @@ use structopt::{clap, StructOpt}; pub(crate) struct PromoteArgs { /// The architecture of the machine image #[structopt(long, parse(try_from_str = parse_arch))] - arch: String, + arch: ArchitectureValues, /// The variant name for the current build #[structopt(long)] @@ -67,8 +67,8 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { aws.regions.clone().into() } .into_iter() - .map(|name| region_from_string(&name, &aws).context(error::ParseRegionSnafu)) - .collect::>>()?; + .map(|name| region_from_string(&name)) + .collect::>(); ensure!( !regions.is_empty(), @@ -80,11 +80,8 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { let mut ssm_clients = HashMap::with_capacity(regions.len()); for region in ®ions { - let ssm_client = - build_client::(region, base_region, &aws).context(error::ClientSnafu { - client_type: "SSM", - region: region.name(), - })?; + let client_config = build_client_config(region, base_region, &aws).await; + let ssm_client = SsmClient::new(&client_config); ssm_clients.insert(region.clone(), ssm_client); } @@ -93,13 +90,13 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { // Non-image-specific context for building and rendering templates let source_build_context = BuildContext { variant: &promote_args.variant, - arch: &promote_args.arch, + arch: promote_args.arch.as_str(), image_version: &promote_args.source, }; let target_build_context = BuildContext { variant: &promote_args.variant, - arch: &promote_args.arch, + arch: promote_args.arch.as_str(), image_version: &promote_args.target, }; @@ -220,20 +217,12 @@ pub(crate) async fn run(args: &Args, 
promote_args: &PromoteArgs) -> Result<()> { } mod error { - use crate::aws; use crate::aws::ssm::{ssm, template}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] pub(crate) enum Error { - #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] - Client { - client_type: String, - region: String, - source: aws::client::Error, - }, - #[snafu(display("Error reading config: {}", source))] Config { source: pubsys_config::Error, @@ -259,10 +248,6 @@ mod error { missing: String, }, - ParseRegion { - source: crate::aws::Error, - }, - #[snafu(display("Failed to render templates: {}", source))] RenderTemplates { source: template::Error, diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index c5986bdf..b5017e98 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -3,18 +3,18 @@ use crate::aws::ami::wait::{self, wait_for_ami}; use crate::aws::ami::Image; -use crate::aws::client::build_client; +use crate::aws::client::build_client_config; use crate::aws::region_from_string; use crate::Args; +use aws_sdk_ec2::error::{ModifyImageAttributeError, ModifySnapshotAttributeError}; +use aws_sdk_ec2::model::{ImageAttributeName, OperationType, SnapshotAttributeName}; +use aws_sdk_ec2::output::{ModifyImageAttributeOutput, ModifySnapshotAttributeOutput}; +use aws_sdk_ec2::types::SdkError; +use aws_sdk_ec2::{Client as Ec2Client, Region}; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; use log::{debug, error, info, trace}; use pubsys_config::InfraConfig; -use rusoto_core::{Region, RusotoError}; -use rusoto_ec2::{ - DescribeImagesRequest, Ec2, Ec2Client, ModifyImageAttributeRequest, - ModifySnapshotAttributeError, ModifySnapshotAttributeRequest, -}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::fs::File; @@ -55,9 +55,9 @@ pub(crate) struct PublishArgs { /// Common entrypoint from main() pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { let (operation, description) = if publish_args.grant { - ("add".to_string(), "granting access") + (OperationType::Add, "granting access") } else if publish_args.revoke { - ("remove".to_string(), "revoking access") + (OperationType::Remove, "revoking access") } else { unreachable!("developer error: --grant and --revoke not required/exclusive"); }; @@ -104,7 +104,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { missing: "aws.regions" } ); - let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegionSnafu)?; + let base_region = region_from_string(®ions[0]); // Check that the requested regions are a subset of the regions we *could* publish from the AMI // input JSON. 
@@ -120,7 +120,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { } ); - // Parse region names, adding endpoints from InfraConfig if specified + // Parse region names let mut amis = HashMap::with_capacity(regions.len()); for name in regions { let image = ami_input @@ -129,7 +129,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { .with_context(|| error::UnknownRegionsSnafu { regions: vec![name.clone()], })?; - let region = region_from_string(&name, &aws).context(error::ParseRegionSnafu)?; + let region = region_from_string(&name); amis.insert(region, image); } @@ -137,11 +137,8 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { // live until the future is resolved. let mut ec2_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { - let ec2_client = - build_client::(region, &base_region, &aws).context(error::ClientSnafu { - client_type: "EC2", - region: region.name(), - })?; + let client_config = build_client_config(region, &base_region, &aws).await; + let ec2_client = Ec2Client::new(&client_config); ec2_clients.insert(region.clone(), ec2_client); } @@ -164,7 +161,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { for ((region, image_id), wait_response) in wait_responses { wait_response.context(error::WaitAmiSnafu { id: &image_id, - region: region.name(), + region: region.as_ref(), })?; } @@ -204,14 +201,14 @@ pub(crate) async fn get_snapshots( region: &Region, ec2_client: &Ec2Client, ) -> Result> { - let describe_request = DescribeImagesRequest { - image_ids: Some(vec![image_id.to_string()]), - ..Default::default() - }; - let describe_response = ec2_client.describe_images(describe_request).await; - let describe_response = describe_response.context(error::DescribeImagesSnafu { - region: region.name(), - })?; + let describe_response = ec2_client + .describe_images() + .set_image_ids(Some(vec![image_id.to_string()])) + .send() + .await + .context(error::DescribeImagesSnafu { + region: region.as_ref(), + })?; // Get the image description, ensuring we only have one. 
let mut images = describe_response @@ -223,14 +220,14 @@ pub(crate) async fn get_snapshots( ensure!( !images.is_empty(), error::MissingImageSnafu { - region: region.name(), + region: region.as_ref(), image_id: image_id.to_string(), } ); ensure!( images.len() == 1, error::MultipleImagesSnafu { - region: region.name(), + region: region.as_ref(), images: images .into_iter() .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) @@ -305,22 +302,21 @@ async fn get_regional_snapshots( pub(crate) async fn modify_snapshots( user_ids: Option>, group_names: Option>, - operation: &str, + operation: &OperationType, snapshot_ids: &[String], ec2_client: &Ec2Client, region: &Region, ) -> Result<()> { let mut requests = Vec::new(); for snapshot_id in snapshot_ids { - let request = ModifySnapshotAttributeRequest { - attribute: Some("createVolumePermission".to_string()), - user_ids: user_ids.clone(), - group_names: group_names.clone(), - operation_type: Some(operation.to_string()), - snapshot_id: snapshot_id.clone(), - ..Default::default() - }; - let response_future = ec2_client.modify_snapshot_attribute(request); + let response_future = ec2_client + .modify_snapshot_attribute() + .set_attribute(Some(SnapshotAttributeName::CreateVolumePermission)) + .set_user_ids(user_ids.clone()) + .set_group_names(group_names.clone()) + .set_operation_type(Some(operation.clone())) + .set_snapshot_id(Some(snapshot_id.clone())) + .send(); // Store the snapshot_id so we can include it in any errors let info_future = ready(snapshot_id.to_string()); requests.push(join(info_future, response_future)); @@ -330,14 +326,14 @@ pub(crate) async fn modify_snapshots( let request_stream = stream::iter(requests).buffer_unordered(4); let responses: Vec<( String, - std::result::Result<(), RusotoError>, + std::result::Result>, )> = request_stream.collect().await; for (snapshot_id, response) in responses { response.context(error::ModifyImageAttributeSnafu { snapshot_id, - region: region.name(), - })? + region: region.as_ref(), + })?; } Ok(()) @@ -348,7 +344,7 @@ pub(crate) async fn modify_snapshots( pub(crate) async fn modify_regional_snapshots( user_ids: Option>, group_names: Option>, - operation: &str, + operation: &OperationType, snapshots: &HashMap>, clients: &HashMap, ) -> Result<()> { @@ -385,7 +381,7 @@ pub(crate) async fn modify_regional_snapshots( success_count += 1; debug!( "Modified permissions in {} for snapshots [{}]", - region.name(), + region.as_ref(), snapshot_ids.join(", "), ); } @@ -393,7 +389,7 @@ pub(crate) async fn modify_regional_snapshots( error_count += 1; error!( "Failed to modify permissions in {} for snapshots [{}]: {}", - region.name(), + region.as_ref(), snapshot_ids.join(", "), e ); @@ -417,27 +413,21 @@ pub(crate) async fn modify_regional_snapshots( pub(crate) async fn modify_image( user_ids: Option>, user_groups: Option>, - operation: &str, + operation: &OperationType, image_id: &str, ec2_client: &Ec2Client, - region: &Region, -) -> Result<()> { - // Build requests to modify image attributes. 
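The request fan-out idiom above (pair each `.send()` future with identifying info, then drive a bounded number of them concurrently with `buffer_unordered`) recurs throughout this file and in `ami/mod.rs`. Before the reworked `modify_image` below, a self-contained sketch of the idiom with placeholder futures standing in for SDK calls:

```rust
use futures::future::{join, ready};
use futures::stream::{self, StreamExt};

// Sketch only; in the real code the response future is an SDK builder ending in `.send()`.
async fn example_fan_out(ids: Vec<String>) {
    let mut requests = Vec::new();
    for id in ids {
        // Store the id so we can report it alongside any error.
        let info_future = ready(id.clone());
        let response_future = async move { Ok::<String, String>(id) };
        requests.push(join(info_future, response_future));
    }
    // At most four requests in flight at a time.
    let responses: Vec<(String, Result<String, String>)> =
        stream::iter(requests).buffer_unordered(4).collect().await;
    for (id, response) in responses {
        if let Err(e) = response {
            eprintln!("request for {} failed: {}", id, e);
        }
    }
}
```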
- let modify_image_request = ModifyImageAttributeRequest { - attribute: Some("launchPermission".to_string()), - user_ids: user_ids.clone(), - user_groups: user_groups.clone(), - operation_type: Some(operation.to_string()), - image_id: image_id.to_string(), - ..Default::default() - }; +) -> std::result::Result> { ec2_client - .modify_image_attribute(modify_image_request) + .modify_image_attribute() + .set_attribute(Some( + ImageAttributeName::LaunchPermission.as_ref().to_string(), + )) + .set_user_ids(user_ids.clone()) + .set_user_groups(user_groups.clone()) + .set_operation_type(Some(operation.clone())) + .set_image_id(Some(image_id.to_string())) + .send() .await - .context(error::ModifyImageAttributesSnafu { - image_id, - region: region.name(), - }) } /// Modify launchPermission for the given users/groups, across all of the images in the given @@ -445,7 +435,7 @@ pub(crate) async fn modify_image( pub(crate) async fn modify_regional_images( user_ids: Option>, user_groups: Option>, - operation: &str, + operation: &OperationType, images: &HashMap, clients: &HashMap, ) -> Result<()> { @@ -459,26 +449,29 @@ pub(crate) async fn modify_regional_images( operation, image_id, ec2_client, - region, ); // Store the region and image ID so we can include it in errors - let info_future = ready((region.name().to_string(), image_id.clone())); + let info_future = ready((region.as_ref().to_string(), image_id.clone())); requests.push(join(info_future, modify_image_future)); } // Send requests in parallel and wait for responses, collecting results into a list. let request_stream = stream::iter(requests).buffer_unordered(4); - let responses: Vec<((String, String), Result<()>)> = request_stream.collect().await; + #[allow(clippy::type_complexity)] + let responses: Vec<( + (String, String), + std::result::Result>, + )> = request_stream.collect().await; // Count up successes and failures so we can give a clear total in the final error message. 
let mut error_count = 0u16; let mut success_count = 0u16; for ((region, image_id), modify_image_response) in responses { match modify_image_response { - Ok(()) => { + Ok(_) => { success_count += 1; - info!("Modified permissions of image {} in {}", image_id, region,); + info!("Modified permissions of image {} in {}", image_id, region); } Err(e) => { error_count += 1; @@ -502,9 +495,11 @@ pub(crate) async fn modify_regional_images( } mod error { - use crate::aws::{self, ami}; - use rusoto_core::RusotoError; - use rusoto_ec2::{ModifyImageAttributeError, ModifySnapshotAttributeError}; + use crate::aws::ami; + use aws_sdk_ec2::error::{ + DescribeImagesError, ModifyImageAttributeError, ModifySnapshotAttributeError, + }; + use aws_sdk_ec2::types::SdkError; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -512,22 +507,13 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] pub(crate) enum Error { - #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] - Client { - client_type: String, - region: String, - source: aws::client::Error, - }, - #[snafu(display("Error reading config: {}", source))] - Config { - source: pubsys_config::Error, - }, + Config { source: pubsys_config::Error }, #[snafu(display("Failed to describe images in {}: {}", region, source))] DescribeImages { region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] @@ -544,20 +530,13 @@ mod error { }, #[snafu(display("Input '{}' is empty", path.display()))] - Input { - path: PathBuf, - }, + Input { path: PathBuf }, #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { - missing: String, - }, + MissingConfig { missing: String }, #[snafu(display("Failed to find given AMI ID {} in {}", image_id, region))] - MissingImage { - region: String, - image_id: String, - }, + MissingImage { region: String, image_id: String }, #[snafu(display("Response to {} was missing {}", request_type, missing))] MissingInResponse { @@ -574,7 +553,7 @@ mod error { ModifyImageAttribute { snapshot_id: String, region: String, - source: RusotoError, + source: SdkError, }, #[snafu(display( @@ -595,7 +574,7 @@ mod error { ModifyImageAttributes { image_id: String, region: String, - source: RusotoError, + source: SdkError, }, #[snafu(display( @@ -608,22 +587,13 @@ mod error { }, #[snafu(display("DescribeImages in {} with unique filters returned multiple results: {}", region, images.join(", ")))] - MultipleImages { - region: String, - images: Vec, - }, - - ParseRegion { - source: crate::aws::Error, - }, + MultipleImages { region: String, images: Vec }, #[snafu(display( "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", regions.join(", ") ))] - UnknownRegions { - regions: Vec, - }, + UnknownRegions { regions: Vec }, #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] WaitAmi { diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 5390caa6..8c5afd68 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -5,12 +5,12 @@ pub(crate) mod ssm; pub(crate) mod template; -use crate::aws::{ami::Image, client::build_client, parse_arch, region_from_string}; +use crate::aws::{ami::Image, client::build_client_config, parse_arch, region_from_string}; use crate::Args; +use aws_sdk_ec2::model::ArchitectureValues; +use aws_sdk_ssm::{Client as SsmClient, Region}; use log::{info, 
trace}; -use pubsys_config::{AwsConfig, InfraConfig}; -use rusoto_core::Region; -use rusoto_ssm::SsmClient; +use pubsys_config::InfraConfig; use serde::Serialize; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; @@ -30,7 +30,7 @@ pub(crate) struct SsmArgs { /// The architecture of the machine image #[structopt(long, parse(try_from_str = parse_arch))] - arch: String, + arch: ArchitectureValues, /// The variant name for the current build #[structopt(long)] @@ -76,17 +76,14 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { missing: "aws.regions" } ); - let base_region = region_from_string(®ions[0], &aws).context(error::ParseRegionSnafu)?; + let base_region = region_from_string(®ions[0]); - let amis = parse_ami_input(®ions, ssm_args, &aws)?; + let amis = parse_ami_input(®ions, ssm_args)?; let mut ssm_clients = HashMap::with_capacity(amis.len()); for region in amis.keys() { - let ssm_client = - build_client::(region, &base_region, &aws).context(error::ClientSnafu { - client_type: "SSM", - region: region.name(), - })?; + let client_config = build_client_config(region, &base_region, &aws).await; + let ssm_client = SsmClient::new(&client_config); ssm_clients.insert(region.clone(), ssm_client); } @@ -95,7 +92,7 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { // Non-image-specific context for building and rendering templates let build_context = BuildContext { variant: &ssm_args.variant, - arch: &ssm_args.arch, + arch: ssm_args.arch.as_ref(), image_version: &ssm_args.version, }; @@ -190,11 +187,7 @@ pub(crate) struct BuildContext<'a> { type SsmParameters = HashMap; /// Parse the AMI input file -fn parse_ami_input( - regions: &[String], - ssm_args: &SsmArgs, - aws: &AwsConfig, -) -> Result> { +fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs) -> Result> { info!("Using AMI data from path: {}", ssm_args.ami_input.display()); let file = File::open(&ssm_args.ami_input).context(error::FileSnafu { op: "open", @@ -229,7 +222,7 @@ fn parse_ami_input( } ); - // Parse region names, adding endpoints from InfraConfig if specified + // Parse region names let mut amis = HashMap::with_capacity(regions.len()); for name in regions { let image = ami_input @@ -238,8 +231,8 @@ fn parse_ami_input( .with_context(|| error::UnknownRegionsSnafu { regions: vec![name.clone()], })?; - let region = region_from_string(name, aws).context(error::ParseRegionSnafu)?; - amis.insert(region, image); + let region = region_from_string(name); + amis.insert(region.clone(), image); } Ok(amis) @@ -258,9 +251,7 @@ pub(crate) fn key_difference(wanted: &SsmParameters, current: &SsmParameters) -> let new_value = &wanted[key]; println!( "{} - {} - new parameter:\n new value: {}", - key.name, - key.region.name(), - new_value, + key.name, key.region, new_value, ); parameters_to_set.insert( SsmKey::new(key.region.clone(), key.name.clone()), @@ -273,14 +264,11 @@ pub(crate) fn key_difference(wanted: &SsmParameters, current: &SsmParameters) -> let new_value = &wanted[key]; if current_value == new_value { - println!("{} - {} - no change", key.name, key.region.name()); + println!("{} - {} - no change", key.name, key.region); } else { println!( "{} - {} - changing value:\n old value: {}\n new value: {}", - key.name, - key.region.name(), - current_value, - new_value + key.name, key.region, current_value, new_value ); parameters_to_set.insert( SsmKey::new(key.region.clone(), key.name.clone()), @@ -295,7 +283,6 @@ pub(crate) fn key_difference(wanted: 
&SsmParameters, current: &SsmParameters) -> } mod error { - use crate::aws; use crate::aws::ssm::{ssm, template}; use snafu::Snafu; use std::io; @@ -304,13 +291,6 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] pub(crate) enum Error { - #[snafu(display("Error creating {} client in {}: {}", client_type, region, source))] - Client { - client_type: String, - region: String, - source: aws::client::Error, - }, - #[snafu(display("Error reading config: {}", source))] Config { source: pubsys_config::Error, @@ -352,10 +332,6 @@ mod error { #[snafu(display("Cowardly refusing to overwrite parameters without ALLOW_CLOBBER"))] NoClobber, - ParseRegion { - source: crate::aws::Error, - }, - #[snafu(display("Failed to render templates: {}", source))] RenderTemplates { source: template::Error, diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index 3c5fe0f2..1c1c1e68 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -1,14 +1,14 @@ //! The ssm module owns the getting and setting of parameters in SSM. use super::{SsmKey, SsmParameters}; +use aws_sdk_ssm::error::{GetParametersError, PutParameterError}; +use aws_sdk_ssm::model::ParameterType; +use aws_sdk_ssm::output::{GetParametersOutput, PutParameterOutput}; +use aws_sdk_ssm::types::SdkError; +use aws_sdk_ssm::{Client as SsmClient, Region}; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; use log::{debug, error, trace, warn}; -use rusoto_core::{Region, RusotoError}; -use rusoto_ssm::{ - GetParametersError, GetParametersRequest, GetParametersResult, PutParameterError, - PutParameterRequest, PutParameterResult, Ssm, SsmClient, -}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::time::Duration; @@ -38,14 +38,13 @@ where for (region, names) in regional_names { // At most 10 parameters can be requested at a time. for names_chunk in names.chunks(10) { - trace!("Requesting {:?} in {}", names_chunk, region.name()); + trace!("Requesting {:?} in {}", names_chunk, region); let ssm_client = &clients[®ion]; let len = names_chunk.len(); - let get_request = GetParametersRequest { - names: names_chunk.to_vec(), - ..Default::default() - }; - let get_future = ssm_client.get_parameters(get_request); + let get_future = ssm_client + .get_parameters() + .set_names(Some(names_chunk.to_vec())) + .send(); // Store the region so we can include it in errors and the output map let info_future = ready((region.clone(), len)); @@ -58,7 +57,7 @@ where #[allow(clippy::type_complexity)] let responses: Vec<( (Region, usize), - std::result::Result>, + std::result::Result>, )> = request_stream.collect().await; // If you're checking parameters in a region you haven't pushed to before, you can get an @@ -78,11 +77,11 @@ where Err(e) => { // Note: there's no structured error type for this so we have to string match. 
if e.to_string().contains("is not a valid namespace") { - new_regions.insert(region.name().to_string()); + new_regions.insert(region.clone()); continue; } else { return Err(e).context(error::GetParametersSnafu { - region: region.name(), + region: region.as_ref(), }); } } @@ -97,7 +96,7 @@ where ensure!( total_count == expected_len, error::MissingInResponseSnafu { - region: region.name(), + region: region.as_ref(), request_type: "GetParameters", missing: format!( "parameters - got {}, expected {}", @@ -111,12 +110,12 @@ where if !valid_parameters.is_empty() { for parameter in valid_parameters { let name = parameter.name.context(error::MissingInResponseSnafu { - region: region.name(), + region: region.as_ref(), request_type: "GetParameters", missing: "parameter name", })?; let value = parameter.value.context(error::MissingInResponseSnafu { - region: region.name(), + region: region.as_ref(), request_type: "GetParameters", missing: format!("value for parameter {}", name), })?; @@ -149,7 +148,7 @@ pub(crate) async fn set_parameters( // We run all requests in a batch, and any failed requests are added to the next batch for // retry - let mut failed_parameters: HashMap)>> = HashMap::new(); + let mut failed_parameters: HashMap)>> = HashMap::new(); let max_failures = 5; /// Stores the values we need to be able to retry requests @@ -199,14 +198,14 @@ pub(crate) async fn set_parameters( // request. for context in contexts.drain(..) { let ssm_client = &ssm_clients[context.region]; - let put_request = PutParameterRequest { - name: context.name.to_string(), - value: context.value.to_string(), - overwrite: Some(true), - type_: Some("String".to_string()), - ..Default::default() - }; - let put_future = ssm_client.put_parameter(put_request); + + let put_future = ssm_client + .put_parameter() + .set_name(Some(context.name.to_string())) + .set_value(Some(context.value.to_string())) + .set_overwrite(Some(true)) + .set_type(Some(ParameterType::String)) + .send(); let regional_list = regional_requests .entry(context.region) @@ -230,7 +229,7 @@ pub(crate) async fn set_parameters( let parallel_requests = stream::select_all(throttled_streams).buffer_unordered(4); let responses: Vec<( RequestContext<'_>, - std::result::Result>, + std::result::Result>, )> = parallel_requests.collect().await; // For each error response, check if we should retry or bail. @@ -263,10 +262,7 @@ pub(crate) async fn set_parameters( }; debug!( "Request attempt {} of {} failed in {}: {}", - context.failures, - max_failures, - context.region.name(), - e + context.failures, max_failures, context.region, e ); contexts.push(context); } @@ -277,12 +273,7 @@ pub(crate) async fn set_parameters( if !failed_parameters.is_empty() { for (region, failures) in &failed_parameters { for (parameter, error) in failures { - error!( - "Failed to set {} in {}: {}", - parameter, - region.name(), - error - ); + error!("Failed to set {} in {}: {}", parameter, region, error); } } return error::SetParametersSnafu { @@ -315,18 +306,13 @@ pub(crate) async fn validate_parameters( // parameter wasn't updated / created. 
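Before the validation pass below, a self-contained sketch of the aws-sdk-ssm calls that `set_parameters` and `get_parameters` are built around; the parameter name and value are illustrative:

```rust
use aws_sdk_ssm::model::ParameterType;
use aws_sdk_ssm::Client as SsmClient;

// Sketch only; error handling is collapsed to `?` for brevity.
async fn example_roundtrip(ssm: &SsmClient) -> Result<(), Box<dyn std::error::Error>> {
    ssm.put_parameter()
        .set_name(Some("/example/ami-id".to_string()))
        .set_value(Some("ami-0123456789abcdef0".to_string()))
        .set_overwrite(Some(true))
        .set_type(Some(ParameterType::String))
        .send()
        .await?;

    let got = ssm
        .get_parameters()
        .set_names(Some(vec!["/example/ami-id".to_string()]))
        .send()
        .await?;
    // Output fields are Options, like the rest of the SDK's outputs.
    for parameter in got.parameters.unwrap_or_default() {
        println!("{:?} = {:?}", parameter.name, parameter.value);
    }
    Ok(())
}
```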
if let Some(updated_value) = updated_parameters.get(expected_key) { if updated_value != expected_value { - error!( - "Failed to set {} in {}", - expected_name, - expected_region.name() - ); + error!("Failed to set {} in {}", expected_name, expected_region); success = false; } } else { error!( "{} in {} still doesn't exist", - expected_name, - expected_region.name() + expected_name, expected_region ); success = false; } @@ -337,18 +323,19 @@ pub(crate) async fn validate_parameters( } mod error { - use rusoto_core::RusotoError; - use rusoto_ssm::GetParametersError; + use aws_sdk_ssm::error::GetParametersError; + use aws_sdk_ssm::types::SdkError; use snafu::Snafu; use std::time::Duration; #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] + #[allow(clippy::large_enum_variant)] pub(crate) enum Error { #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source))] GetParameters { region: String, - source: RusotoError, + source: SdkError, }, #[snafu(display("Response to {} was missing {}", request_type, missing))] diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index 52aec136..17a0e08b 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -3,8 +3,8 @@ use super::{BuildContext, SsmKey, SsmParameters}; use crate::aws::ami::Image; +use aws_sdk_ssm::Region; use log::trace; -use rusoto_core::Region; use serde::{Deserialize, Serialize}; use snafu::{ensure, ResultExt}; use std::collections::HashMap; @@ -95,7 +95,7 @@ pub(crate) fn render_parameters( image_id: &image.id, image_name: &image.name, image_version: build_context.image_version, - region: region.name(), + region: region.as_ref(), }; for tp in &template_parameters.parameters { diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index b810aaf9..7046e08e 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -27,8 +27,9 @@ mod aws; mod repo; mod vmware; +use env_logger::Builder; +use log::LevelFilter; use semver::Version; -use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; use snafu::ResultExt; use std::path::PathBuf; use std::process; @@ -39,8 +40,18 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); - // SimpleLogger will send errors to stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; + match args.log_level { + // Set log level for AWS SDK to error to reduce verbosity. + LevelFilter::Info => Builder::new() + .filter_level(args.log_level) + .filter(Some("aws_config"), LevelFilter::Warn) + .filter(Some("aws_smithy"), LevelFilter::Warn) + .filter(Some("tracing::span"), LevelFilter::Warn) + .init(), + + // Set the supplied log level across the whole crate. 
+ _ => Builder::new().filter_level(args.log_level).init(), + } match args.subcommand { SubCommand::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), @@ -151,9 +162,6 @@ mod error { #[snafu(display("Failed to build AMI: {}", source))] Ami { source: crate::aws::ami::Error }, - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - #[snafu(display("Failed to publish AMI: {}", source))] PublishAmi { source: crate::aws::publish_ami::Error, diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 620385b0..730ecdf2 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -5,6 +5,7 @@ pub(crate) mod refresh_repo; pub(crate) mod validate_repo; use crate::{friendly_version, Args}; +use aws_sdk_kms::{Client as KmsClient, Region}; use chrono::{DateTime, Utc}; use lazy_static::lazy_static; use log::{debug, info, trace, warn}; @@ -12,17 +13,15 @@ use parse_datetime::parse_datetime; use pubsys_config::{ InfraConfig, KMSKeyConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig, }; -use rusoto_core::Region; -use rusoto_kms::KmsClient; use semver::Version; use snafu::{ensure, OptionExt, ResultExt}; use std::convert::TryInto; use std::fs::{self, File}; use std::num::NonZeroU64; use std::path::{Path, PathBuf}; -use std::str::FromStr; use structopt::{clap, StructOpt}; use tempfile::NamedTempFile; +use tokio::runtime::Runtime; use tough::{ editor::signed::PathExists, editor::RepositoryEditor, @@ -411,9 +410,10 @@ fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result get_client(config_val, &key_id_val)?, + None => None, + } }, signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, })), @@ -425,17 +425,25 @@ fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result Result> { - if let Some(region) = config.available_keys.get(key_id) { - Ok(Some(KmsClient::new( - Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, - ))) +/// Helper function that generates a KmsClient or None given config containing available keys +fn get_client(kmskey_config: &KMSKeyConfig, key_id: &str) -> Result> { + if let Some(region) = kmskey_config.available_keys.get(key_id) { + let rt = Runtime::new().context(error::RuntimeSnafu)?; + Ok(Some(rt.block_on(async { async_get_client(region).await }))) } else { Ok(None) } } +/// Helper function that generates a KmsClient given region +async fn async_get_client(region: &str) -> KmsClient { + let client_config = aws_config::from_env() + .region(Region::new(region.to_string())) + .load() + .await; + KmsClient::new(&client_config) +} + /// Common entrypoint from main() pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { let metadata_out_dir = repo_args @@ -701,12 +709,6 @@ mod error { #[snafu(display("Non-UTF8 path '{}' not supported", path.display()))] NonUtf8Path { path: PathBuf }, - #[snafu(display("Failed to parse {} to a valid rusoto region: {}", what, source))] - ParseRegion { - what: String, - source: rusoto_core::region::ParseRegionError, - }, - #[snafu(display("Invalid URL '{}': {}", input, source))] ParseUrl { input: String, @@ -719,6 +721,9 @@ mod error { source: tough::error::Error, }, + #[snafu(display("Failed to create async runtime: {}", source))] + Runtime { source: std::io::Error }, + #[snafu(display("Failed to parse target name from string '{}': {}", target, source))] ParseTargetName { target: String, From 1ec856b8f5894c7b03fb21be8e4a5e4b7651a4a3 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Mon, 
29 Aug 2022 10:06:26 +0000 Subject: [PATCH 0753/1356] models: add reboot-to-reconcile boot setting Add a new boolean settings.boot.reboot-to-reconcile to govern whether Bottlerocket should automatically reboot if kernel or systemd command line parameters are reconfigured during boot. This could happen either via user-data or via a bootstrap container. In either case, command line changes for the kernel or systemd will not take effect until the next reboot. This change only introduces the new setting to the model and threads it through in all places touching BootSettings. The flag is not yet armed, i.e. no reboot action is taken. Signed-off-by: Markus Boehme --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8f031ae6..2db7757e 100644 --- a/README.md +++ b/README.md @@ -716,16 +716,20 @@ Here are the metrics settings: *Please note that boot settings currently only exist for the bare metal variants and \*-k8s-1.23 variants. Boot settings will be added to any future variant introduced after Bottlerocket v1.8.0.* -Specifying either of the following settings will generate a kernel boot config file to be loaded on subsequent boots: +Specifying any of the following settings will generate a kernel boot config file to be loaded on subsequent boots: * `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. * `settings.boot.init-parameters`: This allows additional init parameters to be specified on the kernel command line during boot. +* `settings.boot.reboot-to-reconcile`: If set to `true`, Bottlerocket will automatically reboot again during boot if either the `settings.boot.kernel-parameters` or `settings.boot.init-parameters` were changed via user data or a bootstrap container so that these changes may take effect. You can learn more about kernel boot configuration [here](https://www.kernel.org/doc/html/latest/admin-guide/bootconfig.html). Example user data for specifying boot settings: ```toml +[settings.boot] +reboot-to-reconcile = true + [settings.boot.kernel-parameters] "console" = [ "tty0", From 53d906d49dffb570cc9620f2f1e3da0e60bdadc3 Mon Sep 17 00:00:00 2001 From: John McBride Date: Mon, 26 Sep 2022 13:35:09 -0700 Subject: [PATCH 0754/1356] Building docs: fix positional fetch-license argument Signed-off-by: John McBride --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index 8e884d54..e917e717 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -138,7 +138,7 @@ licenses = [ 2. Fetch the licenses with this command: ```shell -cargo make fetch-licenses -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true +cargo make -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true fetch-licenses ``` 3. Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: From 87a80bf7f6c17685c87a8b84a39aa886d9c63bc2 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Mon, 26 Sep 2022 18:02:39 +0000 Subject: [PATCH 0755/1356] pubsys: switch back to simplelog --- tools/Cargo.lock | 2 +- tools/pubsys/Cargo.toml | 2 +- tools/pubsys/src/main.rs | 44 +++++++++++++++++++++++++++++----------- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 9a0f9c02..43c24544 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2063,7 +2063,6 @@ dependencies = [ "clap 3.2.15", "coldsnap", "duct", - "env_logger", "futures", "http", "indicatif", @@ -2077,6 +2076,7 @@ dependencies = [ "semver", "serde", "serde_json", + "simplelog", "snafu", "structopt", "tempfile", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index c8298e71..0f82269d 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -20,7 +20,6 @@ chrono = "0.4" clap = "3.1" coldsnap = { version = "0.4", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13.0" -env_logger = "0.9" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } futures = "0.3.5" http = "0.2.8" @@ -32,6 +31,7 @@ parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } +simplelog = "0.12" snafu = "0.7" semver = "1.0" serde = { version = "1.0", features = ["derive"] } diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 7046e08e..466f143e 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -27,9 +27,8 @@ mod aws; mod repo; mod vmware; -use env_logger::Builder; -use log::LevelFilter; use semver::Version; +use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; use snafu::ResultExt; use std::path::PathBuf; use std::process; @@ -40,17 +39,35 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); + // SimpleLogger will send errors to stderr and anything less to stdout. + // To reduce verbosity of messages related to the AWS SDK for Rust we need + // to spin up two loggers, setting different levels for each. This allows + // us to retain the mixed logging of stdout/stderr in simplelog. match args.log_level { - // Set log level for AWS SDK to error to reduce verbosity. - LevelFilter::Info => Builder::new() - .filter_level(args.log_level) - .filter(Some("aws_config"), LevelFilter::Warn) - .filter(Some("aws_smithy"), LevelFilter::Warn) - .filter(Some("tracing::span"), LevelFilter::Warn) - .init(), - - // Set the supplied log level across the whole crate. - _ => Builder::new().filter_level(args.log_level).init(), + LevelFilter::Info => { + CombinedLogger::init(vec![ + SimpleLogger::new( + LevelFilter::Info, + ConfigBuilder::new() + .add_filter_ignore_str("aws_config") + .add_filter_ignore_str("aws_smithy") + .add_filter_ignore_str("tracing::span") + .build(), + ), + SimpleLogger::new( + LevelFilter::Warn, + ConfigBuilder::new() + .add_filter_allow_str("aws_config") + .add_filter_allow_str("aws_smithy") + .add_filter_allow_str("tracing::span") + .build(), + ), + ]) + .context(error::LoggerSnafu)?; + } + _ => { + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? 
+ } } match args.subcommand { @@ -162,6 +179,9 @@ mod error { #[snafu(display("Failed to build AMI: {}", source))] Ami { source: crate::aws::ami::Error }, + #[snafu(display("Logger setup error: {}", source))] + Logger { source: log::SetLoggerError }, + #[snafu(display("Failed to publish AMI: {}", source))] PublishAmi { source: crate::aws::publish_ami::Error, From 6b71d8cc476415bd8b0bd15d12ce44808e2cf9e0 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 26 Sep 2022 18:30:48 +0000 Subject: [PATCH 0756/1356] pubsys: sort dependendencies in Cargo.toml --- tools/pubsys/Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 0f82269d..4d30af5c 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -20,7 +20,6 @@ chrono = "0.4" clap = "3.1" coldsnap = { version = "0.4", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13.0" -pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } futures = "0.3.5" http = "0.2.8" indicatif = "0.16.0" @@ -28,15 +27,17 @@ lazy_static = "1.4" log = "0.4" num_cpus = "1" parse-datetime = { path = "../../sources/parse-datetime", version = "0.1.0" } +pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } rayon = "1" # Need to bring in reqwest with a TLS feature so tough can support TLS repos. reqwest = { version = "0.11.1", default-features = false, features = ["rustls-tls", "blocking"] } -simplelog = "0.12" -snafu = "0.7" semver = "1.0" -serde = { version = "1.0", features = ["derive"] } +serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -structopt = { version = "0.3", default-features = false } +simplelog = "0.12" +snafu = "0.7" +structopt = { version = "0.3", default-features = false } +tempfile = "3.1" tinytemplate = "1.1" tokio = { version = "1", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } @@ -46,4 +47,3 @@ tough-kms = "0.4" tough-ssm = "0.7" update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1.0" } url = { version = "2.1.0", features = ["serde"] } -tempfile = "3.1" From f3ca5ef12ac4a543b18ef0dfd0df91d6b1708783 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 15 Sep 2022 21:04:06 +0000 Subject: [PATCH 0757/1356] actions-workflow: bump rust and cargo-make Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5fb7d00c..1fba0d7e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -104,8 +104,8 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v3 - - run: rustup default 1.61.0 - - run: cargo install --version 0.35.12 cargo-make + - run: rustup default 1.64.0 + - run: cargo install --version 0.36.0 cargo-make - if: contains(matrix.variant, 'nvidia') run: | cat <<-EOF > Licenses.toml From f93c33175ad1bde9cfe1ccab2a3f839a149028bb Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 26 Sep 2022 19:29:11 +0000 Subject: [PATCH 0758/1356] tools: update rust dependencies Ran 'cargo update' in sources workspace and updated license check tomls. 
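A rough sketch of the workflow behind a lock-file refresh like this one; the working directory and the check invocation are assumptions, since the commit only records the resulting `Cargo.lock` and `deny.toml` changes:

```shell
# Refresh the tools workspace lock file to the newest compatible versions
# (assumed layout: run from the repository's tools/ directory).
cd tools
cargo update

# Re-run the dependency and license checks against deny.toml (assumes
# cargo-deny is installed), then adjust any pinned exceptions it flags.
cargo deny check licenses
```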
--- tools/Cargo.lock | 361 +++++++++++++++++++++++------------------------ tools/deny.toml | 2 +- 2 files changed, 177 insertions(+), 186 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 43c24544..d78f9919 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -19,18 +19,18 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] [[package]] name = "android_system_properties" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] @@ -46,15 +46,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.58" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" [[package]] name = "argh" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e7e4aa7e40747e023c0761dafcb42333a9517575bbf1241747f68dd3177a62" +checksum = "c375edecfd2074d5edcc31396860b6e54b6f928714d0e097b983053fac0cabe3" dependencies = [ "argh_derive", "argh_shared", @@ -62,12 +62,12 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f2bd7ff6ed6414f4e5521bd509bae46454bbd513801767ced3f21a751ab4bc" +checksum = "aa013479b80109a1bf01a039412b0f0013d716f36921226d86c6709032fb7a03" dependencies = [ "argh_shared", - "heck 0.3.3", + "heck 0.4.0", "proc-macro2", "quote", "syn", @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "argh_shared" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47253b98986dafc7a3e1cf3259194f1f47ac61abb57a57f46ec09e48d004ecda" +checksum = "149f75bbec1827618262e0855a68f0f9a7f2edc13faebf33c4f16d6725edb6a9" [[package]] name = "assert-json-diff" @@ -349,7 +349,7 @@ dependencies = [ "percent-encoding", "regex", "ring", - "time 0.3.11", + "time 0.3.14", "tracing", ] @@ -452,7 +452,7 @@ dependencies = [ "itoa", "num-integer", "ryu", - "time 0.3.11", + "time 0.3.14", ] [[package]] @@ -518,9 +518,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -567,7 +567,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2 0.10.2", + "sha2 0.10.6", "snafu", "toml", "url", @@ -576,9 +576,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = 
"c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "byteorder" @@ -588,15 +588,15 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bytes-utils" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1934a3ef9cac8efde4966a92781e77713e1ba329f1d42e446c7d7eba340d8ef1" +checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" dependencies = [ "bytes", "either", @@ -631,9 +631,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f725f340c3854e3cb3ab736dc21f0cca183303acea3b3ffec30f141503ac8eb" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ "iana-time-zone", "js-sys", @@ -662,9 +662,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.15" +version = "3.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bbe24bbd31a185bc2c4f7c2abe80bea13a20d57ee4e55be70ac512bdc76417" +checksum = "86447ad904c7fb335a790c9d7fe3d0d971dc523b8ccd1561a520de9a85302750" dependencies = [ "atty", "bitflags", @@ -674,14 +674,14 @@ dependencies = [ "once_cell", "strsim 0.10.0", "termcolor", - "textwrap 0.15.0", + "textwrap 0.15.1", ] [[package]] name = "clap_derive" -version = "3.2.15" +version = "3.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" +checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -717,7 +717,7 @@ dependencies = [ "futures", "indicatif", "nix", - "sha2 0.10.2", + "sha2 0.10.6", "snafu", "tempfile", "tokio", @@ -725,9 +725,9 @@ dependencies = [ [[package]] name = "console" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "89eab4d20ce20cea182308bca13088fecea9c05f6776cf287205d41a0ed3c847" dependencies = [ "encode_unicode", "libc", @@ -754,9 +754,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -890,11 +890,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", ] @@ -945,9 +945,9 @@ checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" [[package]] name = "either" 
-version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encode_unicode" @@ -966,9 +966,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" dependencies = [ "atty", "humantime", @@ -1009,19 +1009,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] [[package]] name = "futures" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" dependencies = [ "futures-channel", "futures-core", @@ -1034,9 +1033,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" dependencies = [ "futures-core", "futures-sink", @@ -1044,15 +1043,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" dependencies = [ "futures-core", "futures-task", @@ -1061,15 +1060,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" dependencies = [ "proc-macro2", "quote", @@ -1078,21 +1077,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" 
[[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" dependencies = [ "futures-channel", "futures-core", @@ -1116,9 +1115,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -1156,9 +1155,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -1258,9 +1257,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1357,9 +1356,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.46" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" +checksum = "fd911b35d940d2bd0bea0f9100068e5b97b51a1cbe13d13382f132e0365257a0" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1376,11 +1375,10 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] @@ -1413,7 +1411,7 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", - "clap 3.2.15", + "clap 3.2.22", "hex", "log", "pubsys-config", @@ -1422,7 +1420,7 @@ dependencies = [ "rusoto_s3", "serde_json", "serde_yaml", - "sha2 0.10.2", + "sha2 0.10.6", "shell-words", "simplelog", "snafu", @@ -1449,15 +1447,15 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" [[package]] name = "js-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -1590,9 +1588,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "c0f80d65747a3e43d1596c7c5492d95d5edddaabd45a7fcdb02b95f644164966" [[package]] name = "linked-hash-map" @@ -1602,9 +1600,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1625,12 +1623,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "matches" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" - [[package]] name = "md-5" version = "0.9.1" @@ -1665,9 +1657,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "miniz_oxide" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] @@ -1815,9 +1807,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1891,9 +1883,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.2.0" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] name = "papergrid" @@ -1965,24 +1957,24 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = 
"069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -2039,9 +2031,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.42" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" +checksum = "7bd7356a8122b6c4a24a82b278680c73357984ca2fc79a0f9fa6dea7dced7c58" dependencies = [ "unicode-ident", ] @@ -2060,7 +2052,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "chrono", - "clap 3.2.15", + "clap 3.2.22", "coldsnap", "duct", "futures", @@ -2115,7 +2107,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2 0.10.2", + "sha2 0.10.6", "shell-words", "simplelog", "snafu", @@ -2127,9 +2119,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -2157,9 +2149,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] @@ -2236,9 +2228,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" +checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" dependencies = [ "base64", "bytes", @@ -2252,9 +2244,9 @@ dependencies = [ "hyper-rustls 0.23.0", "ipnet", "js-sys", - "lazy_static", "log", "mime", + "once_cell", "percent-encoding", "pin-project-lite", "rustls 0.20.6", @@ -2450,18 +2442,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "same-file" @@ -2544,9 +2536,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -2567,18 +2559,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.143" +version = "1.0.145" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e8e5d5b70924f74ff5c6d64d9a5acd91422117c60f48c4e07855238a254553" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] @@ -2595,9 +2587,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.143" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3d8e8de557aee63c26b85b947f5e59b690d0454c753f3adeb5cd7835ab88391" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", @@ -2617,9 +2609,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "indexmap", "itoa", @@ -2668,7 +2660,7 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2686,13 +2678,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2734,7 +2726,7 @@ checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" dependencies = [ "log", "termcolor", - "time 0.3.11", + "time 0.3.14", ] [[package]] @@ -2777,9 +2769,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -2835,9 +2827,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.98" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "e90cde112c4b9690b8cbe810cba9ddd8bc1d7472e2cae317b69e9438c1cba7d2" dependencies = [ "proc-macro2", "quote", @@ -2917,7 +2909,7 @@ dependencies = [ "aws-sdk-ec2", "bottlerocket-types", "bottlerocket-variant", - "clap 3.2.15", + "clap 3.2.22", "env_logger", "futures", "k8s-openapi", @@ -2944,24 +2936,24 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "0a99cb8c4b9a8ef0e7907cd3b617cc8dc04d571c4e73c8ae403d80ac160bb122" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.36" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "3a891860d3c8d66fec8e73ddb3765f90082374dbaaa833407b904a94f1a7eb43" dependencies = [ "proc-macro2", "quote", @@ -2981,9 +2973,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.11" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" +checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" dependencies = [ "itoa", "libc", @@ -3024,9 +3016,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.20.1" +version = "1.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" +checksum = "0020c875007ad96677dcc890298f4b942882c5d4eb7cc8f439fc3bf813dc9c95" dependencies = [ "autocfg", "bytes", @@ -3098,9 +3090,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "f6edf2d6bc038a43d31353570e27270603f4648d18f5ed10c0e179abe43255af" dependencies = [ "futures-core", "pin-project-lite", @@ -3121,9 +3113,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -3256,9 +3248,9 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" dependencies = [ "cfg-if", "log", @@ -3280,9 +3272,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7" dependencies = [ "once_cell", ] @@ -3341,30 +3333,30 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" +checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" [[package]] name = "unicode-normalization" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" +checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" @@ -3389,22 +3381,21 @@ dependencies = [ [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", "serde", ] [[package]] name = "urlencoding" -version = "2.1.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" +checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "utf-8" @@ -3465,9 +3456,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3475,9 +3466,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", @@ -3490,9 +3481,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -3502,9 +3493,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3512,9 +3503,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -3525,15 +3516,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.82" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" 
[[package]] name = "web-sys" -version = "0.3.59" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -3561,9 +3552,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" dependencies = [ "webpki 0.22.0", ] diff --git a/tools/deny.toml b/tools/deny.toml index 65210ca1..cc455196 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -62,7 +62,7 @@ skip-tree = [ { name = "rusoto_signature" }, # argh_derive pulls in an older version of heck - { name = "argh_derive", version = "0.1.8" }, + { name = "argh_derive", version = "0.1.9" }, # structopt pulls in an older version of clap { name = "structopt", version = "0.3.26" }, From d848184301b09b5ceb3b484435b08eef639de4a3 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 26 Sep 2022 19:41:28 +0000 Subject: [PATCH 0759/1356] license-check: normalize exceptions between sources and tools --- tools/deny.toml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/deny.toml b/tools/deny.toml index cc455196..372ac1aa 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -25,7 +25,7 @@ allow = [ exceptions = [ { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, - { name = "unicode-ident", allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"] }, + { name = "unicode-ident", version = "1.0.4", allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"] }, ] # https://github.com/hsivonen/encoding_rs The non-test code that isn't generated from the WHATWG data in this crate is @@ -57,13 +57,15 @@ license-files = [ multiple-versions = "deny" wildcards = "deny" +skip = [ + # older version used by chrono 0.4.22 + { name = "time", version = "0.1.44" }, +] + skip-tree = [ # rusoto_signature uses an older version of sha2 { name = "rusoto_signature" }, - # argh_derive pulls in an older version of heck - { name = "argh_derive", version = "0.1.9" }, - # structopt pulls in an older version of clap { name = "structopt", version = "0.3.26" }, From ecfb7e419bc2305d7977fea7058bbc8620a08bd3 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 26 Sep 2022 22:30:23 +0000 Subject: [PATCH 0760/1356] kubelet: Add setting to change log level This adds a new `kubernetes.log-level` setting to allow configuration of the kubelet logging verbosity. With this, an end user can turn up verbosity if they are trying to troubleshoot something, or turn down verbosity if they don't need as many log messages. Signed-off-by: Sean McGinnis --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 2db7757e..ee77ec4f 100644 --- a/README.md +++ b/README.md @@ -447,6 +447,8 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.image-gc-high-threshold-percent`: The percent of disk usage after which image garbage collection is always run. * `settings.kubernetes.image-gc-low-threshold-percent`: The percent of disk usage before which image garbage collection is never run. * `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. 
cloudprovider) can use to identify a specific node. +* `settings.kubernetes.log-level`: Adjust the logging verbosity of the `kubelet` process. + The default log level is 2, with higher numbers enabling more verbose logging. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. From 72aae89117b258b3bb8ef9a60842d8c333c3994f Mon Sep 17 00:00:00 2001 From: John McBride Date: Thu, 11 Aug 2022 21:01:25 +0000 Subject: [PATCH 0761/1356] Update BUILDING.md and PUBLISHING-AWS.md to mention need for aws creds Several commands referenced in the developer docs interact with AWS services; AMI, EKS, etc. In order to successfully publish AMIs via the quickstart, AWS creds must be setup. This patch updates both BUILDING.md and PUBLISHING-AWS.md to reflect the need to have this setup. Signed-off-by: John McBride --- BUILDING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BUILDING.md b/BUILDING.md index e917e717..55dccfe7 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -154,6 +154,10 @@ cargo make \ To use the image in Amazon EC2, we need to register the image as an AMI. +To do this, you'll need to have your AWS account credentials setup on your system. +There are lots of ways to do this; one method is using [the `aws` CLI](https://aws.amazon.com/cli/) via its `configure` command with your user's access and secret keys. +If you're using an EC2 instance, the [EC2 instance's IAM role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) will be used automatically if available. + For a simple start, pick an [EC2 region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions), then run: ``` From 53a82fe44f16a9e6950c6f16cb27f10be3711085 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 21 Jun 2022 00:39:20 +0000 Subject: [PATCH 0762/1356] packages: add kmod-5.15-nvidia sources This adds the sources to compile the 515 NVIDIA driver for the 5.15 kernel. This version only supports the GPU architectures Maxwell, Pascal, Volta, Turing, Ampere, and forward. The driver will use the GPU System Processor (GSP) feature if the underlying hardware supports it by loading the binary file `/lib/firmware/nvidia//gsp.bin`. 
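Returning to the `settings.kubernetes.log-level` setting added in the kubelet patch above: in the style of the README's other user-data examples, a minimal sketch might look like the following, where the value `4` is only an illustrative choice.

```toml
[settings.kubernetes]
# Raise kubelet verbosity above the default of 2 to aid troubleshooting.
log-level = 4
```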
Signed-off-by: Arnaldo Garcia Rincon --- packages/kmod-5.15-nvidia/Cargo.toml | 25 ++ packages/kmod-5.15-nvidia/build.rs | 9 + .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 323 ++++++++++++++++++ .../nvidia-dependencies-modules-load.conf | 2 + .../kmod-5.15-nvidia/nvidia-ld.so.conf.in | 1 + .../nvidia-tesla-build-config.toml.in | 18 + .../kmod-5.15-nvidia/nvidia-tesla-path.env.in | 1 + .../nvidia-tesla-tmpfiles.conf.in | 3 + .../kmod-5.15-nvidia/nvidia-tmpfiles.conf.in | 2 + packages/kmod-5.15-nvidia/pkg.rs | 1 + 10 files changed, 385 insertions(+) create mode 100644 packages/kmod-5.15-nvidia/Cargo.toml create mode 100644 packages/kmod-5.15-nvidia/build.rs create mode 100644 packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec create mode 100644 packages/kmod-5.15-nvidia/nvidia-dependencies-modules-load.conf create mode 100644 packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in create mode 100644 packages/kmod-5.15-nvidia/nvidia-tesla-build-config.toml.in create mode 100644 packages/kmod-5.15-nvidia/nvidia-tesla-path.env.in create mode 100644 packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in create mode 100644 packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in create mode 100644 packages/kmod-5.15-nvidia/pkg.rs diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml new file mode 100644 index 00000000..7d309abc --- /dev/null +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "kmod-5_15-nvidia" +version = "0.1.0" +edition = "2018" +publish = false +build = "build.rs" + +[lib] +path = "pkg.rs" + +[package.metadata.build-package] +package-name = "kmod-5.15-nvidia" +releases-url = "https://docs.nvidia.com/datacenter/tesla/" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/515.65.01/NVIDIA-Linux-x86_64-515.65.01.run" +sha512 = "5221a4ac071eb39a37a841f19cfe4983286dc35e918956b40604404ef36c122612475df7b9a391a9a70bd60f44e598c8a0e5ec54ccc3e90d51f01e1b2fbe5e33" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/515.65.01/NVIDIA-Linux-aarch64-515.65.01.run" +sha512 = "31ec7ba727bf14263eeadc3880bd8f2aaa0fe8c144aa216bb8af06a154dd1aa5f4a787fe386b20f5d739a49c80435bca5f6deba3010c593e1e54ecd29b4ab1b0" + +[build-dependencies] +glibc = { path = "../glibc" } +kernel-5_15 = { path = "../kernel-5.15" } diff --git a/packages/kmod-5.15-nvidia/build.rs b/packages/kmod-5.15-nvidia/build.rs new file mode 100644 index 00000000..cad8999a --- /dev/null +++ b/packages/kmod-5.15-nvidia/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-package").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec new file mode 100644 index 00000000..f4adba20 --- /dev/null +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -0,0 +1,323 @@ +%global tesla_515 515.65.01 +%global tesla_515_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_515} +%global tesla_515_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%global tesla_515_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_515} +%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) +%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) + +Name: %{_cross_os}kmod-5.15-nvidia +Version: 1.0.0 +Release: 1%{?dist} +Summary: NVIDIA 
drivers for the 5.15 kernel
+# We use these licenses because we only ship our own software in the main package;
+# each subpackage includes the LICENSE file provided by the Licenses.toml file
+License: Apache-2.0 OR MIT
+URL: http://www.nvidia.com/
+
+# NVIDIA .run scripts from 0 to 199
+Source0: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-x86_64-%{tesla_515}.run
+Source1: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-aarch64-%{tesla_515}.run
+
+# Common NVIDIA conf files from 200 to 299
+Source200: nvidia-tmpfiles.conf.in
+Source202: nvidia-dependencies-modules-load.conf
+
+# NVIDIA tesla conf files from 300 to 399
+Source300: nvidia-tesla-tmpfiles.conf.in
+Source301: nvidia-tesla-build-config.toml.in
+Source302: nvidia-tesla-path.env.in
+Source303: nvidia-ld.so.conf.in
+
+BuildRequires: %{_cross_os}glibc-devel
+BuildRequires: %{_cross_os}kernel-5.15-archive
+
+%description
+%{summary}.
+
+%package tesla-515
+Summary: NVIDIA 515 Tesla driver
+Version: %{tesla_515}
+License: %{spdx_id}
+Requires: %{name}
+
+%description tesla-515
+%{summary}
+
+%prep
+# Extract nvidia sources with `-x`; otherwise, the script will try to install
+# the driver in the current run
+sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_515}.run -x
+
+%global kernel_sources %{_builddir}/kernel-devel
+tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz
+
+%build
+pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515}/kernel
+
+# This recipe was based on the NVIDIA yum/dnf specs:
+# https://github.com/NVIDIA/yum-packaging-precompiled-kmod
+
+# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel;
+# if we don't set this flag, the compilation fails
+make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld
+
+%{_cross_target}-strip -g --strip-unneeded nvidia/nv-interface.o
+%{_cross_target}-strip -g --strip-unneeded nvidia-uvm.o
+%{_cross_target}-strip -g --strip-unneeded nvidia-drm.o
+%{_cross_target}-strip -g --strip-unneeded nvidia-peermem/nvidia-peermem.o
+%{_cross_target}-strip -g --strip-unneeded nvidia-modeset/nv-modeset-interface.o
+
+# We delete these files since we just stripped the input .o files above, and
+# they will be built at runtime on the host
+rm nvidia{,-modeset,-peermem}.o
+
+# Delete the .ko files created by the make command, just to make sure that we
+# don't include any linked module in the base image
+rm nvidia{,-modeset,-peermem,-drm}.ko
+
+popd
+
+%install
+install -d %{buildroot}%{_cross_libexecdir}
+install -d %{buildroot}%{_cross_libdir}
+install -d %{buildroot}%{_cross_tmpfilesdir}
+install -d %{buildroot}%{_cross_unitdir}
+install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d}
+
+KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release)
+sed \
+  -e "s|__KERNEL_VERSION__|${KERNEL_VERSION}|" \
+  -e "s|__PREFIX__|%{_cross_prefix}|" %{S:200} > nvidia.conf
+install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir}
+
+# Install modules-load.d drop-in to autoload required kernel modules
+install -d %{buildroot}%{_cross_libdir}/modules-load.d
+install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf
+
+# Begin NVIDIA tesla 515
+pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515}
+# We install bins and libs in a versioned directory to prevent collisions with future driver versions
+install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}
+install -d 
%{buildroot}%{tesla_515_libdir} +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} + +sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' %{S:300} > nvidia-tesla-%{tesla_515}.conf +install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_tmpfilesdir}/ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/|' %{S:301} > \ + nvidia-tesla-%{tesla_515}.toml +install -m 0644 nvidia-tesla-%{tesla_515}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +# Install nvidia-path environment file, will be used as a drop-in for containerd.service since +# libnvidia-container locates and mounts helper binaries into the containers from either +# `PATH` or `NVIDIA_PATH` +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} +# We need to add `_cross_libdir/tesla_515` to the paths loaded by the ldconfig service +# because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the +# libraries into the containers +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' \ + > nvidia-tesla-%{tesla_515}.conf +install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ + +# driver +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o + +# uvm +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# modeset +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# peermem +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# drm +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# Binaries +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%if "%{_cross_arch}" == "x86_64" +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%endif + +# We install all the libraries, and filter them out in the 'files' section, so we can catch +# when new 
libraries are added +install -m 755 *.so* %{buildroot}/%{tesla_515_libdir}/ + +# This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while +# the symlinks are created. For now, we only symlink libEGL.so.1.1.0. +EXCLUDED_LIBS="libEGL.so.%{tesla_515}" + +for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do + [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue + soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" + [ -n "${soname}" ] || continue + [ "${lib}" == "${soname}" ] && continue + ln -s "${lib}" %{buildroot}/%{tesla_515_libdir}/"${soname}" +done + +# Include the firmware file for GSP support +install -d %{buildroot}%{tesla_515_firmwaredir} +install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_515_firmwaredir} + +popd + +%files +%{_cross_attribution_file} +%dir %{_cross_libexecdir}/nvidia +%dir %{_cross_libdir}/nvidia +%dir %{_cross_datadir}/nvidia +%dir %{_cross_libdir}/modules-load.d +%dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%{_cross_tmpfilesdir}/nvidia.conf +%{_cross_libdir}/systemd/system/ +%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf + +%files tesla-515 +%license %{license_file} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515} +%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%dir %{tesla_515_libdir} +%dir %{tesla_515_firmwaredir} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_515} + +# Binaries +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-smi + +# Configuration files +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_515}.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_515}.conf +%{_cross_factorydir}/nvidia/tesla/%{tesla_515}/nvidia-path.env + +# driver +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o + +# uvm +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.o + +# modeset +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-modeset.mod.o + +# tmpfiles +%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_515}.conf + +# We only install the libraries required by all the DRIVER_CAPABILITIES, described here: +# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities + +# Utility libs +%{tesla_515_libdir}/libnvidia-ml.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ml.so.1 +%{tesla_515_libdir}/libnvidia-cfg.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-cfg.so.1 +%{tesla_515_libdir}/libnvidia-nvvm.so.4 +%{tesla_515_libdir}/libnvidia-nvvm.so.%{tesla_515} + +# Compute libs +%{tesla_515_libdir}/libcuda.so.%{tesla_515} +%{tesla_515_libdir}/libcuda.so.1 +%{tesla_515_libdir}/libnvidia-opencl.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-opencl.so.1 +%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.1 +%{tesla_515_libdir}/libnvidia-allocator.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-allocator.so.1 
+%{tesla_515_libdir}/libOpenCL.so.1.0.0 +%{tesla_515_libdir}/libOpenCL.so.1 +%if "%{_cross_arch}" == "x86_64" +%{tesla_515_libdir}/libnvidia-compiler.so.%{tesla_515} +%endif + +# Video libs +%{tesla_515_libdir}/libvdpau_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libvdpau_nvidia.so.1 +%{tesla_515_libdir}/libnvidia-encode.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-encode.so.1 +%{tesla_515_libdir}/libnvidia-opticalflow.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-opticalflow.so.1 +%{tesla_515_libdir}/libnvcuvid.so.%{tesla_515} +%{tesla_515_libdir}/libnvcuvid.so.1 + +# Graphics libs +%{tesla_515_libdir}/libnvidia-eglcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-glcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-tls.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-glsi.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-rtcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-fbc.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-fbc.so.1 +%{tesla_515_libdir}/libnvoptix.so.%{tesla_515} +%{tesla_515_libdir}/libnvoptix.so.1 +%{tesla_515_libdir}/libnvidia-vulkan-producer.so.%{tesla_515} + +# Graphics GLVND libs +%{tesla_515_libdir}/libnvidia-glvkspirv.so.%{tesla_515} +%{tesla_515_libdir}/libGLX_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLX_nvidia.so.0 +%{tesla_515_libdir}/libEGL_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libEGL_nvidia.so.0 +%{tesla_515_libdir}/libGLESv2_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLESv2_nvidia.so.2 +%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.1 + +# Graphics compat +%{tesla_515_libdir}/libEGL.so.1.1.0 +%{tesla_515_libdir}/libEGL.so.1 +%{tesla_515_libdir}/libEGL.so.%{tesla_515} +%{tesla_515_libdir}/libGL.so.1.7.0 +%{tesla_515_libdir}/libGL.so.1 +%{tesla_515_libdir}/libGLESv1_CM.so.1.2.0 +%{tesla_515_libdir}/libGLESv1_CM.so.1 +%{tesla_515_libdir}/libGLESv2.so.2.1.0 +%{tesla_515_libdir}/libGLESv2.so.2 + +# NGX +%{tesla_515_libdir}/libnvidia-ngx.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ngx.so.1 + +# Firmware +%{tesla_515_firmwaredir}/gsp.bin + +# Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them +# for now, and we will add them if requested +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-server +%if "%{_cross_arch}" == "x86_64" +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-ngx-updater +%endif + +# None of these libraries are required by libnvidia-container, so they +# won't be used by a containerized workload +%exclude %{tesla_515_libdir}/libGLX.so.0 +%exclude %{tesla_515_libdir}/libGLdispatch.so.0 +%exclude %{tesla_515_libdir}/libOpenGL.so.0 +%exclude %{tesla_515_libdir}/libglxserver_nvidia.so.%{tesla_515} +%exclude %{tesla_515_libdir}/libnvidia-gtk2.so.%{tesla_515} +%exclude %{tesla_515_libdir}/libnvidia-gtk3.so.%{tesla_515} +%exclude %{tesla_515_libdir}/nvidia_drv.so +%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1 +%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1 +%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1.1.0 +%exclude 
%{tesla_515_libdir}/libnvidia-egl-wayland.so.1.1.9 +%exclude %{tesla_515_libdir}/libnvidia-wayland-client.so.%{tesla_515} diff --git a/packages/kmod-5.15-nvidia/nvidia-dependencies-modules-load.conf b/packages/kmod-5.15-nvidia/nvidia-dependencies-modules-load.conf new file mode 100644 index 00000000..86f884a6 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-dependencies-modules-load.conf @@ -0,0 +1,2 @@ +i2c_core +ipmi_msghandler diff --git a/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in b/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in new file mode 100644 index 00000000..a07b0ccb --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in @@ -0,0 +1 @@ +__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-build-config.toml.in b/packages/kmod-5.15-nvidia/nvidia-tesla-build-config.toml.in new file mode 100644 index 00000000..fb74dc51 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-tesla-build-config.toml.in @@ -0,0 +1,18 @@ +[nvidia-tesla] +lib-modules-path = "kernel/drivers/extra/video/nvidia/tesla" +objects-source = "__NVIDIA_MODULES__" + +[nvidia-tesla.object-files."nvidia.o"] +link-objects = ["nv-interface.o", "nv-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia.ko"] +link-objects = ["nvidia.o", "nvidia.mod.o"] + +[nvidia-tesla.object-files."nvidia-modeset.o"] +link-objects = ["nv-modeset-interface.o", "nv-modeset-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia-modeset.ko"] +link-objects = ["nvidia-modeset.o", "nvidia-modeset.mod.o"] + +[nvidia-tesla.kernel-modules."nvidia-uvm.ko"] +link-objects = ["nvidia-uvm.o", "nvidia-uvm.mod.o"] diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-path.env.in b/packages/kmod-5.15-nvidia/nvidia-tesla-path.env.in new file mode 100644 index 00000000..28f74deb --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-tesla-path.env.in @@ -0,0 +1 @@ +NVIDIA_PATH=__NVIDIA_BINDIR__ diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in new file mode 100644 index 00000000..f208e1d2 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in @@ -0,0 +1,3 @@ +C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf diff --git a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in new file mode 100644 index 00000000..d4763f28 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in @@ -0,0 +1,2 @@ +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - +d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - diff --git a/packages/kmod-5.15-nvidia/pkg.rs b/packages/kmod-5.15-nvidia/pkg.rs new file mode 100644 index 00000000..d799fb2d --- /dev/null +++ b/packages/kmod-5.15-nvidia/pkg.rs @@ -0,0 +1 @@ +// not used From 5a713bfaa7a04588f5ac265bf27ba73fbdc726a2 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 20 Sep 2022 21:57:57 +0000 Subject: [PATCH 0763/1356] kernel-5.10: update to 5.10.135 Rebase to Amazon Linux upstream version based on 5.10.135. Apply two config changes in the process: * Stop building the qlge NIC driver. The hardware has been EOL'd more than 8 years ago, and the driver has known quality problems which is the reason it lives in the staging tree. 
The Amazon Linux kernel based on 5.15.59 dropped it and I don't see a good reason to retain it for Bottlerocket either, so drop it from the 5.10 builds, too. * Continue not building the sch_cake qdisc. CAKE is targeted for residential links, and building devices such as routers in particular. It is unlikely to be useful for Bottlerocket for the time being. Since upstream builds it as a module now, sch_cake needs to be explicitly disabled. Signed-off-by: Markus Boehme --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/config-bottlerocket | 7 +++++++ packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 8aaff765..813f4f98 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/04a89d2664b3be51cad04255bde6ff8ee1620a5281b0dc1f2f4707e1e6cfe150/kernel-5.10.130-118.517.amzn2.src.rpm" -sha512 = "3047b80f7f8d703b3c0ab9785493245d01b27faa5948fddbcb9d0843c5bfcfa0972b61afa70551a2cc3d2c8b92ec0069993ed92ca12459f7ec67d03a00a031b7" +url = "https://cdn.amazonlinux.com/blobstore/5ae72bc0cd79c8b003ee349c75c2f23292306528a387d4fba338d0627a7f0d4f/kernel-5.10.135-122.509.amzn2.src.rpm" +sha512 = "2c76a56b784355dcca30f06bd343f067691d97e09fbf80bb071a79e874f05a4dd75e3e2431aed0351fc9b4845a6b6310121828127166a3bc5d15fd9bdb5c7336" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 203bc9c5..c2deafc1 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -133,3 +133,10 @@ CONFIG_EXT4_USE_FOR_EXT2=y # Disable USB-attached network interfaces, unused in the cloud and on server-grade hardware. # CONFIG_USB_NET_DRIVERS is not set + +# Disable obsolete NIC drivers +# CONFIG_QLGE is not set + +# Disable unused qdiscs +# - sch_cake targets home routers and residential links +# CONFIG_NET_SCH_CAKE is not set diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 08f24f69..23f084bf 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.130 +Version: 5.10.135 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/04a89d2664b3be51cad04255bde6ff8ee1620a5281b0dc1f2f4707e1e6cfe150/kernel-5.10.130-118.517.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/5ae72bc0cd79c8b003ee349c75c2f23292306528a387d4fba338d0627a7f0d4f/kernel-5.10.135-122.509.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 96406418e8716de773a58e9c08fc7de5e4a7e900 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 20 Sep 2022 21:58:56 +0000 Subject: [PATCH 0764/1356] kernel-5.15: update to 5.15.59 Rebase to Amazon Linux upstream version based on 5.15.59. Apply a config change in the process: * Continue not building the sch_cake qdisc. CAKE is targeted for residential links, and building devices such as routers in particular. 
It is unlikely to be useful for Bottlerocket for the time being. Since upstream builds it as a module now, sch_cake needs to be explicitly disabled. Signed-off-by: Markus Boehme --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/config-bottlerocket | 4 ++++ packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 21d45121..98d4a6ea 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/47fc1797c6cf0a9ee2cb4c2ccba9c73a47c0ff75bdb22bf19e939083029881dc/kernel-5.15.54-25.126.amzn2.src.rpm" -sha512 = "5c08b5cd682adccd1bb9e2a418ae5bbb24ddcdc53e6ae46ea9760415989a25e02066db9e1aa6240455523189fb319f3aa0cb5b1f9ae8b5bccda8f4c46f2cb7a8" +url = "https://cdn.amazonlinux.com/blobstore/ca3121c2e3966f8f7d542365e234f8a143c622dfbf6cfbbbe7793c2e8105ad64/kernel-5.15.59-33.133.amzn2.src.rpm" +sha512 = "5358583e50f58a56f29ef306435593f8511b822387386739ca9c6c01942870bb989af0250b33073d5fec08ca32dbcbafcccd9ff46db1f7ee524b7dd11be2d764" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 7802f8e4..afe1ce66 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -134,3 +134,7 @@ CONFIG_EXT4_USE_FOR_EXT2=y # Disable USB-attached network interfaces, unused in the cloud and on server-grade hardware. # CONFIG_USB_NET_DRIVERS is not set + +# Disable unused qdiscs +# - sch_cake targets home routers and residential links +# CONFIG_NET_SCH_CAKE is not set diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index e3a08e7d..8d1b9af5 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.54 +Version: 5.15.59 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/47fc1797c6cf0a9ee2cb4c2ccba9c73a47c0ff75bdb22bf19e939083029881dc/kernel-5.15.54-25.126.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/ca3121c2e3966f8f7d542365e234f8a143c622dfbf6cfbbbe7793c2e8105ad64/kernel-5.15.59-33.133.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 56910bbec131115ce079d2d0e137bd03f1914312 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 3 Oct 2022 22:57:18 +0000 Subject: [PATCH 0765/1356] tools: update AWS SDK for Rust to 0.18.0 Also bumps coldsnap, indicatif, tough, tough-kms, and tough-ssm. 
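
Note on the indicatif bump: in indicatif 0.17, `ProgressStyle::template()` became fallible and returns a Result, which is why the `build_progress_bar` helper in pubsys (see the snapshot.rs hunk below) now wraps the optional progress bar in a Result. The following is only a rough sketch of the new API shape under that assumption; the function name and template string are illustrative and are not taken from pubsys:

    use indicatif::{ProgressBar, ProgressStyle};

    // Sketch of the indicatif 0.17 API: template() returns a Result, so style
    // construction can fail with a TemplateError that callers must handle.
    fn styled_bar(len: u64) -> Result<ProgressBar, indicatif::style::TemplateError> {
        let style = ProgressStyle::default_bar()
            // Illustrative template string; an invalid placeholder here would
            // surface as a TemplateError rather than being accepted silently.
            .template(" uploading [{bar:50.white/black}] {pos}/{len} ({eta})")?
            .progress_chars("=> ");
        let bar = ProgressBar::new(len);
        bar.set_style(style);
        Ok(bar)
    }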
--- tools/Cargo.lock | 115 ++++++++++++++------------- tools/deny.toml | 2 +- tools/pubsys/Cargo.toml | 18 ++--- tools/pubsys/src/aws/ami/snapshot.rs | 14 +++- tools/testsys/Cargo.toml | 4 +- 5 files changed, 81 insertions(+), 72 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index d78f9919..6ae14c31 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -130,9 +130,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a8c971b0cb0484fc9436a291a44503b95141edc36ce7a6af6b6d7a06a02ab0" +checksum = "7d4cf4608abd7c8038a4c609a1270e61b73c86550f5655654ca28322e0a2e2c1" dependencies = [ "aws-http", "aws-sdk-sso", @@ -149,6 +149,7 @@ dependencies = [ "http", "hyper", "ring", + "time 0.3.14", "tokio", "tower", "tracing", @@ -157,11 +158,12 @@ dependencies = [ [[package]] name = "aws-endpoint" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc956f415dda77215372e5bc751a2463d1f9a1ec34edf3edc6c0ff67e5c8e43" +checksum = "7ffaf1da7a11d38a5afe7cdd202ab2e25528de7cf38c47b571c0dde4008d98ae" dependencies = [ "aws-smithy-http", + "aws-smithy-types", "aws-types", "http", "regex", @@ -170,9 +172,9 @@ dependencies = [ [[package]] name = "aws-http" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a0d98a1d606aa24554e604f220878db4aa3b525b72f88798524497cc3867fc6" +checksum = "8309108743e2e74f249ff29a7c7be79c6343ea649dd8c31e4c0e07ca6946d8ed" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -188,9 +190,9 @@ dependencies = [ [[package]] name = "aws-sdk-ebs" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702e6f505ce8d61f0bef4d2b2747f156dcbd6ba23b2a870ad9aa868830f026c5" +checksum = "e901db77b1cbdce5804ee5587882aceac22250171dfdde51f6a2f95161bd1394" dependencies = [ "aws-endpoint", "aws-http", @@ -211,9 +213,9 @@ dependencies = [ [[package]] name = "aws-sdk-ec2" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b6c04720f5846edb80aa8c4dda848b77efdf99597f1ae48e12ea6b1ad1d3ce" +checksum = "e17191be59536e96d100c9dca58f6807fc8d2b0848a06c360b1ffa164bf16bca" dependencies = [ "aws-endpoint", "aws-http", @@ -235,9 +237,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fee45083be6f062676aaeab0fe16c931a4e188d2ddce6f2c8d17399c014dc81" +checksum = "829bf306cb8d20fc1d5d08a8dc440f37d24bfe6690657f55612ccc8a0c083675" dependencies = [ "aws-endpoint", "aws-http", @@ -257,9 +259,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssm" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df79d41b62dcea814079c2f6540c465b6d45210b20a4e30efdcd24cc5dcf4aec" +checksum = "504854d33ea2be4f61391b5c701be7e411c212272367fd1f810e793d255a547d" dependencies = [ "aws-endpoint", "aws-http", @@ -280,9 +282,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa0c66fab12976065403cf4cafacffe76afa91d0da335d195af379d4223d235" +checksum = 
"f7a0659e5269f8c4bd06f362ec7e35b4f55956c4d60e0ca177b575db80584a45" dependencies = [ "aws-endpoint", "aws-http", @@ -302,9 +304,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048037cdfd7f42fb29b5f969c7f639b4b7eac00e8f911e4eac4f89fb7b3a0500" +checksum = "edc795c7851c0e9bcefde5e6bb610c16a9e03220e0336fc12f75bb80d9ce7e80" dependencies = [ "aws-endpoint", "aws-http", @@ -324,9 +326,9 @@ dependencies = [ [[package]] name = "aws-sig-auth" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8386fc0d218dbf2011f65bd8300d21ba98603fd150b962f61239be8b02d1fc6" +checksum = "0ee4bf20136757fd9f606bb4adafe6d19fb02bc48033a8d4f205f21d56fa783a" dependencies = [ "aws-sigv4", "aws-smithy-http", @@ -337,9 +339,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd866926c2c4978210bcb01d7d1b431c794f0c23ca9ee1e420204b018836b5fb" +checksum = "b99b21b3aceaf224cccd693b353e1f38af4ede8c5fc618b97dd458bb63238efc" dependencies = [ "aws-smithy-http", "form_urlencoded", @@ -355,9 +357,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb59cfdd21143006c01b9ca4dc4a9190b8c50c2ef831f9eb36f54f69efa42f1" +checksum = "ef79062cf5fa881dd156938ca438ec2de0f7ec9342c2f84fa6303274e1484b43" dependencies = [ "futures-util", "pin-project-lite", @@ -367,9 +369,9 @@ dependencies = [ [[package]] name = "aws-smithy-client" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44243329ba8618474c3b7f396de281f175ae172dd515b3d35648671a3cf51871" +checksum = "13f402fa9a45353f7f02f8046a6a568143844d201c5b4cc3bedb6442058538c8" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -390,9 +392,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba78f69a5bbe7ac1826389304c67b789032d813574e78f9a2d450634277f833" +checksum = "23861d0b53a1369eab1e8d48c8bb3492eb3def1c2f2222dfb1bad58dd03914a5" dependencies = [ "aws-smithy-types", "bytes", @@ -411,9 +413,9 @@ dependencies = [ [[package]] name = "aws-smithy-http-tower" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8a512d68350561e901626baa08af9491cfbd54596201b84b4da846a59e4da3" +checksum = "04f6b3ae42d5c52bbaadfdd31c09fd11c92b823d329915dedbb08c0e9525755c" dependencies = [ "aws-smithy-http", "bytes", @@ -426,18 +428,18 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b7633698853aae80bd8b26866531420138eca91ea4620735d20b0537c93c2e" +checksum = "5048b693643803c001f88fad36c5a7aa1159e56b0025527fadc57e830aa48b11" dependencies = [ "aws-smithy-types", ] [[package]] name = "aws-smithy-query" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a94b5a8cc94a85ccbff89eb7bc80dc135ede02847a73d68c04ac2a3e4cf6b7" +checksum = "b317cd3b326444e659a2f287f67e8c72903495c71a3473b0764880454b3aa25c" dependencies = [ "aws-smithy-types", "urlencoding", @@ 
-445,9 +447,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d230d281653de22fb0e9c7c74d18d724a39d7148e2165b1e760060064c4967c0" +checksum = "4149b09b9d8cf37f0afc390144f5d71b8f4daadfd9540ddf43ad27b54d407470" dependencies = [ "itoa", "num-integer", @@ -457,18 +459,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aacaf6c0fa549ebe5d9daa96233b8635965721367ee7c69effc8d8078842df3" +checksum = "2c6d8e7a15feb04f041cf0ede8f6c16e03fe5a4b03e164ae3a090e829404d925" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb54f097516352475a0159c9355f8b4737c54044538a4d9aca4d376ef2361ccc" +checksum = "1bba03e59e1a0223a2bd3567da2b07a458b067ccf7846996b82406e80008ebc1" dependencies = [ "aws-smithy-async", "aws-smithy-client", @@ -701,9 +703,9 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111badfcdc635ecbffc73bfa46ad0a83ae0c41bc01e09c4045ca3ffb1046c5c5" +checksum = "284c5dc4766d176cbd1ccc07cd5e0ffabe0e2abc1a944faa60fd6bd0de537f17" dependencies = [ "argh", "async-trait", @@ -733,6 +735,7 @@ dependencies = [ "libc", "once_cell", "terminal_size", + "unicode-width", "winapi", ] @@ -1395,14 +1398,13 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.16.2" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b" +checksum = "bfddc9561e8baf264e0e45e197fd7696320026eb10a8180340debc27b18f535b" dependencies = [ "console", - "lazy_static", "number_prefix", - "regex", + "unicode-width", ] [[package]] @@ -1726,10 +1728,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg", "bitflags", "cfg-if", "libc", @@ -1796,9 +1799,9 @@ dependencies = [ [[package]] name = "olpc-cjson" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ca49fe685014bbf124ee547da94ed7bb65a6eb9dc9c4711773c081af96a39c" +checksum = "87dc75cf72208cd853671c1abccc5d5d1e43b1e378dde67340ef933219a8c13c" dependencies = [ "serde", "serde_json", @@ -3142,9 +3145,9 @@ checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a537c6b4307f5401e82a0196e97aaab9599e9c0f880e168eafb176abbac63d" +checksum = "dc636dd1ee889a366af6731f1b63b60baf19528b46df5a7c2d4b3bf8b60bca2d" dependencies = [ "chrono", "dyn-clone", @@ -3169,9 +3172,9 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e17edfb12e2c08c9ac3fe6ebb43fafc82fa9ffbfffbe50030b7623f8f42f34" +checksum = "6d12f5356a5902062ec2aaf6ca003b8742c7c8f82e959698fee903275a8ba506" dependencies = [ 
"aws-config", "aws-sdk-kms", @@ -3184,9 +3187,9 @@ dependencies = [ [[package]] name = "tough-ssm" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e423321963b68b425bc844c01a16bfcab3b8300ab768dad46992201026421e" +checksum = "dffd50ee15d08e9b104b398e4537acfc3c02f76e1d0c569856290d278bb5180b" dependencies = [ "aws-config", "aws-sdk-ssm", diff --git a/tools/deny.toml b/tools/deny.toml index 372ac1aa..9e050d23 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -70,7 +70,7 @@ skip-tree = [ { name = "structopt", version = "0.3.26" }, # aws-smithy-client uses an older hyper-rustls - { name = "aws-smithy-client", version = "0.46.0" }, + { name = "aws-smithy-client", version = "0.48.0" }, ] [sources] diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 4d30af5c..903909a0 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -8,21 +8,21 @@ publish = false [dependencies] async-trait = "0.1.53" -aws-config = "0.46.0" -aws-sdk-ebs = "0.16.0" -aws-sdk-ec2 = "0.16.0" -aws-sdk-kms = "0.16.0" -aws-sdk-ssm = "0.16.0" -aws-sdk-sts = "0.16.0" -aws-smithy-types = "0.46.0" -aws-types = "0.46.0" +aws-config = "0.48.0" +aws-sdk-ebs = "0.18.0" +aws-sdk-ec2 = "0.18.0" +aws-sdk-kms = "0.18.0" +aws-sdk-ssm = "0.18.0" +aws-sdk-sts = "0.18.0" +aws-smithy-types = "0.48.0" +aws-types = "0.48.0" chrono = "0.4" clap = "3.1" coldsnap = { version = "0.4", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13.0" futures = "0.3.5" http = "0.2.8" -indicatif = "0.16.0" +indicatif = "0.17.1" lazy_static = "1.4" log = "0.4" num_cpus = "1" diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs index b26249d8..15b25611 100644 --- a/tools/pubsys/src/aws/ami/snapshot.rs +++ b/tools/pubsys/src/aws/ami/snapshot.rs @@ -4,17 +4,18 @@ use snafu::{OptionExt, ResultExt}; use std::path::Path; /// Create a progress bar to show status of snapshot blocks, if wanted. -fn build_progress_bar(no_progress: bool, verb: &str) -> Option { +fn build_progress_bar(no_progress: bool, verb: &str) -> Result> { if no_progress { - return None; + return Ok(None); } let progress_bar = ProgressBar::new(0); progress_bar.set_style( ProgressStyle::default_bar() .template(&[" ", verb, " [{bar:50.white/black}] {pos}/{len} ({eta})"].concat()) + .context(error::ProgressBarTemplateSnafu)? .progress_chars("=> "), ); - Some(progress_bar) + Ok(Some(progress_bar)) } /// Uploads the given path into a snapshot. @@ -35,7 +36,7 @@ where .to_string_lossy(); uploader - .upload_from_file(path, desired_size, Some(&filename), progress_bar) + .upload_from_file(path, desired_size, Some(&filename), progress_bar?) 
.await .context(error::UploadSnapshotSnafu) } @@ -51,6 +52,11 @@ mod error { #[snafu(display("Invalid image path '{}'", path.display()))] InvalidImagePath { path: PathBuf }, + #[snafu(display("Failed to parse progress style template: {}", source))] + ProgressBarTemplate { + source: indicatif::style::TemplateError, + }, + #[snafu(display("Failed to upload snapshot: {}", source))] UploadSnapshot { source: coldsnap::UploadError }, } diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 0da89992..a02b2636 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -8,8 +8,8 @@ publish = false [dependencies] anyhow = "1.0" -aws-config = "0.46" -aws-sdk-ec2 = "0.16" +aws-config = "0.48" +aws-sdk-ec2 = "0.18" bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "07b9ae8", version = "0.0.1"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } From bf34ce559230b9bc12bacab84f2ab8a60ba7bf07 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 27 Sep 2022 20:09:22 +0000 Subject: [PATCH 0766/1356] packages: update grub Signed-off-by: Arnaldo Garcia Rincon --- packages/grub/Cargo.toml | 4 ++-- packages/grub/grub.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 290eb60e..c8f0669f 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -9,8 +9,8 @@ build = "build.rs" path = "pkg.rs" [[package.metadata.build-package.external-files]] -url = "https://cdn.amazonlinux.com/blobstore/a2f920abd554c7ab22af43c720198abcf5f78828c0543a0d7c65c654610eab26/grub2-2.06-2.amzn2.0.1.src.rpm" -sha512 = "0a30a75426f9030b9bab489b824d4cc51c864f2fef87df298ca4a725ecfb49dbd310f276740fadab64879ee1dfc60b35f52957b7cfc5ff023d856b536b0af04d" +url = "https://cdn.amazonlinux.com/blobstore/21d0df3b06c1c5cc9e5cf3bb559dad713335e782ac3a46b57c5d0097e22c0aec/grub2-2.06-9.amzn2.0.1.src.rpm" +sha512 = "f27b4005e789ce1e0e792133f6adfbdbf221245c03b27c25285ff5b81e53065385536971934744f33c52a924022480aa15cd25e8d5ded9f4999c753e8394ae36" [build-dependencies] glibc = { path = "../glibc" } diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index b453f6ae..b3e06463 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -14,7 +14,7 @@ Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ -Source0: https://cdn.amazonlinux.com/blobstore/a2f920abd554c7ab22af43c720198abcf5f78828c0543a0d7c65c654610eab26/grub2-2.06-2.amzn2.0.1.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/21d0df3b06c1c5cc9e5cf3bb559dad713335e782ac3a46b57c5d0097e22c0aec/grub2-2.06-9.amzn2.0.1.src.rpm Source1: bios.cfg Source2: efi.cfg Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch From 592d1d098683996414a1a6bdc8b86468f2de43eb Mon Sep 17 00:00:00 2001 From: Richard Kelly Date: Thu, 25 Aug 2022 21:04:37 +0000 Subject: [PATCH 0767/1356] infrasys: update to AWS SDK Rust Co-authored-by: Patrick J.P. 
Culp --- tools/Cargo.lock | 379 ++++++++++++++--------------------- tools/deny.toml | 3 - tools/infrasys/Cargo.toml | 7 +- tools/infrasys/src/error.rs | 22 +- tools/infrasys/src/keys.rs | 44 ++-- tools/infrasys/src/main.rs | 53 ++++- tools/infrasys/src/root.rs | 39 ++-- tools/infrasys/src/s3.rs | 113 ++++++----- tools/infrasys/src/shared.rs | 46 +++-- 9 files changed, 342 insertions(+), 364 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 6ae14c31..481f33ff 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -149,7 +149,7 @@ dependencies = [ "http", "hyper", "ring", - "time 0.3.14", + "time 0.3.15", "tokio", "tower", "tracing", @@ -188,6 +188,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-cloudformation" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cafaf0b9199f52cd69ef07c1d15fc7a57bf3ff53a8b0885cf708110fa49f6450" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-query", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "http", + "tokio-stream", + "tower", +] + [[package]] name = "aws-sdk-ebs" version = "0.18.0" @@ -257,6 +281,34 @@ dependencies = [ "tower", ] +[[package]] +name = "aws-sdk-s3" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323b9107094fc396a0116326b577af48d9cfb26ec7c09588584ec82cee057b81" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-client", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "bytes-utils", + "http", + "http-body", + "tokio-stream", + "tower", + "tracing", +] + [[package]] name = "aws-sdk-ssm" version = "0.18.0" @@ -331,6 +383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee4bf20136757fd9f606bb4adafe6d19fb02bc48033a8d4f205f21d56fa783a" dependencies = [ "aws-sigv4", + "aws-smithy-eventstream", "aws-smithy-http", "aws-types", "http", @@ -343,7 +396,9 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b99b21b3aceaf224cccd693b353e1f38af4ede8c5fc618b97dd458bb63238efc" dependencies = [ + "aws-smithy-eventstream", "aws-smithy-http", + "bytes", "form_urlencoded", "hex", "http", @@ -351,7 +406,7 @@ dependencies = [ "percent-encoding", "regex", "ring", - "time 0.3.14", + "time 0.3.15", "tracing", ] @@ -367,6 +422,27 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "aws-smithy-checksums" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6face4c12b335ba734a4416c15d5eeb0af88aa61182a84ff50db62bfa261183" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc32c", + "crc32fast", + "hex", + "http", + "http-body", + "md-5", + "pin-project-lite", + "sha1", + "sha2", + "tracing", +] + [[package]] name = "aws-smithy-client" version = "0.48.0" @@ -390,12 +466,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-smithy-eventstream" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b959c2c1752c2afbd863953046c06f7ee592f68d64719b7bab3193ac3b0fa77" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + [[package]] name = "aws-smithy-http" version = 
"0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23861d0b53a1369eab1e8d48c8bb3492eb3def1c2f2222dfb1bad58dd03914a5" dependencies = [ + "aws-smithy-eventstream", "aws-smithy-types", "bytes", "bytes-utils", @@ -454,7 +542,7 @@ dependencies = [ "itoa", "num-integer", "ryu", - "time 0.3.14", + "time 0.3.15", ] [[package]] @@ -509,15 +597,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.3" @@ -569,7 +648,7 @@ dependencies = [ "reqwest", "serde", "serde_plain", - "sha2 0.10.6", + "sha2", "snafu", "toml", "url", @@ -719,7 +798,7 @@ dependencies = [ "futures", "indicatif", "nix", - "sha2 0.10.6", + "sha2", "snafu", "tempfile", "tokio", @@ -727,13 +806,13 @@ dependencies = [ [[package]] name = "console" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89eab4d20ce20cea182308bca13088fecea9c05f6776cf287205d41a0ed3c847" +checksum = "c050367d967ced717c04b65d8c619d863ef9292ce0c5760028655a2fb298718c" dependencies = [ "encode_unicode", + "lazy_static", "libc", - "once_cell", "terminal_size", "unicode-width", "winapi", @@ -764,6 +843,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32c" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dfea2db42e9927a3845fb268a10a72faed6d416065f77873f05e411457c363e" +dependencies = [ + "rustc_version", +] + [[package]] name = "crc32fast" version = "1.3.2" @@ -796,26 +884,24 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -828,16 +914,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "ct-logs" version = "0.8.0" @@ -882,22 +958,13 @@ dependencies = [ "syn", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.3", + "block-buffer", 
"crypto-common", ] @@ -1211,16 +1278,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac", - "digest 0.9.0", -] - [[package]] name = "home" version = "0.5.3" @@ -1311,7 +1368,7 @@ dependencies = [ "hyper", "log", "rustls 0.19.1", - "rustls-native-certs 0.5.0", + "rustls-native-certs", "tokio", "tokio-rustls 0.22.0", "webpki 0.21.4", @@ -1325,9 +1382,7 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "log", "rustls 0.20.6", - "rustls-native-certs 0.6.2", "tokio", "tokio-rustls 0.23.4", ] @@ -1413,16 +1468,17 @@ version = "0.1.0" dependencies = [ "assert-json-diff", "async-trait", + "aws-config", + "aws-sdk-cloudformation", + "aws-sdk-s3", + "aws-types", "clap 3.2.22", "hex", "log", "pubsys-config", - "rusoto_cloudformation", - "rusoto_core", - "rusoto_s3", "serde_json", "serde_yaml", - "sha2 0.10.6", + "sha2", "shell-words", "simplelog", "snafu", @@ -1590,9 +1646,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.133" +version = "0.2.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0f80d65747a3e43d1596c7c5492d95d5edddaabd45a7fcdb02b95f644164966" +checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" [[package]] name = "linked-hash-map" @@ -1627,13 +1683,11 @@ checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "md-5" -version = "0.9.1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" +checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -1814,17 +1868,11 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -1854,9 +1902,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "5230151e44c0f05157effb743e8d517472843121cf9243e8b81393edb5acd9ce" dependencies = [ "autocfg", "cc", @@ -2034,9 +2082,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.44" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7bd7356a8122b6c4a24a82b278680c73357984ca2fc79a0f9fa6dea7dced7c58" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" dependencies = [ "unicode-ident", ] @@ -2110,7 +2158,7 @@ dependencies = [ "log", "pubsys-config", "reqwest", - "sha2 0.10.6", + "sha2", "shell-words", "simplelog", "snafu", @@ -2283,102 +2331,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rusoto_cloudformation" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd30fadf72299e6d385ed4e32b1b765cb1c20e359b05ff14fa35dd2d7dd6a229" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde_urlencoded", - "xml-rs", -] - -[[package]] -name = "rusoto_core" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db30db44ea73551326269adcf7a2169428a054f14faf9e1768f2163494f2fa2" -dependencies = [ - "async-trait", - "base64", - "bytes", - "crc32fast", - "futures", - "http", - "hyper", - "hyper-rustls 0.23.0", - "lazy_static", - "log", - "rusoto_credential", - "rusoto_signature", - "rustc_version", - "serde", - "serde_json", - "tokio", - "xml-rs", -] - -[[package]] -name = "rusoto_credential" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee0a6c13db5aad6047b6a44ef023dbbc21a056b6dab5be3b79ce4283d5c02d05" -dependencies = [ - "async-trait", - "chrono", - "dirs-next", - "futures", - "hyper", - "serde", - "serde_json", - "shlex", - "tokio", - "zeroize", -] - -[[package]] -name = "rusoto_s3" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aae4677183411f6b0b412d66194ef5403293917d66e70ab118f07cc24c5b14d" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "xml-rs", -] - -[[package]] -name = "rusoto_signature" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ae95491c8b4847931e291b151127eccd6ff8ca13f33603eb3d0035ecb05272" -dependencies = [ - "base64", - "bytes", - "chrono", - "digest 0.9.0", - "futures", - "hex", - "hmac", - "http", - "hyper", - "log", - "md-5", - "percent-encoding", - "pin-project-lite", - "rusoto_credential", - "rustc_version", - "serde", - "sha2 0.9.9", - "tokio", -] - [[package]] name = "rustc-demangle" version = "0.1.21" @@ -2431,18 +2383,6 @@ dependencies = [ "security-framework", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" version = "1.0.1" @@ -2479,9 +2419,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" +checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307" dependencies = [ "dyn-clone", "schemars_derive", @@ -2491,9 +2431,9 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" +checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9" dependencies = [ "proc-macro2", "quote", @@ -2663,20 
+2603,18 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest", ] [[package]] -name = "sha2" -version = "0.9.9" +name = "sha1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -2687,7 +2625,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest", ] [[package]] @@ -2706,12 +2644,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -2729,7 +2661,7 @@ checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" dependencies = [ "log", "termcolor", - "time 0.3.14", + "time 0.3.15", ] [[package]] @@ -2743,9 +2675,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snafu" @@ -2822,12 +2754,6 @@ dependencies = [ "syn", ] -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - [[package]] name = "syn" version = "1.0.101" @@ -2945,18 +2871,18 @@ checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" [[package]] name = "thiserror" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a99cb8c4b9a8ef0e7907cd3b617cc8dc04d571c4e73c8ae403d80ac160bb122" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a891860d3c8d66fec8e73ddb3765f90082374dbaaa833407b904a94f1a7eb43" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2976,9 +2902,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" +checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" dependencies = [ "itoa", "libc", @@ -3019,9 +2945,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0020c875007ad96677dcc890298f4b942882c5d4eb7cc8f439fc3bf813dc9c95" +checksum = 
"a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ "autocfg", "bytes", @@ -3029,7 +2955,6 @@ dependencies = [ "memchr", "mio", "num_cpus", - "once_cell", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -3645,12 +3570,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "xml-rs" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" - [[package]] name = "xmlparser" version = "0.13.3" diff --git a/tools/deny.toml b/tools/deny.toml index 9e050d23..03982790 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -63,9 +63,6 @@ skip = [ ] skip-tree = [ - # rusoto_signature uses an older version of sha2 - { name = "rusoto_signature" }, - # structopt pulls in an older version of clap { name = "structopt", version = "0.3.26" }, diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 7b982a0a..f0e9dae7 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -12,9 +12,10 @@ clap = "3.1" hex = "0.4.0" log = "0.4.14" pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } -rusoto_cloudformation = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } -rusoto_s3 = { version = "0.48.0", default-features = false, features = ["rustls"] } +aws-config = "0.48.0" +aws-types = "0.48.0" +aws-sdk-cloudformation = "0.18.0" +aws-sdk-s3 = "0.18.0" serde_json = "1.0.66" serde_yaml = "0.8.17" sha2 = "0.10" diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs index 20bbdc01..f5624ddf 100644 --- a/tools/infrasys/src/error.rs +++ b/tools/infrasys/src/error.rs @@ -1,3 +1,4 @@ +use aws_sdk_s3::types::SdkError; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -14,7 +15,7 @@ pub enum Error { CreateStack { stack_name: String, region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display( @@ -40,6 +41,9 @@ pub enum Error { ))] CreateStackTimeout { stack_name: String, region: String }, + #[snafu(display("No stack data returned for CFN stack '{}' in {}", stack_name, region))] + MissingStack { stack_name: String, region: String }, + #[snafu(display( "Failed to fetch stack details for CFN stack '{}' in '{}': {}", stack_name, @@ -49,7 +53,7 @@ pub enum Error { DescribeStack { stack_name: String, region: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display("Missing environment variable '{}'", var))] @@ -117,11 +121,11 @@ pub enum Error { source: std::num::ParseIntError, }, - #[snafu(display("Failed to parse '{}' to a valid rusoto region: {}", what, source))] - ParseRegion { - what: String, - source: rusoto_core::region::ParseRegionError, - }, + #[snafu(display("Failed to find default region"))] + DefaultRegion, + + #[snafu(display("Unable to parse stack status"))] + ParseStatus, #[snafu(display( "Failed to find field '{}' after attempting to create resource '{}'", @@ -139,7 +143,7 @@ pub enum Error { #[snafu(display("Failed to push object to bucket '{}': {}", bucket_name, source))] PutObject { bucket_name: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display( @@ -149,7 +153,7 @@ pub enum Error { ))] PutPolicy { bucket_name: String, - source: rusoto_core::RusotoError, + source: SdkError, }, #[snafu(display("Failed to create async runtime: {}", source))] diff --git a/tools/infrasys/src/keys.rs 
b/tools/infrasys/src/keys.rs index 15afde39..a00283c2 100644 --- a/tools/infrasys/src/keys.rs +++ b/tools/infrasys/src/keys.rs @@ -1,10 +1,9 @@ use async_trait::async_trait; +use aws_sdk_cloudformation::Client as CloudFormationClient; +use aws_types::region::Region; use pubsys_config::{KMSKeyConfig, SigningKeyConfig}; -use rusoto_cloudformation::{CloudFormation, CloudFormationClient, CreateStackInput}; -use rusoto_core::Region; use snafu::{OptionExt, ResultExt}; use std::fs; -use std::str::FromStr; use super::{error, shared, Result}; @@ -34,7 +33,7 @@ pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result SigningKeyConfig::file { .. } => (), SigningKeyConfig::kms { config, .. } => { let config = config.as_ref().context(error::MissingConfigSnafu { - missing: "config field for a kms key", + missing: "config field for kms keys", })?; match ( @@ -89,9 +88,13 @@ impl KMSKeyConfigExt for KMSKeyConfig { missing: "key_alias", })? ); - let cfn_client = CloudFormationClient::new( - Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, - ); + + let config = aws_config::from_env() + .region(Region::new(region.to_owned())) + .load() + .await; + let cfn_client = CloudFormationClient::new(&config); + let cfn_filepath = format!( "{}/infrasys/cloudformation-templates/kms_key_setup.yml", shared::getenv("BUILDSYS_TOOLS_DIR")? @@ -100,20 +103,19 @@ impl KMSKeyConfigExt for KMSKeyConfig { .context(error::FileReadSnafu { path: cfn_filepath })?; let stack_result = cfn_client - .create_stack(CreateStackInput { - parameters: Some(vec![shared::create_parameter( - "Alias".to_string(), - self.key_alias - .as_ref() - .context(error::KeyConfigSnafu { - missing: "key_alias", - })? - .to_string(), - )]), - stack_name: stack_name.clone(), - template_body: Some(cfn_template.clone()), - ..Default::default() - }) + .create_stack() + .parameters(shared::create_parameter( + "Alias".to_string(), + self.key_alias + .as_ref() + .context(error::KeyConfigSnafu { + missing: "key_alias", + })? + .to_string(), + )) + .stack_name(stack_name.clone()) + .template_body(cfn_template.clone()) + .send() .await .context(error::CreateStackSnafu { stack_name: &stack_name, diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 83cf4c34..13f74085 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -4,12 +4,13 @@ mod root; mod s3; mod shared; +use aws_sdk_cloudformation::Region; use error::Result; use log::{error, info}; use pubsys_config::{InfraConfig, RepoConfig, S3Config, SigningKeyConfig}; use sha2::{Digest, Sha512}; use shared::KeyRole; -use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; +use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::HashMap; use std::num::NonZeroUsize; @@ -62,7 +63,35 @@ fn run() -> Result<()> { // Parse and store the args passed to the program let args = Args::from_args(); - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; + match args.log_level { + // Set log level for AWS SDK to error to reduce verbosity. 
+ LevelFilter::Info => { + CombinedLogger::init(vec![ + SimpleLogger::new( + LevelFilter::Info, + ConfigBuilder::new() + .add_filter_ignore_str("aws_config") + .add_filter_ignore_str("aws_smithy") + .add_filter_ignore_str("tracing::span") + .build(), + ), + SimpleLogger::new( + LevelFilter::Warn, + ConfigBuilder::new() + .add_filter_allow_str("aws_config") + .add_filter_allow_str("aws_smithy") + .add_filter_allow_str("tracing::span") + .build(), + ), + ]) + .context(error::LoggerSnafu)?; + } + + // Set the supplied log level across the whole crate. + _ => { + SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? + } + } match args.subcommand { SubCommand::CreateInfra(ref run_task_args) => { @@ -124,7 +153,7 @@ async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { // Upload root.json. info!("Uploading root.json to S3 bucket..."); s3::upload_file( - repo_info.s3_region, + &repo_info.s3_region, &bucket_name, &repo_info.prefix, root_role_path, @@ -187,7 +216,7 @@ struct ValidRepoInfo<'a> { root_key_threshold: &'a NonZeroUsize, root_keys: &'a mut SigningKeyConfig, root_role_url: &'a mut Option, - s3_region: &'a String, + s3_region: Region, s3_stack_name: String, signing_keys: &'a mut SigningKeyConfig, stack_arn: &'a mut Option, @@ -204,20 +233,22 @@ impl<'a> ValidRepoInfo<'a> { let s3_stack_name = repo_config .file_hosting_config_name - .as_ref() + .to_owned() .context(error::MissingConfigSnafu { missing: "file_hosting_config_name", })?; let s3_info = s3_info_map - .get_mut(s3_stack_name) + .get_mut(&s3_stack_name) .context(error::MissingConfigSnafu { missing: format!("aws.s3 config with name {}", s3_stack_name), })?; Ok(ValidRepoInfo { s3_stack_name: s3_stack_name.to_string(), - s3_region: s3_info.region.as_ref().context(error::MissingConfigSnafu { - missing: format!("region for '{}' s3 config", s3_stack_name), - })?, + s3_region: Region::new(s3_info.region.as_ref().cloned().context( + error::MissingConfigSnafu { + missing: format!("region for '{}' s3 config", s3_stack_name), + }, + )?), bucket_name: &mut s3_info.bucket_name, stack_arn: &mut s3_info.stack_arn, vpce_id: s3_info @@ -262,11 +293,11 @@ async fn create_repo_infrastructure( // Create S3 bucket info!("Creating S3 bucket..."); let (s3_stack_arn, bucket_name, bucket_rdn) = - s3::create_s3_bucket(repo_info.s3_region, &repo_info.s3_stack_name).await?; + s3::create_s3_bucket(&repo_info.s3_region, &repo_info.s3_stack_name).await?; // Add Bucket Policy to newly created bucket s3::add_bucket_policy( - repo_info.s3_region, + &repo_info.s3_region, &bucket_name, &repo_info.prefix, repo_info.vpce_id, diff --git a/tools/infrasys/src/root.rs b/tools/infrasys/src/root.rs index e447f045..bd0c6108 100644 --- a/tools/infrasys/src/root.rs +++ b/tools/infrasys/src/root.rs @@ -1,7 +1,7 @@ use super::{error, KeyRole, Result}; +use aws_config::meta::region::RegionProviderChain; use log::{trace, warn}; use pubsys_config::SigningKeyConfig; -use rusoto_core::Region; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::HashMap; use std::fs; @@ -39,6 +39,21 @@ pub fn check_root(root_role_path: &Path) -> Result<()> { }); Ok(()) } +pub fn get_region() -> Result { + let rt = tokio::runtime::Runtime::new().context(error::RuntimeSnafu)?; + rt.block_on(async { async_get_region().await }) +} + +async fn async_get_region() -> Result { + let default_region_fallback = "us-east-1"; + let default_region = RegionProviderChain::default_provider() + .or_else(default_region_fallback) + .region() + .await + 
.context(error::DefaultRegionSnafu)? + .to_string(); + Ok(default_region) +} /// Creates the directory where root.json will live and creates root.json itself according to details specified in root-role-path pub fn create_root(root_role_path: &Path) -> Result<()> { @@ -48,14 +63,12 @@ pub fn create_root(root_role_path: &Path) -> Result<()> { thing: "root role", })?; fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; + let default_region = get_region()?; + // Initialize root + tuftool!(&default_region, "root init '{}'", root_role_path.display()); tuftool!( - Region::default().name(), - "root init '{}'", - root_role_path.display() - ); - tuftool!( - Region::default().name(), + &default_region, // TODO: expose expiration date as a configurable parameter "root expire '{}' 'in 52 weeks'", root_role_path.display() @@ -63,7 +76,7 @@ pub fn create_root(root_role_path: &Path) -> Result<()> { Ok(()) } -/// Adds keys to root.json according to key type +/// Adds keys to root.json according to key type pub fn add_keys( signing_key_config: &mut SigningKeyConfig, role: &KeyRole, @@ -108,11 +121,11 @@ fn add_keys_kms( num_keys: (*available_keys).len(), } ); - + let default_region = get_region()?; match role { KeyRole::Root => { tuftool!( - Region::default().name(), + &default_region, "root set-threshold '{}' root '{}' ", filepath, threshold.to_string() @@ -128,19 +141,19 @@ fn add_keys_kms( } KeyRole::Publication => { tuftool!( - Region::default().name(), + &default_region, "root set-threshold '{}' snapshot '{}' ", filepath, threshold.to_string() ); tuftool!( - Region::default().name(), + &default_region, "root set-threshold '{}' targets '{}' ", filepath, threshold.to_string() ); tuftool!( - Region::default().name(), + &default_region, "root set-threshold '{}' timestamp '{}' ", filepath, threshold.to_string() diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs index be464523..990ebcd1 100644 --- a/tools/infrasys/src/s3.rs +++ b/tools/infrasys/src/s3.rs @@ -1,14 +1,10 @@ -use rusoto_cloudformation::{CloudFormation, CloudFormationClient, CreateStackInput}; -use rusoto_core::Region; -use rusoto_s3::{ - GetBucketPolicyRequest, PutBucketPolicyRequest, PutObjectRequest, S3Client, StreamingBody, S3, -}; +use aws_sdk_cloudformation::{Client as CloudFormationClient, Region}; +use aws_sdk_s3::Client as S3Client; use snafu::{OptionExt, ResultExt}; use std::fs; use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; -use std::str::FromStr; use super::{error, shared, Result}; @@ -36,11 +32,17 @@ pub fn format_prefix(prefix: &str) -> String { /// Input: The region in which the bucket will be created and the name of the bucket /// Output: The stack_arn of the stack w/ the S3 bucket, the CFN allocated bucket name, /// and the bucket url (for the url fields in Infra.lock) -pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, String, String)> { +pub async fn create_s3_bucket( + region: &Region, + stack_name: &str, +) -> Result<(String, String, String)> { // TODO: Add support for accommodating pre-existing buckets (skip this creation process) - let cfn_client = CloudFormationClient::new( - Region::from_str(region).context(error::ParseRegionSnafu { what: region })?, - ); + let config = aws_config::from_env() + .region(region.to_owned()) + .load() + .await; + let cfn_client = CloudFormationClient::new(&config); + let cfn_filepath: PathBuf = format!( "{}/infrasys/cloudformation-templates/s3_setup.yml", shared::getenv("BUILDSYS_TOOLS_DIR")? 
@@ -48,14 +50,17 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, .into(); let cfn_template = fs::read_to_string(&cfn_filepath).context(error::FileReadSnafu { path: cfn_filepath })?; + let stack_result = cfn_client - .create_stack(CreateStackInput { - stack_name: stack_name.to_string(), - template_body: Some(cfn_template.clone()), - ..Default::default() - }) + .create_stack() + .stack_name(stack_name.to_string()) + .template_body(cfn_template.clone()) + .send() .await - .context(error::CreateStackSnafu { stack_name, region })?; + .context(error::CreateStackSnafu { + stack_name, + region: region.as_ref(), + })?; // We don't have to wait for successful stack creation to grab the stack ARN let stack_arn = stack_result .clone() @@ -66,7 +71,7 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, })?; // Grab the StackOutputs to get the Bucketname and BucketURL - let output_array = shared::get_stack_outputs(&cfn_client, stack_name, region).await?; + let output_array = shared::get_stack_outputs(&cfn_client, stack_name, region.as_ref()).await?; let bucket_name = output_array[0] .output_value .as_ref() @@ -90,21 +95,23 @@ pub async fn create_s3_bucket(region: &str, stack_name: &str) -> Result<(String, /// Adds a BucketPolicy allowing GetObject access to a specified VPC /// Input: Region, Name of bucket, which prefix root.json should be put under, and vpcid /// Note that the prefix parameter must have the format "//*" and the bucket name "" -/// Output: Doesn't need to save any metadata from this action +/// Output: Doesn't need to save any metadata from this action pub async fn add_bucket_policy( - region: &str, + region: &Region, bucket_name: &str, prefix: &str, vpcid: &str, ) -> Result<()> { // Get old policy - let s3_client = - S3Client::new(Region::from_str(region).context(error::ParseRegionSnafu { what: region })?); + let config = aws_config::from_env() + .region(region.to_owned()) + .load() + .await; + let s3_client = S3Client::new(&config); let mut policy: serde_json::Value = match s3_client - .get_bucket_policy(GetBucketPolicyRequest { - bucket: bucket_name.to_string(), - expected_bucket_owner: None, - }) + .get_bucket_policy() + .bucket(bucket_name.to_string()) + .send() .await { Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponseSnafu { @@ -153,13 +160,14 @@ pub async fn add_bucket_policy( // Push the new policy as a string s3_client - .put_bucket_policy(PutBucketPolicyRequest { - bucket: bucket_name.to_string(), - policy: serde_json::to_string(&policy).context(error::InvalidJsonSnafu { + .put_bucket_policy() + .bucket(bucket_name.to_string()) + .policy( + serde_json::to_string(&policy).context(error::InvalidJsonSnafu { what: format!("new bucket policy for {}", &bucket_name), })?, - ..Default::default() - }) + ) + .send() .await .context(error::PutPolicySnafu { bucket_name })?; @@ -171,13 +179,16 @@ pub async fn add_bucket_policy( /// Note that the prefix parameter must have the format "/" and the bucket name "" /// Output: Doesn't need to save any metadata from this action pub async fn upload_file( - region: &str, + region: &Region, bucket_name: &str, prefix: &str, file_path: &Path, ) -> Result<()> { - let s3_client = - S3Client::new(Region::from_str(region).context(error::ParseRegionSnafu { what: region })?); + let config = aws_config::from_env() + .region(region.to_owned()) + .load() + .await; + let s3_client = S3Client::new(&config); // File --> Bytes let mut file = 
File::open(file_path).context(error::FileOpenSnafu { path: file_path })?; @@ -186,12 +197,11 @@ pub async fn upload_file( .context(error::FileReadSnafu { path: file_path })?; s3_client - .put_object(PutObjectRequest { - bucket: format!("{}{}", bucket_name, prefix), - key: "root.json".to_string(), - body: Some(StreamingBody::from(buffer)), - ..Default::default() - }) + .put_object() + .bucket(format!("{}{}", bucket_name, prefix)) + .key("root.json".to_string()) + .body(aws_sdk_s3::types::ByteStream::from(buffer)) + .send() .await .context(error::PutObjectSnafu { bucket_name })?; @@ -215,16 +225,13 @@ mod tests { let empty = ""; let single_slash = "/"; - assert_eq!("/prefix", format_prefix(&valid.to_string())); - assert_eq!("/prefix", format_prefix(&missing_slash.to_string())); - assert_eq!("/prefix", format_prefix(&excess_ending_1.to_string())); - assert_eq!("/prefix", format_prefix(&excess_ending_2.to_string())); - assert_eq!( - "/prefix", - format_prefix(&slash_and_excess_ending.to_string()) - ); - assert_eq!("", format_prefix(&empty.to_string())); - assert_eq!("", format_prefix(&single_slash.to_string())); + assert_eq!("/prefix", format_prefix(valid)); + assert_eq!("/prefix", format_prefix(missing_slash)); + assert_eq!("/prefix", format_prefix(excess_ending_1)); + assert_eq!("/prefix", format_prefix(excess_ending_2)); + assert_eq!("/prefix", format_prefix(slash_and_excess_ending)); + assert_eq!("", format_prefix(empty)); + assert_eq!("", format_prefix(single_slash)); } #[test] @@ -247,9 +254,7 @@ mod tests { }} }} }}"#, - "test-bucket-name".to_string(), - "/test-prefix".to_string(), - "testvpc123".to_string() + "test-bucket-name", "/test-prefix", "testvpc123" )) .unwrap(); @@ -317,9 +322,7 @@ mod tests { }} }} }}"#, - "test-bucket-name".to_string(), - "/test-prefix".to_string(), - "testvpc123".to_string() + "test-bucket-name", "/test-prefix", "testvpc123" )) .unwrap(); diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs index 0f147f56..0e458191 100644 --- a/tools/infrasys/src/shared.rs +++ b/tools/infrasys/src/shared.rs @@ -1,5 +1,6 @@ +use aws_sdk_cloudformation::model::{Output, Parameter}; +use aws_sdk_cloudformation::Client as CloudFormationClient; use log::info; -use rusoto_cloudformation::{CloudFormation, CloudFormationClient, DescribeStacksInput, Parameter}; use snafu::{ensure, OptionExt, ResultExt}; use std::{env, thread, time}; use structopt::StructOpt; @@ -19,11 +20,10 @@ pub fn getenv(var: &str) -> Result { /// Generates a parameter type object used to specify parameters in CloudFormation templates pub fn create_parameter(key: String, val: String) -> Parameter { - Parameter { - parameter_key: Some(key), - parameter_value: Some(val), - ..Default::default() - } + Parameter::builder() + .parameter_key(key) + .parameter_value(val) + .build() } /// Polls cfn_client for stack_name in region until it's ready @@ -32,23 +32,27 @@ pub async fn get_stack_outputs( cfn_client: &CloudFormationClient, stack_name: &str, region: &str, -) -> Result> { +) -> Result> { let mut stack_outputs = cfn_client - .describe_stacks(DescribeStacksInput { - stack_name: Some(stack_name.to_string()), - ..Default::default() - }) + .describe_stacks() + .stack_name(stack_name) + .send() .await .context(error::DescribeStackSnafu { stack_name, region })? .stacks .context(error::ParseResponseSnafu { what: "stacks", resource_name: stack_name, - })?[0] + })? + .first() + .context(error::MissingStackSnafu { stack_name, region })? 
.clone(); // Checking that keys have been created so we can return updated outputs - let mut status = stack_outputs.stack_status; + let mut status = stack_outputs + .stack_status() + .context(error::ParseStatusSnafu)? + .as_str(); // Max wait is 30 mins (90 attempts * 20s = 1800s = 30mins) let mut max_attempts: u32 = 90; while status != "CREATE_COMPLETE" { @@ -66,19 +70,23 @@ pub async fn get_stack_outputs( ); thread::sleep(time::Duration::from_secs(20)); stack_outputs = cfn_client - .describe_stacks(DescribeStacksInput { - stack_name: Some(stack_name.to_string()), - ..Default::default() - }) + .describe_stacks() + .stack_name(stack_name) + .send() .await .context(error::DescribeStackSnafu { stack_name, region })? .stacks .context(error::ParseResponseSnafu { what: "stacks", resource_name: stack_name, - })?[0] + })? + .first() + .context(error::MissingStackSnafu { stack_name, region })? .clone(); - status = stack_outputs.stack_status; + status = stack_outputs + .stack_status() + .context(error::ParseStatusSnafu)? + .as_str(); max_attempts -= 1; } From 81f036f05ec2532812abb8c4f2ca55bc2ed11ca3 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 7 Oct 2022 00:32:02 +0000 Subject: [PATCH 0768/1356] sources,variants: add aws-k8s-1.24 variant Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1fba0d7e..11a74657 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-ecs-1] + variant: [aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-k8s-1.24, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] diff --git a/README.md b/README.md index ee77ec4f..f3c54bfc 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.21` * `aws-k8s-1.22` * `aws-k8s-1.23` +* `aws-k8s-1.24` * `aws-k8s-1.21-nvidia` * `aws-k8s-1.22-nvidia` * `aws-k8s-1.23-nvidia` From 5e37ec2f0a5ce9a1fd7a9acbced40136975b7a87 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 7 Oct 2022 00:32:49 +0000 Subject: [PATCH 0769/1356] sources,variants: add aws-k8s-1.24-nvidia variant Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 8 ++++++++ README.md | 10 +++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 11a74657..5f4567c2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -93,6 +93,14 @@ jobs: arch: aarch64 supported: true fetch-upstream: "true" + - variant: aws-k8s-1.24-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-k8s-1.24-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" - variant: aws-ecs-1-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index f3c54bfc..0f0783a3 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.21-nvidia` * `aws-k8s-1.22-nvidia` * `aws-k8s-1.23-nvidia` +* `aws-k8s-1.24-nvidia` The following variants support ECS: @@ -952,7 +953,14 @@ There are a few important caveats about the provided kdump support: ### NVIDIA GPUs Support Bottlerocket's `nvidia` variants include the required 
packages and configurations to leverage NVIDIA GPUs. -The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g4dn`, `g5` and `g5g`. +Currently, the following NVIDIA driver versions are supported in Bottlerocket: + +* 470.X +* 515.X + +The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g3`, `g4dn`, `g5` and `g5g`. +Note that older instance types, such as `p2`, are not supported by NVIDIA driver `515.X` and above. +You need to make sure you select the appropriate AMI depending on the instance type you are planning to use. Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants, and [QUICKSTART-ECS](QUICKSTART-ECS.md#aws-ecs--nvidia-variants) for ECS variants. ## Details From 990f76f2c183cf0e843b2188e5f36057a8bc1f5c Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 19 Sep 2022 17:03:47 +0000 Subject: [PATCH 0770/1356] sources,variants: add vmware-k8s-1.24 variant Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5f4567c2..ba8a8b7e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,6 +69,10 @@ jobs: arch: x86_64 supported: true fetch-upstream: "false" + - variant: vmware-k8s-1.24 + arch: x86_64 + supported: true + fetch-upstream: "false" - variant: aws-k8s-1.21-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index 0f0783a3..6f791742 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: * `vmware-k8s-1.21` * `vmware-k8s-1.22` * `vmware-k8s-1.23` +* `vmware-k8s-1.24` The following variants are designed to be Kubernetes worker nodes on bare metal: From bb9dd8625cb6d5010d1455efaceac1199e1b4368 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 19 Sep 2022 17:26:26 +0000 Subject: [PATCH 0771/1356] sources,variants: add metal-k8s-1.24 variant Signed-off-by: Arnaldo Garcia Rincon --- .github/workflows/build.yml | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ba8a8b7e..debc270e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -53,6 +53,10 @@ jobs: arch: x86_64 supported: false fetch-upstream: "false" + - variant: metal-k8s-1.24 + arch: x86_64 + supported: false + fetch-upstream: "false" - variant: vmware-k8s-1.20 arch: x86_64 supported: true diff --git a/README.md b/README.md index 6f791742..0d4087f4 100644 --- a/README.md +++ b/README.md @@ -88,6 +88,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: * `metal-k8s-1.21` * `metal-k8s-1.22` * `metal-k8s-1.23` +* `metal-k8s-1.24` The following variants are no longer supported: From b977bdc1e938fe4b2ac971f77c3c313e064136a6 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 14 Oct 2022 19:18:48 +0000 Subject: [PATCH 0772/1356] settings: Add container-runtime settings This adds `settings.container-runtime.max-container-log-line-size` and `settings.container-runtime.max-concurrent-downloads` settings. These affect how containerd is configured. `max-container-log-line-size` controls how long a log line can be from a container before containerd breaks it into multiple separate lines. 
`max-concurrent-downloads` controls how many concurrent downloads will be done in parallel to download an image. Signed-off-by: Sean McGinnis --- README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.md b/README.md index 0d4087f4..672b6950 100644 --- a/README.md +++ b/README.md @@ -580,6 +580,27 @@ It is recommended to programmatically set these settings via `apiclient` through In addition to the container runtime daemons, these credential settings will also apply to [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) image pulls as well. +#### Container runtime settings + +Some behavior of the container runtime (currently `containerd`) can be modified with the following settings: + +* `settings.container-runtime.enable-unprivileged-icmp`: Allow unprivileged containers to open ICMP echo sockets. +* `settings.container-runtime.enable-unprivileged-ports`: Allow unprivileged containers to bind to ports < 1024. +* `settings.container-runtime.max-concurrent-downloads`: Restricts the number of concurrent layer downloads for each image. +* `settings.container-runtime.max-container-log-line-size`: Controls how long container log messages can be. + If the log output is longer than the limit, the log message will be broken into multiple lines. + +Example container runtime settings: + +```toml +[settings.container-runtime] +# Set log line length to unlimited +max-container-log-line-size = -1 +max-concurrent-downloads = 4 +enable-unprivileged-icmp = true +enable-unprivileged-ports = true +``` + #### Updates settings * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. From 2fe626ba476e1f122da2f950b4739850fb6b22b0 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 5 Oct 2022 16:15:42 -0500 Subject: [PATCH 0773/1356] Clean up settings information in the README This sorts the settings values in the README file within each section to make it a little easier to find what you're looking for. Signed-off-by: Sean McGinnis Also adds the content type tags to all code blocks so things like TOML are formatted and easier to read. Some settings had example description as a sub-bullet, some had it as text, and one or two had it outside the bullet list of settings. This makes it consistent through our README doc. Signed-off-by: Sean McGinnis --- README.md | 286 +++++++++++++++++++++++++++++------------------------- 1 file changed, 152 insertions(+), 134 deletions(-) diff --git a/README.md b/README.md index 672b6950..e04d153f 100644 --- a/README.md +++ b/README.md @@ -164,7 +164,7 @@ Once the instance is started, you can start a session: If you prefer a command-line tool, you can start a session with a recent [AWS CLI](https://aws.amazon.com/cli/) and the [session-manager-plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). Then you'd be able to start a session using only your instance ID, like this: -``` +```txt aws ssm start-session --target INSTANCE_ID ``` @@ -172,7 +172,7 @@ With the [default control container](https://github.com/bottlerocket-os/bottlero To do even more, read the next section about the [admin container](#admin-container). 
You can access the admin container from the control container like this: -``` +```sh enter-admin-container ``` @@ -185,32 +185,32 @@ Outside of AWS, you can [pass in your own SSH keys](https://github.com/bottleroc To enable the container, you can change the setting in user data when starting Bottlerocket, for example EC2 instance user data: -``` +```toml [settings.host-containers.admin] enabled = true ``` If Bottlerocket is already running, you can enable the admin container from the default [control container](#control-container) like this: -``` +```sh enable-admin-container ``` Or you can start an interactive session immediately like this: -``` +```sh enter-admin-container ``` If you're using a custom control container, or want to make the API calls directly, you can enable the admin container like this instead: -``` +```txt apiclient set host-containers.admin.enabled=true ``` Once you've enabled the admin container, you can either access it through SSH or execute commands from the control container like this: -``` +```txt apiclient exec admin bash ``` @@ -245,7 +245,7 @@ apiclient knows how to handle those update APIs for you, and you can run it from To see what updates are available: -``` +```txt apiclient update check ``` @@ -254,20 +254,20 @@ The `available_updates` field will show the full list of available versions, inc To apply the latest update: -``` +```txt apiclient update apply ``` The next time you reboot, you'll start up in the new version, and system configuration will be automatically [migrated](sources/api/migration/). To reboot right away: -``` +```txt apiclient reboot ``` If you're confident about updating, the `apiclient update apply` command has `--check` and `--reboot` flags to combine the above actions, so you can accomplish all of the above steps like this: -``` +```txt apiclient update apply --check --reboot ``` @@ -278,7 +278,7 @@ See the [apiclient documentation](sources/api/apiclient/) for more details. The system will automatically roll back if it's unable to boot. If the update is not functional for a given container workload, you can do a manual rollback: -``` +```txt signpost rollback-to-inactive reboot ``` @@ -297,20 +297,20 @@ Here we'll describe the settings you can configure on your Bottlerocket instance You can see the current settings with an API request: -``` +```txt apiclient get settings ``` This will return all of the current settings in JSON format. For example, here's an abbreviated response: -``` -{"motd":"...", {"kubernetes": ...}} +```json +{"motd": "...", {"kubernetes": {}}} ``` You can change settings like this: -``` +```txt apiclient set motd="hi there" kubernetes.node-labels.environment=test ``` @@ -324,7 +324,7 @@ If you know what settings you want to change when you start your Bottlerocket in In user data, we structure the settings in TOML form to make things a bit simpler. Here's the user data to change the message of the day setting, as we did in the last section: -``` +```toml [settings] motd = "my own value!" ``` @@ -350,7 +350,7 @@ In this format, "settings.kubernetes.cluster-name" refers to the same key as in #### Top-level settings -* `settings.motd`: This setting is just written out to /etc/motd. It's useful as a way to get familiar with the API! Try changing it. +* `settings.motd`: This setting is just written out to /etc/motd. It's useful as a way to get familiar with the API! Try changing it. 
#### Kubernetes settings @@ -360,8 +360,8 @@ For more details about running Bottlerocket as a Kubernetes worker node in VMwar The following settings must be specified in order to join a Kubernetes cluster. You should [specify them in user data](#using-user-data). -* `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. * `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. +* `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. For Kubernetes variants in AWS, you must also specify: @@ -369,12 +369,17 @@ For Kubernetes variants in AWS, you must also specify: For Kubernetes variants in VMware, you must specify: +* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). + +The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. + * `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. This value can be set as a string containing a single IP address, or as a list containing multiple IP addresses. + Examples: - ``` + ```toml # Valid, single IP [settings.kubernetes] "cluster-dns-ip" = "10.0.0.1" @@ -384,99 +389,102 @@ For Kubernetes variants in VMware, you must specify: "cluster-dns-ip" = ["10.0.0.1", "10.0.0.2"] ``` -* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). - -The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. - * `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. * `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, values and effects entries added when registering the node in the cluster. - * Example user data for setting up labels and taints: - ``` - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" - [settings.kubernetes.node-taints] - "dedicated" = ["experimental:PreferNoSchedule", "experimental:NoExecute"] - "special" = ["true:NoSchedule"] - ``` - -The following settings are optional and allow you to further configure your cluster. + Example user data for setting up labels and taints: -* `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. -* `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. -* `settings.kubernetes.cloud-provider`: The cloud provider for this cluster. Defaults to `aws` for AWS variants, and `external` for other variants. -* `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. -* `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. 
When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. -* `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. -* `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. - Remember to quote signals (since they all contain ".") and to quote all values. - * Example user data for setting up eviction hard: + ```toml + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" + [settings.kubernetes.node-taints] + "dedicated" = ["experimental:PreferNoSchedule", "experimental:NoExecute"] + "special" = ["true:NoSchedule"] + ``` - ``` - [settings.kubernetes.eviction-hard] - "memory.available" = "15%" - ``` +The following settings are optional and allow you to further configure your cluster. * `settings.kubernetes.allowed-unsafe-sysctls`: Enables specified list of unsafe sysctls. - * Example user data for setting up allowed unsafe sysctls: - ``` - allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] - ``` + Example user data for setting up allowed unsafe sysctls: -* `settings.kubernetes.system-reserved`: Resources reserved for system components. - * Example user data for setting up system reserved: - - ``` - [settings.kubernetes.system-reserved] - cpu = "10m" - memory = "100Mi" - ephemeral-storage= "1Gi" - ``` + ```toml + allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] + ``` -* `settings.kubernetes.registry-qps`: The registry pull QPS. -* `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. -* `settings.kubernetes.event-qps`: The maximum event creations per second. -* `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. -* `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. -* `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. -* `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. +* `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. +* `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. +* `settings.kubernetes.cloud-provider`: The cloud provider for this cluster. Defaults to `aws` for AWS variants, and `external` for other variants. +* `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. +* `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. 
If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. -* `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. -* `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. -* `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. +* `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. +* `settings.kubernetes.event-qps`: The maximum event creations per second. +* `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. + Remember to quote signals (since they all contain ".") and to quote all values. + + Example user data for setting up eviction hard: + + ```toml + [settings.kubernetes.eviction-hard] + "memory.available" = "15%" + ``` + * `settings.kubernetes.image-gc-high-threshold-percent`: The percent of disk usage after which image garbage collection is always run. * `settings.kubernetes.image-gc-low-threshold-percent`: The percent of disk usage before which image garbage collection is never run. -* `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. +* `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. +* `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. * `settings.kubernetes.log-level`: Adjust the logging verbosity of the `kubelet` process. The default log level is 2, with higher numbers enabling more verbose logging. +* `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. +* `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. +* `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. +* `settings.kubernetes.registry-qps`: The registry pull QPS. +* `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. +* `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. +* `settings.kubernetes.system-reserved`: Resources reserved for system components. + + Example user data for setting up system reserved: + + ```toml + [settings.kubernetes.system-reserved] + cpu = "10m" + memory = "100Mi" + ephemeral-storage= "1Gi" + ``` + +* `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. 
Defaults to `none`. +* `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. You can also optionally specify static pods for your node with the following settings. Static pods can be particularly useful when running in standalone mode. -* `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. * `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. +* `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! In AWS, [pluto](sources/api/) sets these based on runtime instance information. In VMware and on bare metal, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on default values. (See the [VMware defaults](sources/models/src/vmware-k8s-1.23/defaults.d) or [bare metal defaults](sources/models/src/metal-k8s-1.23/defaults.d)). +* `settings.kubernetes.kube-reserved`: Resources reserved for node components. + + Bottlerocket provides default values for the resources by [schnauzer](sources/api/): + + * `cpu`: in millicores from the total number of vCPUs available on the instance. + * `memory`: in mebibytes from the max num of pods on the instance. `memory_to_reserve = max_num_pods * 11 + 255`. + * `ephemeral-storage`: defaults to `1Gi`. + * `settings.kubernetes.node-ip`: The IP address of this node. * `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. -* `settings.kubernetes.kube-reserved`: Resources reserved for node components. - * Bottlerocket provides default values for the resources by [schnauzer](sources/api/): - * `cpu`: in millicores from the total number of vCPUs available on the instance. - * `memory`: in mebibytes from the max num of pods on the instance. `memory_to_reserve = max_num_pods * 11 + 255`. - * `ephemeral-storage`: defaults to `1Gi`. For Kubernetes variants in AWS, the following settings are set for you automatically by [pluto](sources/api/). -* `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) * `settings.kubernetes.cluster-dns-ip`: Derived from the EKS Service IP CIDR or the CIDR block of the primary network interface. +* `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) #### Amazon ECS settings @@ -488,36 +496,37 @@ Since joining a cluster happens at startup, they need to be [specified in user d * `settings.ecs.cluster`: The name or [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of your Amazon ECS cluster. If left unspecified, Bottlerocket will join your `default` cluster. * `settings.ecs.instance-attributes`: [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) in the form of key, value pairs added when registering the container instance in the cluster. 
- * Example user data for setting up attributes: - ``` - [settings.ecs.instance-attributes] - attribute1 = "foo" - attribute2 = "bar" - ``` + Example user data for setting up attributes: + + ```toml + [settings.ecs.instance-attributes] + attribute1 = "foo" + attribute2 = "bar" + ``` The following settings are optional and allow you to further configure your cluster. These settings can be changed at any time. -* `settings.ecs.logging-drivers`: The list of logging drivers available on the container instance. - The ECS agent running on a container instance must register available logging drivers before tasks that use those drivers are eligible to be placed on the instance. - Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. * `settings.ecs.allow-privileged-containers`: Whether launching privileged containers is allowed on the container instance. If this value is set to false, privileged containers are not permitted. Bottlerocket sets this value to false by default. -* `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. - Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. * `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. * `settings.ecs.image-pull-behavior`: The behavior used to customize the [pull image process](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html#ecs-agent-availparam) for your container instances. Supported values are `default`, `always`, `once`, `prefer-cached`, and the default is `default`. +* `settings.ecs.logging-drivers`: The list of logging drivers available on the container instance. + The ECS agent running on a container instance must register available logging drivers before tasks that use those drivers are eligible to be placed on the instance. + Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. +* `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. + Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. #### CloudFormation signal helper settings For AWS variants, these settings allow you to set up CloudFormation signaling to indicate whether Bottlerocket hosts running in EC2 have been successfully created or updated: +* `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. * `settings.cloudformation.should-signal`: Whether to check status and send signal. Defaults to `false`. If set to `true`, both `stack-name` and `logical-resource-id` need to be specified. * `settings.cloudformation.stack-name`: Name of the CloudFormation Stack to signal. -* `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. #### Auto Scaling group settings @@ -534,12 +543,13 @@ Once you opt-in to use additional OCI hooks, any new orchestrated containers wil The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. -* `settings.container-registry.mirrors`: An array of container image registry mirror settings. Each element specifies the registry and the endpoints for said registry. +* `settings.container-registry.mirrors`: An array of container image registry mirror settings. 
Each element specifies the registry and the endpoints for said registry. When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. (Docker and containerd will still try the default registry URL if the mirrors fail.) - * Example user data for setting up image registry mirrors: - ``` + Example user data for setting up image registry mirrors: + + ```toml [[settings.container-registry.mirrors]] registry = "*" endpoint = ["https://","https://"] @@ -549,7 +559,7 @@ When pulling an image from a registry, the container runtime will try the endpoi endpoint = [ "https://", "https://"] ``` - If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. + If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. For [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) images from Amazon ECR private repositories, registry mirrors are currently unsupported. @@ -558,7 +568,8 @@ The following setting is optional and allows you to configure image registry cre * `settings.container-registry.credentials`: An array of container images registry credential settings. Each element specifies the registry and the credential information for said registry. The credential fields map to [containerd's registry credential fields](https://github.com/containerd/containerd/blob/v1.6.0/docs/cri/registry.md#configure-registry-credentials), which in turn map to the fields in `.docker/config.json`. It is recommended to programmatically set these settings via `apiclient` through the Bottlerocket control container and/or custom host-containers. - * An example `apiclient` call to set registry credentials for `gcr.io` and `docker.io` looks like this: + + An example `apiclient` call to set registry credentials for `gcr.io` and `docker.io` looks like this: ```bash apiclient set --json '{ @@ -603,15 +614,16 @@ enable-unprivileged-ports = true #### Updates settings +* `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. For testing purposes, you can set this to `true` to ignore those waves and update immediately. * `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. +* `settings.updates.seed`: A `u32` value that determines how far into the update schedule this machine will accept an update. We recommend leaving this at its default generated value so that updates can be somewhat randomized in your cluster. * `settings.updates.targets-base-url`: The common portion of all URIs used to download update files. -* `settings.updates.seed`: A `u32` value that determines how far into the update schedule this machine will accept an update. We recommend leaving this at its default generated value so that updates can be somewhat randomized in your cluster. -* `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. 
-* `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. For testing purposes, you can set this to `true` to ignore those waves and update immediately. +* `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. #### Network settings * `settings.network.hostname`: The desired hostname of the system. + **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. Most users don't need to change this setting as the following defaults work for the majority of use cases. @@ -620,6 +632,7 @@ enable-unprivileged-ports = true * `settings.network.hosts`: A mapping of IP addresses to domain names which should resolve to those IP addresses. This setting results in modifications to the `/etc/hosts` file for Bottlerocket. + Note that this setting does not typically impact name resolution for containers, which usually rely on orchestrator-specific mechanisms for configuring static resolution. (See [ECS](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HostEntry.html) and [Kubernetes](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) documentation for those mechanisms.) @@ -635,7 +648,7 @@ enable-unprivileged-ports = true This example would result in `/etc/hosts` file entries like so: - ``` + ```txt 10.0.0.0 test.example.com test1.example.com 10.1.1.1 test2.example.com ``` @@ -653,7 +666,7 @@ enable-unprivileged-ports = true Would result in `/etc/hosts` entries like so: - ``` + ```txt 10.0.0.0 test.example.com test1.example.com test3.example.com 10.1.1.1 test2.example.com ``` @@ -661,13 +674,15 @@ enable-unprivileged-ports = true The following allows for custom DNS settings, which are used to generate the `/etc/resolv.conf`. If either DNS setting is not populated, the system will use the DHCP lease of the primary interface to gather these settings. See the `resolv.conf` [man page](https://man7.org/linux/man-pages/man5/resolv.conf.5.html) for more detail. + * `settings.dns.name-servers`: An array of IP address strings that represent the desired name server(s). * `settings.dns.search-list`: An array of domain strings that represent the desired domain search path(s). -``` -[settings.dns] -name-servers = ["1.2.3.4", "5.6.7.8"] -search-list = ["foo.bar", "baz.foo"] -``` + + ```toml + [settings.dns] + name-servers = ["1.2.3.4", "5.6.7.8"] + search-list = ["foo.bar", "baz.foo"] + ``` ##### Proxy settings @@ -684,9 +699,10 @@ These settings will configure the proxying behavior of the following services: * `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. * `settings.network.no-proxy`: A list of hosts that are excluded from proxying. + Example: - ``` + ```toml [settings.network] https-proxy = "1.2.3.4:8080" no-proxy = ["localhost", "127.0.0.1"] @@ -719,25 +735,27 @@ Here are the metrics settings: * `settings.kernel.modules..allowed`: Whether the named kernel module is allowed to be loaded. **Important note:** this setting does not affect kernel modules that are already loaded. You may need to reboot for a change to disallow a kernel module to take effect. 
- * Example user data for blocking kernel modules: - ``` - [settings.kernel.modules.sctp] - allowed = false + Example user data for blocking kernel modules: + + ```toml + [settings.kernel.modules.sctp] + allowed = false - [settings.kernel.modules.udf] - allowed = false - ``` + [settings.kernel.modules.udf] + allowed = false + ``` * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. - * Example user data for setting up sysctl: - ``` - [settings.kernel.sysctl] - "user.max_user_namespaces" = "16384" - "vm.max_map_count" = "262144" - ``` + Example user data for setting up sysctl: + + ```toml + [settings.kernel.sysctl] + "user.max_user_namespaces" = "16384" + "vm.max_map_count" = "262144" + ``` #### Boot-related settings @@ -745,8 +763,8 @@ Here are the metrics settings: Specifying any of the following settings will generate a kernel boot config file to be loaded on subsequent boots: -* `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. * `settings.boot.init-parameters`: This allows additional init parameters to be specified on the kernel command line during boot. +* `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. * `settings.boot.reboot-to-reconcile`: If set to `true`, Bottlerocket will automatically reboot again during boot if either the `settings.boot.kernel-parameters` or `settings.boot.init-parameters` were changed via user data or a bootstrap container so that these changes may take effect. You can learn more about kernel boot configuration [here](https://www.kernel.org/doc/html/latest/admin-guide/bootconfig.html). @@ -802,7 +820,7 @@ trusted=false Here's the same example but using API calls: -```sh +```txt apiclient set \ pki.my-trusted-bundle.data="W3N..." \ pki.my-trusted-bundle.trusted=true \ @@ -814,11 +832,11 @@ You can use this method from within a [bootstrap container](#bootstrap-container #### Host containers settings -* `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). * `settings.host-containers.admin.enabled`: Whether the admin container is enabled. +* `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). * `settings.host-containers.admin.superpowered`: Whether the admin container has high levels of access to the Bottlerocket host. -* `settings.host-containers.control.source`: The URI of the [control container](#control-container). * `settings.host-containers.control.enabled`: Whether the control container is enabled. +* `settings.host-containers.control.source`: The URI of the [control container](#control-container). * `settings.host-containers.control.superpowered`: Whether the control container has high levels of access to the Bottlerocket host. ##### Custom host containers @@ -830,11 +848,11 @@ As long as you define the three fields above -- `source` with a URI, and `enable You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current/user-data`. (It was inspired by instance user data, but is entirely separate; it can be any data your host container feels like interpreting.) 
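Because `user-data` must be base64-encoded, a convenient pattern is to encode a local file and pass the result to `apiclient`. A sketch, assuming a custom host container named `custom` and an illustrative payload file:

```txt
apiclient set \
  host-containers.custom.user-data="$(base64 -w 0 my-payload.json)"
```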
-Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. +Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. Here's an example of adding a custom host container with API calls: -``` +```txt apiclient set \ host-containers.custom.source=MY-CONTAINER-URI \ host-containers.custom.enabled=true \ @@ -843,7 +861,7 @@ apiclient set \ Here's the same example, but with the settings you'd add to user data: -``` +```toml [settings.host-containers.custom] enabled = true source = "MY-CONTAINER-URI" @@ -856,7 +874,7 @@ All host containers will have the `apiclient` binary available at `/usr/local/bi You can also use `apiclient` to run programs in other host containers. For example, to access the admin container: -``` +```txt apiclient exec admin bash ``` @@ -866,9 +884,9 @@ The default `admin` host-container, for example, stores its SSH host keys under There are a few important caveats to understand about host containers: -* They're not orchestrated. They only start or stop according to that `enabled` flag. +* They're not orchestrated. They only start or stop according to that `enabled` flag. * They run in a separate instance of containerd than the one used for orchestrated containers like Kubernetes pods. -* They're not updated automatically. You need to update the `source` and commit those changes. +* They're not updated automatically. You need to update the `source` and commit those changes. * If you set `superpowered` to true, they'll essentially have root access to the host. Because of these caveats, host containers are only intended for special use cases. @@ -878,9 +896,9 @@ Be careful, and make sure you have a similar low-level use case before reaching #### Bootstrap containers settings -* `settings.bootstrap-containers..source`: the image for the container -* `settings.bootstrap-containers..mode`: the mode of the container, it could be one of `off`, `once` or `always`. See below for a description of modes. * `settings.bootstrap-containers..essential`: whether or not the container should fail the boot process, defaults to `false` +* `settings.bootstrap-containers..mode`: the mode of the container, it could be one of `off`, `once` or `always`. See below for a description of modes. +* `settings.bootstrap-containers..source`: the image for the container * `settings.bootstrap-containers..user-data`: field with arbitrary base64-encoded data Bootstrap containers are host containers that can be used to "bootstrap" the host before services like ECS Agent, Kubernetes, and Docker start. 
@@ -904,7 +922,7 @@ Bootstrap containers have three different modes: Here's an example of adding a bootstrap container with API calls: -``` +```txt apiclient set \ bootstrap-containers.bootstrap.source=MY-CONTAINER-URI \ bootstrap-containers.bootstrap.mode=once \ @@ -913,7 +931,7 @@ apiclient set \ Here's the same example, but with the settings you'd add to user data: -``` +```toml [settings.bootstrap-containers.bootstrap] source = "MY-CONTAINER-URI" mode = "once" From abd992a525259f8c05b260b2974584845992eff1 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 25 Sep 2022 00:33:34 +0000 Subject: [PATCH 0774/1356] grub: remove glibc dependency The tools for making images are built with the host toolchain, and the actual image contents are freestanding, so the target's C library is not required. Signed-off-by: Ben Cressey --- packages/grub/Cargo.toml | 3 --- packages/grub/grub.spec | 1 - 2 files changed, 4 deletions(-) diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index c8f0669f..87440cf0 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -11,6 +11,3 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] url = "https://cdn.amazonlinux.com/blobstore/21d0df3b06c1c5cc9e5cf3bb559dad713335e782ac3a46b57c5d0097e22c0aec/grub2-2.06-9.amzn2.0.1.src.rpm" sha512 = "f27b4005e789ce1e0e792133f6adfbdbf221245c03b27c25285ff5b81e53065385536971934744f33c52a924022480aa15cd25e8d5ded9f4999c753e8394ae36" - -[build-dependencies] -glibc = { path = "../glibc" } diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index b3e06463..f84f3925 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -63,7 +63,6 @@ BuildRequires: automake BuildRequires: bison BuildRequires: flex BuildRequires: gettext-devel -BuildRequires: %{_cross_os}glibc-devel %description %{summary}. From fc92aea9050b93f358298206a7d5cbd99da0d0ee Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 25 Sep 2022 00:27:50 +0000 Subject: [PATCH 0775/1356] grub: drop extra autogen.sh invocation The bootstrap script calls `autogen.sh` at the end, and running it a second time adds around 20 seconds to the build. 
Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index f84f3925..cea7105b 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -109,7 +109,6 @@ git commit -a -q -m "base" git am --whitespace=nowarn ../*.patch %{patches} ./bootstrap -./autogen.sh %global grub_cflags -pipe -fno-stack-protector -fno-strict-aliasing %global grub_ldflags -static From 05b0c51dde000e346a50fab90c716d51099a3354 Mon Sep 17 00:00:00 2001 From: John McBride Date: Thu, 15 Sep 2022 09:46:14 -0700 Subject: [PATCH 0776/1356] Remove kubernetes 1.20 variants - Remove aws-k8s-1.20 variant - Remove vmware-k8s-1.20 variant Signed-off-by: John McBride --- .github/workflows/build.yml | 6 +----- README.md | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index debc270e..190bc190 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.20, aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-k8s-1.24, aws-ecs-1] + variant: [aws-k8s-1.21, aws-k8s-1.22, aws-k8s-1.23, aws-k8s-1.24, aws-ecs-1] arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] @@ -57,10 +57,6 @@ jobs: arch: x86_64 supported: false fetch-upstream: "false" - - variant: vmware-k8s-1.20 - arch: x86_64 - supported: true - fetch-upstream: "false" - variant: vmware-k8s-1.21 arch: x86_64 supported: true diff --git a/README.md b/README.md index e04d153f..c62de765 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,6 @@ For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an ima The following variants support EKS, as described above: -* `aws-k8s-1.20` * `aws-k8s-1.21` * `aws-k8s-1.22` * `aws-k8s-1.23` @@ -77,7 +76,6 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.20` * `vmware-k8s-1.21` * `vmware-k8s-1.22` * `vmware-k8s-1.23` @@ -97,6 +95,8 @@ The following variants are no longer supported: * `aws-k8s-1.17` * `aws-k8s-1.18` * `aws-k8s-1.19` +* `aws-k8s-1.20` +* `vmware-k8s-1.20` We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). From b00160e633a63d7e7b6da4b7a915cd8af8513048 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 20 Oct 2022 16:26:42 -0500 Subject: [PATCH 0777/1356] housekeeping: Cancel GH Action on new push When a PR is proposed to the develop branch, a set of GitHub Action jobs are kicked off to test the various configuration we need to cover. With the current configuration of our workflow, if the developer notices an issue and pushes updates to the source branch it will kick off a new set of jobs for that updates code, but the jobs running for the previous revision will keep running. These jobs take a lot of time and resources to run. Once a new change is pushed to the source branch, there is no need to continue testing the older version of the code as it is no longer relevant. This updates our workflow definition to cancel and currently running jobs for the same source (`github.ref`) to avoid unnecessary runner load that may slow down getting results for current PRs. 
Signed-off-by: Sean McGinnis --- .github/workflows/build.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 190bc190..5f17f0b1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -18,6 +18,10 @@ on: # Sample config files and OpenAPI docs - '**.yaml' +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + jobs: build: runs-on: [self-hosted, linux, x64] From 4ea9f29b5f64baeb24ab91567ace274886a95988 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 14 Oct 2022 20:14:48 +0000 Subject: [PATCH 0778/1356] grub: update to grub-2.06-42.amzn2022 Update GRUB to grub-2.06-42.amzn2022. This also switches Bottlerocket's immediate upstream from Amazon Linux 2 to Amazon Linux 2022, which more closely tracks the GRUB project. Signed-off-by: Markus Boehme --- .../grub/0002-gpt-start-new-GPT-module.patch | 8 +-- ...write-function-and-gptrepair-command.patch | 8 +-- ...next-command-for-selecting-priority-.patch | 8 +-- ...by-partition-label-and-uuid-commands.patch | 62 ++++++++++--------- ...-gpt-add-search-by-disk-uuid-command.patch | 59 ++++++++++-------- packages/grub/Cargo.toml | 4 +- packages/grub/grub.spec | 19 +++--- packages/grub/latest-srpm-url.sh | 6 +- 8 files changed, 95 insertions(+), 79 deletions(-) diff --git a/packages/grub/0002-gpt-start-new-GPT-module.patch b/packages/grub/0002-gpt-start-new-GPT-module.patch index 896d62ab..d6d32c88 100644 --- a/packages/grub/0002-gpt-start-new-GPT-module.patch +++ b/packages/grub/0002-gpt-start-new-GPT-module.patch @@ -1,4 +1,4 @@ -From 6635d2ce6f2b2a0be4049af3ce271cdab3fbd97b Mon Sep 17 00:00:00 2001 +From b0aec16d82a40c58ed47235c8b10612d07645eaa Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sun, 28 Sep 2014 21:26:21 -0700 Subject: [PATCH] gpt: start new GPT module @@ -47,10 +47,10 @@ index 3f191aa..c7efe17 100644 name = grub-menulst2cfg; mansection = 1; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 9cff83f..3443e9c 100644 +index 5212faf..e9cce06 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -932,6 +932,11 @@ module = { +@@ -942,6 +942,11 @@ module = { common = commands/gptsync.c; }; @@ -919,5 +919,5 @@ index 0000000..a824cd9 + grub_fini_all (); +} -- -2.21.3 +2.36.1 diff --git a/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch index 1366729d..a3fd35e5 100644 --- a/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch +++ b/packages/grub/0007-gpt-add-write-function-and-gptrepair-command.patch @@ -1,4 +1,4 @@ -From b6fdf2155967411d35ebf7667f0495ec741ad055 Mon Sep 17 00:00:00 2001 +From 822c0cb2c9d6cd7b0e4be0362bbc5804417e479f Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Sun, 19 Oct 2014 14:21:29 -0700 Subject: [PATCH] gpt: add write function and gptrepair command @@ -36,10 +36,10 @@ index c7efe17..a2ca51d 100644 testcase; name = file_filter_test; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 3443e9c..72d485f 100644 +index e9cce06..c040652 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -932,6 +932,11 @@ module = { +@@ -942,6 +942,11 @@ module = { common = commands/gptsync.c; }; @@ -363,5 +363,5 @@ index 0000000..80b2de6 +cmp "${img1}" "${img2}" +echo -- -2.21.3 +2.36.1 diff --git a/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch 
b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch index 97928b0c..67b6ca53 100644 --- a/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch +++ b/packages/grub/0009-gpt-new-gptprio.next-command-for-selecting-priority-.patch @@ -1,4 +1,4 @@ -From ad528de92162a7aaa2666782c5cd18a9ab14d7e2 Mon Sep 17 00:00:00 2001 +From 06eff592def149c09fa42fee4e5d98b7abf00717 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Mon, 3 Nov 2014 17:14:37 -0800 Subject: [PATCH] gpt: new gptprio.next command for selecting priority based @@ -42,10 +42,10 @@ index a2ca51d..eb4bc90 100644 testcase; name = file_filter_test; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 72d485f..78c64a8 100644 +index c040652..393aecd 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -937,6 +937,11 @@ module = { +@@ -947,6 +947,11 @@ module = { common = commands/gptrepair.c; }; @@ -525,5 +525,5 @@ index 0000000..f4aea0d +check_prio 2 3 0 0 +check_prio 3 2 0 0 -- -2.21.3 +2.36.1 diff --git a/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch index bb4a151c..0e7697c0 100644 --- a/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch +++ b/packages/grub/0014-gpt-add-search-by-partition-label-and-uuid-commands.patch @@ -1,19 +1,25 @@ -From 0fceffa51b087c1f3f19f470f08e9378d71ddb2d Mon Sep 17 00:00:00 2001 +From 334b58a122bad34c7efa1b1ea6d58389477fd255 Mon Sep 17 00:00:00 2001 From: Michael Marineau Date: Thu, 27 Nov 2014 16:34:21 -0800 Subject: [PATCH] gpt: add search by partition label and uuid commands Builds on the existing filesystem search code. Only for GPT right now. + +[markubo: Update to grub-2.06-42.amzn2022. Search functions take a more +general search_flags parameter now instead of a no_floppy flag.] 
+ +Signed-off-by: Markus Boehme + --- Makefile.util.def | 2 + grub-core/Makefile.core.def | 10 +++ grub-core/commands/search.c | 49 +++++++++++++++ grub-core/commands/search_part_label.c | 5 ++ grub-core/commands/search_part_uuid.c | 5 ++ - grub-core/commands/search_wrap.c | 12 ++++ + grub-core/commands/search_wrap.c | 10 +++ grub-core/lib/gpt.c | 64 ++++++++++++++++++++ include/grub/gpt_partition.h | 16 +++++ - include/grub/search.h | 4 ++ + include/grub/search.h | 6 ++ tests/gpt_unit_test.c | 84 ++++++++++++++++++++++++++ 10 files changed, 251 insertions(+) create mode 100644 grub-core/commands/search_part_label.c @@ -33,10 +39,10 @@ index eb4bc90..8f74405 100644 common = grub-core/kern/emu/hostfs.c; common = grub-core/lib/gpt.c; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index 78c64a8..fb4b1a1 100644 +index 393aecd..3cde624 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -1136,6 +1136,16 @@ module = { +@@ -1152,6 +1152,16 @@ module = { common = commands/search_label.c; }; @@ -54,7 +60,7 @@ index 78c64a8..fb4b1a1 100644 name = setpci; common = commands/setpci.c; diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index ed090b3..4ad72c5 100644 +index 57d26ce..bb0c7cb 100644 --- a/grub-core/commands/search.c +++ b/grub-core/commands/search.c @@ -30,6 +30,9 @@ @@ -67,7 +73,7 @@ index ed090b3..4ad72c5 100644 GRUB_MOD_LICENSE ("GPLv3+"); -@@ -90,6 +93,44 @@ iterate_device (const char *name, void *data) +@@ -109,6 +112,44 @@ iterate_device (const char *name, void *data) } grub_free (buf); } @@ -112,7 +118,7 @@ index ed090b3..4ad72c5 100644 #else { /* SEARCH_FS_UUID or SEARCH_LABEL */ -@@ -313,6 +354,10 @@ static grub_command_t cmd; +@@ -332,6 +373,10 @@ static grub_command_t cmd; #ifdef DO_SEARCH_FILE GRUB_MOD_INIT(search_fs_file) @@ -123,7 +129,7 @@ index ed090b3..4ad72c5 100644 #elif defined (DO_SEARCH_FS_UUID) GRUB_MOD_INIT(search_fs_uuid) #else -@@ -327,6 +372,10 @@ GRUB_MOD_INIT(search_label) +@@ -346,6 +391,10 @@ GRUB_MOD_INIT(search_label) #ifdef DO_SEARCH_FILE GRUB_MOD_FINI(search_fs_file) @@ -157,7 +163,7 @@ index 0000000..2d1d3d0 +#define HELP_MESSAGE N_("Search devices by partition UUID. 
If VARIABLE is specified, the first device found is set to a variable.") +#include "search.c" diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index 47fc8eb..d357454 100644 +index 0b62acf..82f8e63 100644 --- a/grub-core/commands/search_wrap.c +++ b/grub-core/commands/search_wrap.c @@ -36,6 +36,10 @@ static const struct grub_arg_option options[] = @@ -171,7 +177,7 @@ index 47fc8eb..d357454 100644 {"set", 's', GRUB_ARG_OPTION_OPTIONAL, N_("Set a variable to the first device found."), N_("VARNAME"), ARG_TYPE_STRING}, -@@ -71,6 +75,8 @@ enum options +@@ -72,6 +76,8 @@ enum options SEARCH_FILE, SEARCH_LABEL, SEARCH_FS_UUID, @@ -179,20 +185,18 @@ index 47fc8eb..d357454 100644 + SEARCH_PART_UUID, SEARCH_SET, SEARCH_NO_FLOPPY, - SEARCH_HINT, -@@ -186,6 +192,12 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + SEARCH_EFIDISK_ONLY, +@@ -193,6 +199,10 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + grub_search_label (id, var, flags, hints, nhints); else if (state[SEARCH_FS_UUID].set) - grub_search_fs_uuid (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); + grub_search_fs_uuid (id, var, flags, hints, nhints); + else if (state[SEARCH_PART_LABEL].set) -+ grub_search_part_label (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); ++ grub_search_part_label (id, var, flags, hints, nhints); + else if (state[SEARCH_PART_UUID].set) -+ grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); ++ grub_search_part_uuid (id, var, flags, hints, nhints); else if (state[SEARCH_FILE].set) - grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); + grub_search_fs_file (id, var, flags, hints, nhints); + else diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c index 9a1835b..10a4b85 100644 --- a/grub-core/lib/gpt.c @@ -309,16 +313,18 @@ index a44c0d5..7b04080 100644 + #endif /* ! 
GRUB_GPT_PARTITION_HEADER */ diff --git a/include/grub/search.h b/include/grub/search.h -index d80347d..c2f40ab 100644 +index 4190aeb..66722a6 100644 --- a/include/grub/search.h +++ b/include/grub/search.h -@@ -25,5 +25,9 @@ void grub_search_fs_uuid (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); - void grub_search_label (const char *key, const char *var, int no_floppy, +@@ -34,5 +34,11 @@ void grub_search_fs_uuid (const char *key, const char *var, + void grub_search_label (const char *key, const char *var, + enum search_flags flags, char **hints, unsigned nhints); -+void grub_search_part_uuid (const char *key, const char *var, int no_floppy, ++void grub_search_part_uuid (const char *key, const char *var, ++ enum search_flags flags, + char **hints, unsigned nhints); -+void grub_search_part_label (const char *key, const char *var, int no_floppy, ++void grub_search_part_label (const char *key, const char *var, ++ enum search_flags flags, + char **hints, unsigned nhints); #endif @@ -442,5 +448,5 @@ index 5692a5a..deb55a9 100644 grub_fini_all (); } -- -2.21.3 +2.36.1 diff --git a/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch index 76a334d2..03d860f8 100644 --- a/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch +++ b/packages/grub/0017-gpt-add-search-by-disk-uuid-command.patch @@ -1,17 +1,22 @@ -From 9963416869afbe7fa9e671fa6ffd0871aec994f8 Mon Sep 17 00:00:00 2001 +From a4ecd2c5ff6cb23af51977a622dca5fc2a9b7cef Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Mon, 31 Aug 2015 15:15:48 -0700 Subject: [PATCH] gpt: add search by disk uuid command +[markubo: Update to grub-2.06-42.amzn2022. Search functions take a more +general search_flags parameter now instead of a no_floppy flag.] 
+ +Signed-off-by: Markus Boehme + --- Makefile.util.def | 1 + grub-core/Makefile.core.def | 5 ++++ grub-core/commands/search.c | 28 +++++++++++++++++++++-- grub-core/commands/search_disk_uuid.c | 5 ++++ - grub-core/commands/search_wrap.c | 6 +++++ + grub-core/commands/search_wrap.c | 5 ++++ grub-core/lib/gpt.c | 21 +++++++++++++++++ include/grub/gpt_partition.h | 4 ++++ - include/grub/search.h | 2 ++ + include/grub/search.h | 3 +++ tests/gpt_unit_test.c | 33 +++++++++++++++++++++++++++ 9 files changed, 103 insertions(+), 2 deletions(-) create mode 100644 grub-core/commands/search_disk_uuid.c @@ -29,10 +34,10 @@ index 8f74405..33ce60d 100644 common = grub-core/kern/emu/hostfs.c; common = grub-core/lib/gpt.c; diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def -index fb4b1a1..b9c0494 100644 +index 3cde624..3096cd4 100644 --- a/grub-core/Makefile.core.def +++ b/grub-core/Makefile.core.def -@@ -1146,6 +1146,11 @@ module = { +@@ -1162,6 +1162,11 @@ module = { common = commands/search_part_label.c; }; @@ -45,7 +50,7 @@ index fb4b1a1..b9c0494 100644 name = setpci; common = commands/setpci.c; diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c -index 4ad72c5..fd411ce 100644 +index bb0c7cb..ec03c75 100644 --- a/grub-core/commands/search.c +++ b/grub-core/commands/search.c @@ -30,7 +30,8 @@ @@ -58,9 +63,9 @@ index 4ad72c5..fd411ce 100644 #include #endif -@@ -69,7 +70,7 @@ iterate_device (const char *name, void *data) - name[0] == 'f' && name[1] == 'd' && name[2] >= '0' && name[2] <= '9') - return 1; +@@ -88,7 +89,7 @@ iterate_device (const char *name, void *data) + grub_device_close (dev); + } -#ifdef DO_SEARCH_FS_UUID +#if defined(DO_SEARCH_FS_UUID) || defined(DO_SEARCH_DISK_UUID) @@ -93,7 +98,7 @@ index 4ad72c5..fd411ce 100644 grub_device_close (dev); } } -@@ -360,6 +380,8 @@ GRUB_MOD_INIT(search_part_uuid) +@@ -379,6 +399,8 @@ GRUB_MOD_INIT(search_part_uuid) GRUB_MOD_INIT(search_part_label) #elif defined (DO_SEARCH_FS_UUID) GRUB_MOD_INIT(search_fs_uuid) @@ -102,7 +107,7 @@ index 4ad72c5..fd411ce 100644 #else GRUB_MOD_INIT(search_label) #endif -@@ -378,6 +400,8 @@ GRUB_MOD_FINI(search_part_uuid) +@@ -397,6 +419,8 @@ GRUB_MOD_FINI(search_part_uuid) GRUB_MOD_FINI(search_part_label) #elif defined (DO_SEARCH_FS_UUID) GRUB_MOD_FINI(search_fs_uuid) @@ -123,7 +128,7 @@ index 0000000..fba96f6 +#define HELP_MESSAGE N_("Search devices by disk UUID. 
If VARIABLE is specified, the first device found is set to a variable.") +#include "search.c" diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c -index d357454..fc149cd 100644 +index 82f8e63..c8152b1 100644 --- a/grub-core/commands/search_wrap.c +++ b/grub-core/commands/search_wrap.c @@ -40,6 +40,8 @@ static const struct grub_arg_option options[] = @@ -135,24 +140,23 @@ index d357454..fc149cd 100644 {"set", 's', GRUB_ARG_OPTION_OPTIONAL, N_("Set a variable to the first device found."), N_("VARNAME"), ARG_TYPE_STRING}, -@@ -77,6 +79,7 @@ enum options +@@ -78,6 +80,7 @@ enum options SEARCH_FS_UUID, SEARCH_PART_LABEL, SEARCH_PART_UUID, + SEARCH_DISK_UUID, SEARCH_SET, SEARCH_NO_FLOPPY, - SEARCH_HINT, -@@ -198,6 +201,9 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + SEARCH_EFIDISK_ONLY, +@@ -203,6 +206,8 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + grub_search_part_label (id, var, flags, hints, nhints); else if (state[SEARCH_PART_UUID].set) - grub_search_part_uuid (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); + grub_search_part_uuid (id, var, flags, hints, nhints); + else if (state[SEARCH_DISK_UUID].set) -+ grub_search_disk_uuid (id, var, state[SEARCH_NO_FLOPPY].set, -+ hints, nhints); ++ grub_search_disk_uuid (id, var, flags, hints, nhints); else if (state[SEARCH_FILE].set) - grub_search_fs_file (id, var, state[SEARCH_NO_FLOPPY].set, - hints, nhints); + grub_search_fs_file (id, var, flags, hints, nhints); + else diff --git a/grub-core/lib/gpt.c b/grub-core/lib/gpt.c index aedc4f7..e162baf 100644 --- a/grub-core/lib/gpt.c @@ -199,14 +203,15 @@ index 1eb2f7b..16fdd7f 100644 + #endif /* ! GRUB_GPT_PARTITION_HEADER */ diff --git a/include/grub/search.h b/include/grub/search.h -index c2f40ab..7f69d25 100644 +index 66722a6..a5f56b2 100644 --- a/include/grub/search.h +++ b/include/grub/search.h -@@ -29,5 +29,7 @@ void grub_search_part_uuid (const char *key, const char *var, int no_floppy, - char **hints, unsigned nhints); - void grub_search_part_label (const char *key, const char *var, int no_floppy, +@@ -40,5 +40,8 @@ void grub_search_part_uuid (const char *key, const char *var, + void grub_search_part_label (const char *key, const char *var, + enum search_flags flags, char **hints, unsigned nhints); -+void grub_search_disk_uuid (const char *key, const char *var, int no_floppy, ++void grub_search_disk_uuid (const char *key, const char *var, ++ enum search_flags flags, + char **hints, unsigned nhints); #endif @@ -268,5 +273,5 @@ index 7a1af46..60f6017 100644 grub_fini_all (); } -- -2.21.3 +2.36.1 diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 87440cf0..6c364bb1 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -9,5 +9,5 @@ build = "build.rs" path = "pkg.rs" [[package.metadata.build-package.external-files]] -url = "https://cdn.amazonlinux.com/blobstore/21d0df3b06c1c5cc9e5cf3bb559dad713335e782ac3a46b57c5d0097e22c0aec/grub2-2.06-9.amzn2.0.1.src.rpm" -sha512 = "f27b4005e789ce1e0e792133f6adfbdbf221245c03b27c25285ff5b81e53065385536971934744f33c52a924022480aa15cd25e8d5ded9f4999c753e8394ae36" +url = "https://al2022-repos-us-west-2-9761ab97.s3.dualstack.us-west-2.amazonaws.com/blobstore/aa41fdf9982b65a4c4dad5df5b49ba143b1710d60f82688221966f3c790c6c63/grub2-2.06-42.amzn2022.0.1.src.rpm" +sha512 = "3dbfc0cc48dc7125dca445ca9b6538ecb2c548cadc77714b930eb9992697e6eaef6c5eaece6a367b232d20a2d693a4fbd93b537d79596de4791c576f3b8ecc18" diff --git a/packages/grub/grub.spec 
b/packages/grub/grub.spec index cea7105b..98513b4f 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -6,7 +6,7 @@ # This is specific to the upstream source RPM, and will likely need to be # updated for each new version. -%global gnulib_fixes gnulib-fixes-0e9febb5e +%global gnulib_version gnulib-9f48fb992a3d7e96610c4ce8be969cff2d61a01b Name: %{_cross_os}grub Version: 2.06 @@ -14,7 +14,7 @@ Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ -Source0: https://cdn.amazonlinux.com/blobstore/21d0df3b06c1c5cc9e5cf3bb559dad713335e782ac3a46b57c5d0097e22c0aec/grub2-2.06-9.amzn2.0.1.src.rpm +Source0: https://al2022-repos-us-west-2-9761ab97.s3.dualstack.us-west-2.amazonaws.com/blobstore/aa41fdf9982b65a4c4dad5df5b49ba143b1710d60f82688221966f3c790c6c63/grub2-2.06-42.amzn2022.0.1.src.rpm Source1: bios.cfg Source2: efi.cfg Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch @@ -83,7 +83,7 @@ Summary: Tools for the bootloader with support for Linux and more %prep rpm2cpio %{S:0} | cpio -iu grub-%{version}.tar.xz \ bootstrap bootstrap.conf \ - gitignore %{gnulib_fixes}.tar.gz \ + gitignore %{gnulib_version}.tar.gz \ "*.patch" # Mimic prep from upstream spec to prepare for patching. @@ -91,12 +91,8 @@ tar -xof grub-%{version}.tar.xz; rm grub-%{version}.tar.xz %setup -TDn grub-%{version} mv ../bootstrap{,.conf} . mv ../gitignore .gitignore -tar -xof ../%{gnulib_fixes}.tar.gz; rm ../%{gnulib_fixes}.tar.gz -mv %{gnulib_fixes} gnulib -pushd gnulib -patch -p1 < ../../gnulib-amzn2-cflags.patch -rm ../../gnulib-amzn2-cflags.patch -popd +tar -xof ../%{gnulib_version}.tar.gz; rm ../%{gnulib_version}.tar.gz +mv %{gnulib_version} gnulib cp unicode/COPYING COPYING.unicode rm -f configure @@ -108,6 +104,11 @@ git add . git commit -a -q -m "base" git am --whitespace=nowarn ../*.patch %{patches} +# Let bootstrap start from a clean slate and freshly copy in the relevant +# parts from gnulib. In particular remove the configure macros that aren't +# compatible with the copied in version of gnulib. +rm -r build-aux m4 + ./bootstrap %global grub_cflags -pipe -fno-stack-protector -fno-strict-aliasing diff --git a/packages/grub/latest-srpm-url.sh b/packages/grub/latest-srpm-url.sh index 7d5be9aa..bc1e0125 100755 --- a/packages/grub/latest-srpm-url.sh +++ b/packages/grub/latest-srpm-url.sh @@ -1,2 +1,6 @@ #!/bin/sh -docker run --rm amazonlinux:2 sh -c 'yum install -q -y yum-utils && yumdownloader -q --source --urls grub2 | grep ^http' +cmd='dnf install -q -y --releasever=latest yum-utils && yumdownloader -q --releasever=latest --source --urls grub2' +docker run --rm amazonlinux:2022 sh -c "${cmd}" \ + | grep '^http' \ + | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ + | sed 's_:/_://_' From 7f0ed427898a5e18c964444205653b76aa48721b Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 20 Sep 2022 16:39:58 +0000 Subject: [PATCH 0779/1356] testsys: Update to v0.0.2 This update was partially completed in #2392. Image locations have been updated and e2e repo config for sonobuoy agents has been added. 
--- tools/Cargo.lock | 8 ++++---- tools/testsys/Cargo.toml | 4 ++-- tools/testsys/src/install.rs | 2 +- tools/testsys/src/run.rs | 12 ++++++------ tools/testsys/src/status.rs | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 481f33ff..d852cd7c 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -608,8 +608,8 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.0.1" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=07b9ae8#07b9ae8e902623842c334889517973d0c9d82691" +version = "0.0.2" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.2#dfd5bc90a481beb05e7a64ac4b20c86fd2491d9d" dependencies = [ "model", "serde", @@ -1734,8 +1734,8 @@ dependencies = [ [[package]] name = "model" -version = "0.0.1" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?rev=07b9ae8#07b9ae8e902623842c334889517973d0c9d82691" +version = "0.0.2" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.2#dfd5bc90a481beb05e7a64ac4b20c86fd2491d9d" dependencies = [ "async-recursion", "async-trait", diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index a02b2636..bdda2145 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -10,7 +10,7 @@ publish = false anyhow = "1.0" aws-config = "0.48" aws-sdk-ec2 = "0.18" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "07b9ae8", version = "0.0.1"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } env_logger = "0.9" @@ -18,7 +18,7 @@ futures = "0.3.8" k8s-openapi = { version = "0.15", features = ["v1_20", "api"], default-features = false } log = "0.4" maplit = "1.0.2" -model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", rev = "07b9ae8", version = "0.0.1"} +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } serde = { version = "1", features = ["derive"] } serde_json = "1" diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs index 11e7de59..b87ef3f2 100644 --- a/tools/testsys/src/install.rs +++ b/tools/testsys/src/install.rs @@ -21,7 +21,7 @@ pub(crate) struct Install { #[clap( long = "controller-uri", env = "TESTSYS_CONTROLLER_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/controller:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/controller:v0.0.2" )] controller_uri: String, } diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index e40e1a0a..3e2aeddd 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -272,7 +272,7 @@ pub(crate) struct TestsysImages { #[clap( long = "eks-resource-agent-image", env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/eks-resource-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/eks-resource-agent:v0.0.2" )] pub(crate) eks_resource: String, @@ -280,7 +280,7 @@ pub(crate) struct TestsysImages { #[clap( long = "ecs-resource-agent-image", env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE", - default_value = 
"public.ecr.aws/bottlerocket-test-system/ecs-resource-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/ecs-resource-agent:v0.0.2" )] pub(crate) ecs_resource: String, @@ -288,7 +288,7 @@ pub(crate) struct TestsysImages { #[clap( long = "ec2-resource-agent-image", env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/ec2-resource-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/ec2-resource-agent:v0.0.2" )] pub(crate) ec2_resource: String, @@ -296,7 +296,7 @@ pub(crate) struct TestsysImages { #[clap( long = "sonobuoy-test-agent-image", env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/sonobuoy-test-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/sonobuoy-test-agent:v0.0.2" )] pub(crate) sonobuoy_test: String, @@ -304,7 +304,7 @@ pub(crate) struct TestsysImages { #[clap( long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/ecs-test-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/ecs-test-agent:v0.0.2" )] pub(crate) ecs_test: String, @@ -312,7 +312,7 @@ pub(crate) struct TestsysImages { #[clap( long = "migration-test-agent-image", env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/migration-test-agent:v0.0.1" + default_value = "public.ecr.aws/bottlerocket-test-system/migration-test-agent:v0.0.2" )] pub(crate) migration_test: String, diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs index bdf27765..fe5f3b94 100644 --- a/tools/testsys/src/status.rs +++ b/tools/testsys/src/status.rs @@ -46,7 +46,7 @@ impl Status { } else { let (width, _) = term_size::dimensions().unwrap_or((80, 0)); debug!("Window width '{}'", width); - println!("{:width$}", status.to_string()); + println!("{:width$}", status); } Ok(()) } From 82baeecf746b50146ac2724062a4ca04d2cef8dc Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 20 Sep 2022 16:42:19 +0000 Subject: [PATCH 0780/1356] testsys: Introduce `Test.toml` for test configs Add declarative testing for `cargo make test` this allows users to setup configuration once and use `cargo make -e BUILDSYS_ARCH= -e BUILDSYS_VARIANT= test` without any other needed variables. 
--- .gitignore | 1 + tools/Cargo.lock | 84 +++++++ tools/Cargo.toml | 1 + tools/testsys-config/Cargo.toml | 21 ++ tools/testsys-config/src/lib.rs | 387 +++++++++++++++++++++++++++++ tools/testsys/Cargo.toml | 2 + tools/testsys/Test.toml.example | 119 +++++++++ tools/testsys/src/aws_resources.rs | 380 +++++++++++++++++----------- tools/testsys/src/run.rs | 190 ++++++++------ 9 files changed, 972 insertions(+), 213 deletions(-) create mode 100644 tools/testsys-config/Cargo.toml create mode 100644 tools/testsys-config/src/lib.rs create mode 100644 tools/testsys/Test.toml.example diff --git a/.gitignore b/.gitignore index 595b687d..b50d04e8 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ /.gomodcache /html /Infra.toml +/Test.toml /testsys.kubeconfig /*.pem /keys diff --git a/tools/Cargo.lock b/tools/Cargo.lock index d852cd7c..8fef72b8 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1242,6 +1242,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "4.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56b224eaa4987c03c30b251de7ef0c15a6a59f34222905850dbc3026dfb24d5f" +dependencies = [ + "log", + "pest", + "pest_derive", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2012,6 +2026,50 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +[[package]] +name = "pest" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb779fcf4bb850fbbb0edc96ff6cf34fd90c4b1a112ce042653280d9a7364048" +dependencies = [ + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "502b62a6d0245378b04ffe0a7fb4f4419a4815fce813bd8a0ec89a56e07d67b1" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "451e629bf49b750254da26132f1a5a9d11fd8a95a3df51d15c4abd1ba154cb6c" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcec162c71c45e269dfc3fc2916eaeb97feab22993a21bcce4721d08cd7801a6" +dependencies = [ + "once_cell", + "pest", + "sha1", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -2836,6 +2894,7 @@ dependencies = [ "anyhow", "aws-config", "aws-sdk-ec2", + "base64", "bottlerocket-types", "bottlerocket-variant", "clap 3.2.22", @@ -2850,10 +2909,29 @@ dependencies = [ "serde_json", "serde_plain", "term_size", + "testsys-config", "tokio", "unescape", ] +[[package]] +name = "testsys-config" +version = "0.1.0" +dependencies = [ + "bottlerocket-variant", + "handlebars", + "home", + "lazy_static", + "log", + "maplit", + "model", + "serde", + "serde_yaml", + "snafu", + "toml", + "url", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -3247,6 +3325,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "ucd-trie" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" + [[package]] name = 
"unescape" version = "0.1.0" diff --git a/tools/Cargo.toml b/tools/Cargo.toml index b76b84c5..03fb9b38 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -6,4 +6,5 @@ members = [ "pubsys-config", "pubsys-setup", "testsys", + "testsys-config", ] diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml new file mode 100644 index 00000000..a3287ad9 --- /dev/null +++ b/tools/testsys-config/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "testsys-config" +version = "0.1.0" +authors = ["Ethan Pullen "] +license = "Apache-2.0 OR MIT" +edition = "2021" +publish = false + +[dependencies] +bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } +handlebars = "4.3" +home = "0.5" +lazy_static = "1.4" +log = "0.4" +maplit="1.0" +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} +serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.8.17" +snafu = "0.7" +toml = "0.5" +url = { version = "2.1.0", features = ["serde"] } diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs new file mode 100644 index 00000000..c2c9f163 --- /dev/null +++ b/tools/testsys-config/src/lib.rs @@ -0,0 +1,387 @@ +use bottlerocket_variant::Variant; +pub use error::Error; +use handlebars::Handlebars; +use log::warn; +use maplit::btreemap; +use model::SecretName; +use serde::{Deserialize, Serialize}; +use snafu::ResultExt; +use std::collections::{BTreeMap, HashMap}; +use std::fs; +use std::path::Path; +pub type Result = std::result::Result; + +/// Configuration needed to run tests +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "kebab-case")] +pub struct TestConfig { + /// High level configuration for TestSys + pub test: Option, + + #[serde(flatten, serialize_with = "toml::ser::tables_last")] + /// Configuration for testing variants + pub configs: HashMap, +} + +impl TestConfig { + /// Deserializes a TestConfig from a given path + pub fn from_path
<P>
(path: P) -> Result + where + P: AsRef, + { + let path = path.as_ref(); + let test_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; + toml::from_str(&test_config_str).context(error::InvalidTomlSnafu { path }) + } + + /// Deserializes a TestConfig from a given path, if it exists, otherwise builds a default + /// config + pub fn from_path_or_default
<P>
(path: P) -> Result + where + P: AsRef, + { + if path.as_ref().exists() { + Self::from_path(path) + } else { + warn!( + "No test config was found at '{}'. Using the default config.", + path.as_ref().display() + ); + Ok(Self::default()) + } + } + + /// Create a single config for the `variant` and `arch` from this test configuration by + /// determining a list of tables that contain information relevant to the arch, variant + /// combination. Then, the tables are reduced to a single config by selecting values from the + /// table based on the order of precedence. If `starting_config` is provided it will be used as + /// the config with the highest precedence. + pub fn reduced_config( + &self, + variant: &Variant, + arch: S, + starting_config: Option, + ) -> GenericVariantConfig + where + S: Into, + { + let arch = arch.into(); + // Starting with a list of keys ordered by precedence, return a single config with values + // selected by the order of the list. + config_keys(variant) + // Convert the vec of keys in to an iterator of keys. + .into_iter() + // Convert the iterator of keys to and iterator of Configs. If the key does not have a + // configuration in the config file, remove it from the iterator. + .filter_map(|key| self.configs.get(&key).cloned()) + // Take the iterator of configurations and extract the arch specific config and the + // non-arch specific config for each config. Then, convert them into a single iterator. + .flat_map(|config| vec![config.for_arch(&arch), config.config]) + // Take the iterator of configurations and merge them into a single config by populating + // each field with the first value that is not `None` while following the list of + // precedence. + .fold( + starting_config.unwrap_or_default(), + GenericVariantConfig::merge, + ) + } +} + +/// High level configurations for a test +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] +#[serde(deny_unknown_fields, rename_all = "kebab-case")] +pub struct Test { + /// The name of the repo in `Infra.toml` that should be used for testing + pub repo: Option, + + #[serde(flatten)] + /// The URI of TestSys images + pub testsys_images: TestsysImages, + + /// A registry containing all TestSys images + pub testsys_image_registry: Option, +} + +#[derive(Debug, Default)] +pub struct AwsK8sVariantConfig { + /// The names of all clusters this variant should be tested over. This is particularly useful + /// for testing Bottlerocket on ipv4 and ipv6 clusters. + pub cluster_names: Vec, + /// The instance type that instances should be launched with + pub instance_type: Option, + /// The secrets needed by the agents + pub secrets: BTreeMap, + /// The role that should be assumed for this particular variant + pub assume_role: Option, + /// The kubernetes conformance image that should be used for this variant + pub kube_conformance_image: Option, + /// The e2e repo containing sonobuoy images + pub e2e_repo_registry: Option, +} + +#[derive(Debug, Default)] +pub struct AwsEcsVariantConfig { + /// The names of all clusters this variant should be tested over + pub cluster_names: Vec, + /// The instance type that instances should be launched with + pub instance_type: Option, + /// The secrets needed by the agents + pub secrets: BTreeMap, + /// The role that should be assumed for this particular variant + pub assume_role: Option, +} + +/// Create a vec of relevant keys for this variant ordered from most specific to least specific. 
+fn config_keys(variant: &Variant) -> Vec { + let (family_flavor, platform_flavor) = variant + .variant_flavor() + .map(|flavor| { + ( + format!("{}-{}", variant.family(), flavor), + format!("{}-{}", variant.platform(), flavor), + ) + }) + .unwrap_or_default(); + + // The keys used to describe configuration (most specific -> least specific) + vec![ + variant.to_string(), + family_flavor, + variant.family().to_string(), + platform_flavor, + variant.platform().to_string(), + ] +} + +/// All configurations for a specific config level, i.e `-` +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] +#[serde(deny_unknown_fields)] +pub struct GenericConfig { + #[serde(default)] + aarch64: GenericVariantConfig, + #[serde(default)] + x86_64: GenericVariantConfig, + #[serde(default, flatten)] + config: GenericVariantConfig, +} + +impl GenericConfig { + /// Get the configuration for a specific arch. + pub fn for_arch(&self, arch: S) -> GenericVariantConfig + where + S: Into, + { + match arch.into().as_str() { + "x86_64" => self.x86_64.clone(), + "aarch64" => self.aarch64.clone(), + _ => Default::default(), + } + } +} + +/// The configuration for a specific config level (-). This may or may not be arch +/// specific depending on it's location in `GenericConfig`. +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] +#[serde(deny_unknown_fields)] +#[serde(rename_all = "kebab-case")] +pub struct GenericVariantConfig { + /// The names of all clusters this variant should be tested over. This is particularly useful + /// for testing Bottlerocket on ipv4 and ipv6 clusters. + #[serde(default)] + pub cluster_names: Vec, + /// The instance type that instances should be launched with + pub instance_type: Option, + /// The secrets needed by the agents + #[serde(default)] + pub secrets: BTreeMap, + /// The role that should be assumed for this particular variant + pub agent_role: Option, + /// The custom images used for conformance testing + pub conformance_image: Option, + /// The custom registry used for conformance testing + pub conformance_registry: Option, +} + +impl GenericVariantConfig { + /// Overwrite the unset values of `self` with the set values of `other` + fn merge(self, other: Self) -> Self { + let cluster_names = if self.cluster_names.is_empty() { + other.cluster_names + } else { + self.cluster_names + }; + + let secrets = if self.secrets.is_empty() { + other.secrets + } else { + self.secrets + }; + + Self { + cluster_names, + instance_type: self.instance_type.or(other.instance_type), + secrets, + agent_role: self.agent_role.or(other.agent_role), + conformance_image: self.conformance_image.or(other.conformance_image), + conformance_registry: self.conformance_registry.or(other.conformance_registry), + } + } +} + +impl From for AwsK8sVariantConfig { + fn from(val: GenericVariantConfig) -> Self { + Self { + cluster_names: val.cluster_names, + instance_type: val.instance_type, + secrets: val.secrets, + assume_role: val.agent_role, + kube_conformance_image: val.conformance_image, + e2e_repo_registry: val.conformance_registry, + } + } +} + +impl From for AwsEcsVariantConfig { + fn from(val: GenericVariantConfig) -> Self { + Self { + cluster_names: val.cluster_names, + instance_type: val.instance_type, + secrets: val.secrets, + assume_role: val.agent_role, + } + } +} + +/// Fill in the templated cluster name with `arch` and `variant`. 
+pub fn rendered_cluster_name(cluster_name: String, arch: S1, variant: S2) -> Result +where + S1: Into, + S2: Into, +{ + let mut cluster_template = Handlebars::new(); + cluster_template.register_template_string("cluster_name", cluster_name)?; + Ok(cluster_template.render( + "cluster_name", + &btreemap! {"arch".to_string() => arch.into(), "variant".to_string() => variant.into()}, + )?) +} + +#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] +#[serde(deny_unknown_fields)] +#[serde(rename_all = "kebab-case")] +pub struct TestsysImages { + pub eks_resource_agent_image: Option, + pub ecs_resource_agent_image: Option, + pub ec2_resource_agent_image: Option, + pub sonobuoy_test_agent_image: Option, + pub ecs_test_agent_image: Option, + pub migration_test_agent_image: Option, + pub testsys_agent_pull_secret: Option, +} + +const AGENT_VERSION: &str = "v0.0.2"; + +impl TestsysImages { + /// Create an images config for a specific registry. + pub fn new(registry: S) -> Self + where + S: Into, + { + let registry = registry.into(); + Self { + eks_resource_agent_image: Some(format!( + "{}/eks-resource-agent:{AGENT_VERSION}", + registry + )), + ecs_resource_agent_image: Some(format!( + "{}/ecs-resource-agent:{AGENT_VERSION}", + registry + )), + ec2_resource_agent_image: Some(format!( + "{}/ec2-resource-agent:{AGENT_VERSION}", + registry + )), + sonobuoy_test_agent_image: Some(format!( + "{}/sonobuoy-test-agent:{AGENT_VERSION}", + registry + )), + ecs_test_agent_image: Some(format!("{}/ecs-test-agent:{AGENT_VERSION}", registry)), + migration_test_agent_image: Some(format!( + "{}/migration-test-agent:{AGENT_VERSION}", + registry + )), + testsys_agent_pull_secret: None, + } + } + + pub fn merge(self, other: Self) -> Self { + Self { + eks_resource_agent_image: self + .eks_resource_agent_image + .or(other.eks_resource_agent_image), + ecs_resource_agent_image: self + .ecs_resource_agent_image + .or(other.ecs_resource_agent_image), + ec2_resource_agent_image: self + .ec2_resource_agent_image + .or(other.ec2_resource_agent_image), + sonobuoy_test_agent_image: self + .sonobuoy_test_agent_image + .or(other.sonobuoy_test_agent_image), + ecs_test_agent_image: self.ecs_test_agent_image.or(other.ecs_test_agent_image), + migration_test_agent_image: self + .migration_test_agent_image + .or(other.migration_test_agent_image), + testsys_agent_pull_secret: self + .testsys_agent_pull_secret + .or(other.testsys_agent_pull_secret), + } + } + + pub fn public_images() -> Self { + Self::new("public.ecr.aws/bottlerocket-test-system") + } +} + +mod error { + use snafu::Snafu; + use std::io; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + pub enum Error { + #[snafu(display("Failed to read '{}': {}", path.display(), source))] + File { path: PathBuf, source: io::Error }, + + #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] + InvalidToml { + path: PathBuf, + source: toml::de::Error, + }, + + #[snafu(display("Invalid lock file at '{}': {}", path.display(), source))] + InvalidLock { + path: PathBuf, + source: serde_yaml::Error, + }, + + #[snafu(display("Missing config: {}", what))] + MissingConfig { what: String }, + + #[snafu(display("Failed to get parent of path: {}", path.display()))] + Parent { path: PathBuf }, + + #[snafu( + context(false), + display("Failed to create template for cluster name: {}", source) + )] + TemplateError { source: handlebars::TemplateError }, + + #[snafu( + context(false), + display("Failed to render templated cluster 
name: {}", source) + )] + RenderError { source: handlebars::RenderError }, + } +} diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index bdda2145..30003f73 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -10,6 +10,7 @@ publish = false anyhow = "1.0" aws-config = "0.48" aws-sdk-ec2 = "0.18" +base64 = "0.13" bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } @@ -24,5 +25,6 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" serde_plain = "1" term_size = "0.3" +testsys-config = { path = "../testsys-config/", version = "0.1.0" } tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } unescape = "0.1.0" diff --git a/tools/testsys/Test.toml.example b/tools/testsys/Test.toml.example new file mode 100644 index 00000000..82190f87 --- /dev/null +++ b/tools/testsys/Test.toml.example @@ -0,0 +1,119 @@ +# This is an example testing configuration for TestSys, the tool that is used to validate +# Bottlerocket builds. + +# This section contains configuration details for all testing +[test] + +# The repo from `Infra.toml` that should be used for Bottlerocket update images. It may be useful to +# create a repo in `Infra.toml` that contains the infrastructure needed for testing +repo = "default" + +# The registry containing alternate TestSys agent images +testsys-images-registry = "public.ecr.aws/bottlerocket-test-system" + +# The URI for the EKS resource agent that should be used. An individual agent's provided URI will be +# used even if `testsys-images-registry` is present. +eks-resource-agent-image = "public.ecr.aws/bottlerocket-test-system/eks_resource_agent:v0.0.2" + +# Test Configurations +# +# Testing requirements tend to differ by variant and architecture. This configuration file provides +# the ability to set values that apply generally to a broad group of similar variants, and to +# override those values at a more granular level. For example, you can set a value for all `aws-k8s` +# variants, then override that value for 'aws-k8s-nvidia' variants, and further override the value +# for 'aws-k8s-nvidia'.aarch64 builds. +# +# The mechanism for resolving configuration values has the following order of precedence: +# +# ''.ARCH +# '' +# '-'.ARCH +# '-' +# ''.ARCH +# '' +# '-'.ARCH +# '-' +# ''.ARCH +# '' +# +# For concrete example, given a variant such as `aws-k8s-1.23-nvidia` with the architecture +# `x86_64`, configurations will have the following order of precedence: +# ['aws-k8s-1.23-nvidia'.x86_64] +# ['aws-k8s-1.23-nvidia'] +# ['aws-k8s-nvidia'.x86_64] +# ['aws-k8s-nvidia'] +# ['aws-k8s'.x86_64] +# ['aws-k8s'] +# ['aws-nvidia'.x86_64] +# ['aws-nvidia'] +# ['aws'.x86_64] +# ['aws'] +# +# Note: values passed by command line argument will take precedence over those passed by environment +# variable, and both take precedence over values set by `Test.toml`. + +# Example Configurations + +# Configuration for all variants with the `aws` platform. 
+[aws] +agent-role = "" + +# Configuration for all nvidia AWS variants on x86_64 (platform-flavor level configuration) +[aws-nvidia.x86_64] +instance-type = "p3.2xlarge" + +# Configuration for all nvidia AWS variants on aarch64 (platform-flavor level configuration) +[aws-nvidia.aarch64] +instance-type = "g5g.2xlarge" + +# Configuration for all `aws-k8s` variants testing (family level configuration). +[aws-k8s] +# A single role can be assumed by agents to test all `aws-k8s` variants in a separate +# testing account. +agent-role = "arn:aws:iam:::role/" + +# The cluster name templating can be defined for all `aws-k8s` variants. To test on ipv4 and ipv6 +# clusters, the following templates could be used. Note: TestSys does not currently support creating +# ipv6 clusters, so the ipv6 cluster must already exist. +cluster-names = ["{{arch}}-{{variant}}", "{{arch}}-{{variant}}-ipv6"] + +# A custom conformance registry may be needed for testing if image pull reliability is a concern. +conformance-registry = ".dkr.ecr.cn-north-1.amazonaws.com.cn" + +# If testing using a kind cluster, AWS credentials need to be passed as a K8s secret. +secrets = {"awsCreds" = "myAwsCredentials"} + +# Configuration for all nvidia AWS variants on x86_64 (family-flavor level configuration) +[aws-ecs-nvidia.x86_64] +instance-type = "p3.2xlarge" + +# Configuration for all nvidia AWS variants on aarch64 (family-flavor level configuration) +[aws-ecs-nvidia.aarch64] +instance-type = "g5g.2xlarge" + +# Configuration for only the `aws-k8s-1.21` variant (variant level configuration). +["aws-k8s-1.21".aarch64] +conformance-image = "" + +# Configurable values: +# +# cluster-names: +# All clusters the variant should be tested over. Cluster naming supports templated strings, and +# both `arch` and `variant` are provided as variables (`{{arch}}-{{variant}}`). +# +# instance-type: +# The instance type that should be used for testing. +# +# secrets: +# A map containing all secrets needed for resource creation and testing. +# +# agent-role: +# The role that should be assumed by each test and resource agent. +# +# conformance-image: (K8s only) +# Specify a custom image for conformance testing. For `aws-k8s` variants this will be used as a +# custom Kubernetes conformance image for Sonobuoy. +# +# conformance-registry: (K8s only) +# Specify a custom registry for conformance testing images. +# For `aws-k8s` variants this will be used as the Sonobuoy e2e registry. 
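#
# Worked example (illustrative only, not part of the template above): for an x86_64 build of the
# file's own example variant, `aws-k8s-1.23-nvidia`, the precedence rules shown earlier resolve the
# tables in this file as follows:
#
#   instance-type  -> "p3.2xlarge", from [aws-nvidia.x86_64]
#   agent-role, cluster-names, conformance-registry, secrets
#                  -> from [aws-k8s] (overriding the platform-wide agent-role in [aws]);
#                     no more specific table overrides them
#
# The `cluster-names` templates would then render to names roughly like `x86-64-aws-k8s-123-nvidia`
# and `x86-64-aws-k8s-123-nvidia-ipv6`, assuming the `{{arch}}-{{variant}}` normalization used by
# the default naming convention (underscores in the architecture become dashes, and dots are
# stripped from the variant before substitution).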
\ No newline at end of file diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 097179cd..7b981019 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -1,4 +1,4 @@ -use crate::run::{TestType, TestsysImages}; +use crate::run::TestType; use anyhow::{anyhow, Context, Result}; use bottlerocket_types::agent_config::{ ClusterType, CreationPolicy, Ec2Config, EcsClusterConfig, EcsTestConfig, EksClusterConfig, @@ -10,6 +10,7 @@ use aws_sdk_ec2::Region; use bottlerocket_variant::Variant; use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; use k8s_openapi::serde_json::Value; +use log::debug; use maplit::btreemap; use model::constants::NAMESPACE; use model::{ @@ -17,17 +18,16 @@ use model::{ TestSpec, }; use std::collections::BTreeMap; +use testsys_config::{ + rendered_cluster_name, AwsEcsVariantConfig, AwsK8sVariantConfig, TestsysImages, +}; pub(crate) struct AwsK8s { pub(crate) arch: String, pub(crate) variant: String, pub(crate) region: String, - pub(crate) assume_role: Option, - pub(crate) instance_type: Option, pub(crate) ami: String, - pub(crate) secrets: Option>, - pub(crate) kube_conformance_image: Option, - pub(crate) target_cluster_name: Option, + pub(crate) config: AwsK8sVariantConfig, pub(crate) tuf_repo: Option, pub(crate) starting_version: Option, pub(crate) migrate_starting_commit: Option, @@ -43,30 +43,57 @@ impl AwsK8s { test: TestType, testsys_images: &TestsysImages, ) -> Result> { - match test { - TestType::Conformance => { - self.sonobuoy_test_crds(testsys_images, SonobuoyMode::CertifiedConformance) - } - TestType::Quick => self.sonobuoy_test_crds(testsys_images, SonobuoyMode::Quick), - TestType::Migration => self.migration_test_crds(testsys_images).await, + let mut crds = Vec::new(); + let target_cluster_names = if self.config.cluster_names.is_empty() { + debug!("No cluster names were provided using default name"); + vec![self.default_cluster_name()] + } else { + self.config.cluster_names.clone() + }; + for template_cluster_name in target_cluster_names { + let cluster_name = &rendered_cluster_name( + template_cluster_name, + self.kube_arch(), + self.kube_variant(), + )?; + crds.append(&mut match &test { + TestType::Conformance => self.sonobuoy_test_crds( + testsys_images, + SonobuoyMode::CertifiedConformance, + cluster_name, + )?, + TestType::Quick => { + self.sonobuoy_test_crds(testsys_images, SonobuoyMode::Quick, cluster_name)? + } + TestType::Migration => { + self.migration_test_crds(cluster_name, testsys_images) + .await? + } + }) } + Ok(crds) } fn sonobuoy_test_crds( &self, testsys_images: &TestsysImages, sonobuoy_mode: SonobuoyMode, + cluster_name: &str, ) -> Result> { let crds = vec![ - self.eks_crd(testsys_images)?, - self.ec2_crd(testsys_images, None)?, - self.sonobuoy_crd("-test", sonobuoy_mode, None, testsys_images)?, + self.eks_crd(cluster_name, testsys_images)?, + self.ec2_crd(cluster_name, testsys_images, None)?, + self.sonobuoy_crd("-test", cluster_name, sonobuoy_mode, None, testsys_images)?, ]; Ok(crds) } /// Creates `Test` crds for migration testing. 
- async fn migration_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + async fn migration_test_crds( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + ) -> Result> { let ami = self .starting_image_id .as_ref() @@ -81,15 +108,25 @@ impl AwsK8s { .await?, ) .to_string(); - let eks = self.eks_crd(testsys_images)?; - let ec2 = self.ec2_crd(testsys_images, Some(ami))?; + let eks = self.eks_crd(cluster_name, testsys_images)?; + let ec2 = self.ec2_crd(cluster_name, testsys_images, Some(ami))?; + let instance_provider = ec2 + .name() + .expect("The EC2 instance provider crd is missing a name."); let mut depends_on = Vec::new(); // Start with a `quick` test to make sure instances launched properly - let initial = self.sonobuoy_crd("-1-initial", SonobuoyMode::Quick, None, testsys_images)?; + let initial = self.sonobuoy_crd( + "-1-initial", + cluster_name, + SonobuoyMode::Quick, + None, + testsys_images, + )?; depends_on.push(initial.name().context("Crd missing name")?); // Migrate instances to the target version let start_migrate = self.migration_crd( - format!("{}-2-migrate", self.cluster_name()), + format!("{}-2-migrate", cluster_name), + instance_provider.clone(), MigrationVersion::Migrated, Some(depends_on.clone()), testsys_images, @@ -98,6 +135,7 @@ impl AwsK8s { depends_on.push(start_migrate.name().context("Crd missing name")?); let migrated = self.sonobuoy_crd( "-3-migrated", + cluster_name, SonobuoyMode::Quick, Some(depends_on.clone()), testsys_images, @@ -105,7 +143,8 @@ impl AwsK8s { // Migrate instances to the starting version depends_on.push(migrated.name().context("Crd missing name")?); let end_migrate = self.migration_crd( - format!("{}-4-migrate", self.cluster_name()), + format!("{}-4-migrate", cluster_name), + instance_provider, MigrationVersion::Starting, Some(depends_on.clone()), testsys_images, @@ -114,6 +153,7 @@ impl AwsK8s { depends_on.push(end_migrate.name().context("Crd missing name")?); let last = self.sonobuoy_crd( "-5-final", + cluster_name, SonobuoyMode::Quick, Some(depends_on.clone()), testsys_images, @@ -146,13 +186,11 @@ impl AwsK8s { } /// Bottlerocket cluster naming convention. - fn cluster_name(&self) -> String { - self.target_cluster_name - .clone() - .unwrap_or_else(|| format!("{}-{}", self.kube_arch(), self.kube_variant())) + fn default_cluster_name(&self) -> String { + format!("{}-{}", self.kube_arch(), self.kube_variant()) } - fn eks_crd(&self, testsys_images: &TestsysImages) -> Result { + fn eks_crd(&self, cluster_name: &str, testsys_images: &TestsysImages) -> Result { let cluster_version = K8sVersion::parse( Variant::new(&self.variant) .context("The provided variant cannot be interpreted.")? 
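A note on the templated strings in the `ec2_crd` hunk below, where the `format!` brace escaping is
easy to misread. A minimal illustration, assuming a rendered cluster name of `x86-64-aws-k8s-123`
(an example value, not one taken from this change):

    format!("${{{}.clusterName}}", cluster_name)  // yields the literal string "${x86-64-aws-k8s-123.clusterName}"
    format!("${{{}.endpoint}}", cluster_name)     // yields "${x86-64-aws-k8s-123.endpoint}"

Because the EC2 resource lists the cluster in `depends_on`, these `${<resource-name>.<field>}`
placeholders are resolved by TestSys from the cluster resource's creation results rather than being
used as literal values.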
@@ -160,10 +198,9 @@ impl AwsK8s { .context("aws-k8s variant is missing k8s version")?, ) .map_err(|e| anyhow!(e))?; - let cluster_name = self.cluster_name(); let eks_crd = Resource { metadata: ObjectMeta { - name: Some(cluster_name.clone()), + name: Some(cluster_name.to_string()), namespace: Some(NAMESPACE.into()), labels: Some(self.labels()), ..Default::default() @@ -173,23 +210,26 @@ impl AwsK8s { conflicts_with: None, agent: Agent { name: "eks-provider".to_string(), - image: testsys_images.eks_resource.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .eks_resource_agent_image + .to_owned() + .expect("Missing default image for EKS resource agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: false, timeout: None, configuration: Some( EksClusterConfig { - cluster_name, + cluster_name: cluster_name.to_string(), creation_policy: Some(CreationPolicy::IfNotExists), region: Some(self.region.clone()), zones: None, version: Some(cluster_version), - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), } .into_map() .context("Unable to convert eks config to map")?, ), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, destruction_policy: DestructionPolicy::Never, @@ -199,12 +239,16 @@ impl AwsK8s { Ok(Crd::Resource(eks_crd)) } - fn ec2_crd(&self, testsys_images: &TestsysImages, override_ami: Option) -> Result { - let cluster_name = self.cluster_name(); + fn ec2_crd( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + override_ami: Option, + ) -> Result { let mut ec2_config = Ec2Config { node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), instance_count: Some(2), - instance_type: self.instance_type.clone(), + instance_type: self.config.instance_type.clone(), cluster_name: format!("${{{}.clusterName}}", cluster_name), region: format!("${{{}.region}}", cluster_name), instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), @@ -214,7 +258,7 @@ impl AwsK8s { certificate: Some(format!("${{{}.certificate}}", cluster_name)), cluster_dns_ip: Some(format!("${{{}.clusterDnsIp}}", cluster_name)), security_groups: vec![], - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), } .into_map() .context("Unable to create ec2 config")?; @@ -233,16 +277,19 @@ impl AwsK8s { ..Default::default() }, spec: ResourceSpec { - depends_on: Some(vec![cluster_name]), + depends_on: Some(vec![cluster_name.to_string()]), conflicts_with: None, agent: Agent { name: "ec2-provider".to_string(), - image: testsys_images.ec2_resource.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .ec2_resource_agent_image + .to_owned() + .expect("Missing default image for EC2 resource agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: false, timeout: None, configuration: Some(ec2_config), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, destruction_policy: DestructionPolicy::OnDeletion, @@ -255,11 +302,11 @@ impl AwsK8s { fn sonobuoy_crd( &self, test_name_suffix: &str, + cluster_name: &str, sonobuoy_mode: SonobuoyMode, depends_on: Option>, testsys_images: &TestsysImages, ) -> Result { - let cluster_name = self.cluster_name(); let ec2_resource_name = format!("{}-instances", cluster_name); let test_name = format!("{}{}", cluster_name, test_name_suffix); let sonobuoy = Test { @@ -275,8 +322,11 @@ impl AwsK8s 
{ retries: Some(5), agent: Agent { name: "sonobuoy-test-agent".to_string(), - image: testsys_images.sonobuoy_test.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .sonobuoy_test_agent_image + .to_owned() + .expect("Missing default image for Sonobuoy test agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: true, timeout: None, configuration: Some( @@ -285,14 +335,28 @@ impl AwsK8s { plugin: "e2e".to_string(), mode: sonobuoy_mode, kubernetes_version: None, - kube_conformance_image: self.kube_conformance_image.clone(), - e2e_repo_config_base64: None, - assume_role: self.assume_role.clone(), + kube_conformance_image: self.config.kube_conformance_image.clone(), + e2e_repo_config_base64: self.config.e2e_repo_registry.as_ref().map( + |e2e_registry| { + base64::encode(format!( + r#"buildImageRegistry: {e2e_registry} +dockerGluster: {e2e_registry} +dockerLibraryRegistry: {e2e_registry} +e2eRegistry: {e2e_registry} +e2eVolumeRegistry: {e2e_registry} +gcRegistry: {e2e_registry} +gcEtcdRegistry: {e2e_registry} +promoterE2eRegistry: {e2e_registry} +sigStorageRegistry: {e2e_registry}"# + )) + }, + ), + assume_role: self.config.assume_role.clone(), } .into_map() .context("Unable to convert sonobuoy config to `Map`")?, ), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, }, @@ -310,31 +374,23 @@ impl Migration for AwsK8s { Ok(MigrationsConfig { tuf_repo: self .tuf_repo - .as_ref() - .context("Tuf repo metadata is required for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("Tuf repo metadata is required for upgrade downgrade testing.")?, starting_version: self .starting_version - .as_ref() - .context("You must provide a starting version for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("You must provide a starting version for upgrade downgrade testing.")?, migrate_to_version: self .migrate_to_version - .as_ref() - .context("You must provide a target version for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("You must provide a target version for upgrade downgrade testing.")?, region: self.region.to_string(), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: self.capabilities.clone(), - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), }) } - fn instance_provider(&self) -> String { - let cluster_name = self.cluster_name(); - format!("{}-instances", cluster_name) - } - fn migration_labels(&self) -> BTreeMap { btreemap! { "testsys/arch".to_string() => self.arch.to_string(), @@ -355,17 +411,10 @@ pub(crate) struct AwsEcs { pub(crate) variant: String, /// The region testing should be performed in pub(crate) region: String, - /// The role that should be assumed by the agents - pub(crate) assume_role: Option, - /// The desired instance type - pub(crate) instance_type: Option, + /// Configuration for the variant + pub(crate) config: AwsEcsVariantConfig, /// The ami that should be used for quick testing pub(crate) ami: String, - /// Secrets that should be used by the agents - pub(crate) secrets: Option>, - /// The name of the target ECS cluster. If no cluster is provided, `-` will be - /// used - pub(crate) target_cluster_name: Option, // Migrations /// The TUF repos for migration testing. 
If no TUF repos are used, the default Bottlerocket @@ -390,25 +439,54 @@ impl AwsEcs { test: TestType, testsys_images: &TestsysImages, ) -> Result> { - match test { - TestType::Conformance => Err(anyhow!( - "Conformance testing for ECS variants is not supported." - )), - TestType::Quick => self.ecs_test_crds(testsys_images), - TestType::Migration => self.migration_test_crds(testsys_images).await, + let mut crds = Vec::new(); + let target_cluster_names = if self.config.cluster_names.is_empty() { + debug!("No cluster names were provided using default name"); + vec![self.default_cluster_name()] + } else { + self.config.cluster_names.clone() + }; + for template_cluster_name in target_cluster_names { + let cluster_name = &rendered_cluster_name( + template_cluster_name, + self.kube_arch(), + self.kube_variant(), + )?; + crds.append(&mut match test { + TestType::Conformance => { + return Err(anyhow!( + "Conformance testing for ECS variants is not supported." + )) + } + TestType::Quick => self.ecs_test_crds(cluster_name, testsys_images)?, + TestType::Migration => { + self.migration_test_crds(cluster_name, testsys_images) + .await? + } + }); } + + Ok(crds) } - fn ecs_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + fn ecs_test_crds( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + ) -> Result> { let crds = vec![ - self.ecs_crd(testsys_images)?, - self.ec2_crd(testsys_images, None)?, - self.ecs_test_crd("-test", None, testsys_images)?, + self.ecs_crd(cluster_name, testsys_images)?, + self.ec2_crd(cluster_name, testsys_images, None)?, + self.ecs_test_crd(cluster_name, "-test", None, testsys_images)?, ]; Ok(crds) } - async fn migration_test_crds(&self, testsys_images: &TestsysImages) -> Result> { + async fn migration_test_crds( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + ) -> Result> { let ami = self .starting_image_id .as_ref() @@ -426,29 +504,43 @@ impl AwsEcs { .await?, ) .to_string(); - let ecs = self.ecs_crd(testsys_images)?; - let ec2 = self.ec2_crd(testsys_images, Some(ami))?; + let ecs = self.ecs_crd(cluster_name, testsys_images)?; + let ec2 = self.ec2_crd(cluster_name, testsys_images, Some(ami))?; + let instance_provider = ec2 + .name() + .expect("The EC2 instance provider crd is missing a name."); let mut depends_on = Vec::new(); - let initial = self.ecs_test_crd("-1-initial", None, testsys_images)?; + let initial = self.ecs_test_crd(cluster_name, "-1-initial", None, testsys_images)?; depends_on.push(initial.name().context("Crd missing name")?); let start_migrate = self.migration_crd( - format!("{}-2-migrate", self.cluster_name()), + format!("{}-2-migrate", cluster_name), + instance_provider.clone(), MigrationVersion::Migrated, Some(depends_on.clone()), testsys_images, )?; depends_on.push(start_migrate.name().context("Crd missing name")?); - let migrated = - self.ecs_test_crd("-3-migrated", Some(depends_on.clone()), testsys_images)?; + let migrated = self.ecs_test_crd( + cluster_name, + "-3-migrated", + Some(depends_on.clone()), + testsys_images, + )?; depends_on.push(migrated.name().context("Crd missing name")?); let end_migrate = self.migration_crd( - format!("{}-4-migrate", self.cluster_name()), + format!("{}-4-migrate", cluster_name), + instance_provider, MigrationVersion::Starting, Some(depends_on.clone()), testsys_images, )?; depends_on.push(end_migrate.name().context("Crd missing name")?); - let last = self.ecs_test_crd("-5-final", Some(depends_on.clone()), testsys_images)?; + let last = self.ecs_test_crd( + cluster_name, + 
"-5-final", + Some(depends_on.clone()), + testsys_images, + )?; Ok(vec![ ecs, ec2, @@ -476,18 +568,15 @@ impl AwsEcs { self.variant.replace('.', "") } - /// Bottlerocket cluster naming convention (-, for aws-ecs-1 on x86_64, x86-64-aws-ecs-1). - fn cluster_name(&self) -> String { - self.target_cluster_name - .clone() - .unwrap_or_else(|| format!("{}-{}", self.kube_arch(), self.kube_variant())) + /// Bottlerocket cluster naming convention. + fn default_cluster_name(&self) -> String { + format!("{}-{}", self.kube_arch(), self.kube_variant()) } - fn ecs_crd(&self, testsys_images: &TestsysImages) -> Result { - let cluster_name = self.cluster_name(); + fn ecs_crd(&self, cluster_name: &str, testsys_images: &TestsysImages) -> Result { let ecs_crd = Resource { metadata: ObjectMeta { - name: Some(cluster_name.clone()), + name: Some(cluster_name.to_string()), namespace: Some(NAMESPACE.into()), labels: Some(self.labels()), ..Default::default() @@ -497,21 +586,24 @@ impl AwsEcs { conflicts_with: None, agent: Agent { name: "ecs-provider".to_string(), - image: testsys_images.ecs_resource.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .ecs_resource_agent_image + .to_owned() + .expect("Missing default image for ECS resource agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: false, timeout: None, configuration: Some( EcsClusterConfig { - cluster_name, + cluster_name: cluster_name.to_string(), region: Some(self.region.clone()), - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), vpc: None, } .into_map() .context("Unable to convert ECS config to map")?, ), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, destruction_policy: DestructionPolicy::Never, @@ -521,12 +613,16 @@ impl AwsEcs { Ok(Crd::Resource(ecs_crd)) } - fn ec2_crd(&self, testsys_images: &TestsysImages, override_ami: Option) -> Result { - let cluster_name = self.cluster_name(); + fn ec2_crd( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + override_ami: Option, + ) -> Result { let ec2_config = Ec2Config { node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), instance_count: Some(2), - instance_type: self.instance_type.clone(), + instance_type: self.config.instance_type.clone(), cluster_name: format!("${{{}.clusterName}}", cluster_name), region: format!("${{{}.region}}", cluster_name), instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), @@ -536,7 +632,7 @@ impl AwsEcs { certificate: None, cluster_dns_ip: None, security_groups: vec![], - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), } .into_map() .context("Unable to create EC2 config")?; @@ -549,16 +645,19 @@ impl AwsEcs { ..Default::default() }, spec: ResourceSpec { - depends_on: Some(vec![cluster_name]), + depends_on: Some(vec![cluster_name.to_string()]), conflicts_with: None, agent: Agent { name: "ec2-provider".to_string(), - image: testsys_images.ec2_resource.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .ec2_resource_agent_image + .to_owned() + .expect("Missing default image for EC2 resource agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: false, timeout: None, configuration: Some(ec2_config), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, destruction_policy: DestructionPolicy::OnDeletion, @@ -570,11 +669,11 @@ impl 
AwsEcs { fn ecs_test_crd( &self, + cluster_name: &str, test_name_suffix: &str, depends_on: Option>, testsys_images: &TestsysImages, ) -> Result { - let cluster_name = self.cluster_name(); let ec2_resource_name = format!("{}-instances", cluster_name); let test_name = format!("{}{}", cluster_name, test_name_suffix); let ecs_test = Test { @@ -590,15 +689,18 @@ impl AwsEcs { retries: Some(5), agent: Agent { name: "ecs-test-agent".to_string(), - image: testsys_images.ecs_test.clone(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .ecs_test_agent_image + .to_owned() + .expect("Missing default image for ECS test agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: true, timeout: None, configuration: Some( EcsTestConfig { - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), region: Some(self.region.clone()), - cluster_name: cluster_name.clone(), + cluster_name: cluster_name.to_string(), task_count: 1, subnet: format!("${{{}.publicSubnetId}}", cluster_name), task_definition_name_and_revision: None, @@ -606,7 +708,7 @@ impl AwsEcs { .into_map() .context("Unable to convert sonobuoy config to `Map`")?, ), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: None, }, }, @@ -624,31 +726,23 @@ impl Migration for AwsEcs { Ok(MigrationsConfig { tuf_repo: self .tuf_repo - .as_ref() - .context("Tuf repo metadata is required for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("Tuf repo metadata is required for upgrade downgrade testing.")?, starting_version: self .starting_version - .as_ref() - .context("You must provide a starting version for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("You must provide a starting version for upgrade downgrade testing.")?, migrate_to_version: self .migrate_to_version - .as_ref() - .context("You must provide a target version for upgrade downgrade testing.")? - .clone(), + .to_owned() + .context("You must provide a target version for upgrade downgrade testing.")?, region: self.region.to_string(), - secrets: self.secrets.clone(), + secrets: Some(self.config.secrets.clone()), capabilities: self.capabilities.clone(), - assume_role: self.assume_role.clone(), + assume_role: self.config.assume_role.clone(), }) } - fn instance_provider(&self) -> String { - let cluster_name = self.cluster_name(); - format!("{}-instances", cluster_name) - } - fn migration_labels(&self) -> BTreeMap { btreemap! { "testsys/arch".to_string() => self.arch.to_string(), @@ -687,14 +781,11 @@ trait Migration { /// Create the labels that should be used for the migration tests. fn migration_labels(&self) -> BTreeMap; - /// Return the name of the instance provider that the migration agents should use to get the - /// instance ids. - fn instance_provider(&self) -> String; - /// Create a migration test for a given arch/variant. 
fn migration_crd( &self, test_name: String, + instance_provider: String, migration_version: MigrationVersion, depends_on: Option>, testsys_images: &TestsysImages, @@ -720,7 +811,7 @@ trait Migration { .context("Unable to convert migration config to map")?; migration_config.insert( "instanceIds".to_string(), - Value::String(format!("${{{}.ids}}", self.instance_provider())), + Value::String(format!("${{{}.ids}}", instance_provider)), ); Ok(Crd::Test(Test { metadata: ObjectMeta { @@ -730,13 +821,16 @@ trait Migration { ..Default::default() }, spec: TestSpec { - resources: vec![self.instance_provider()], + resources: vec![instance_provider], depends_on, retries: None, agent: Agent { name: "migration-test-agent".to_string(), - image: testsys_images.migration_test.to_string(), - pull_secret: testsys_images.secret.clone(), + image: testsys_images + .migration_test_agent_image + .to_owned() + .expect("Missing default image for migration test agent"), + pull_secret: testsys_images.testsys_agent_pull_secret.clone(), keep_running: true, timeout: None, configuration: Some(migration_config), diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 3e2aeddd..f6eeb76a 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -12,6 +12,7 @@ use serde_plain::derive_fromstr_from_deserialize; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; +use testsys_config::{AwsEcsVariantConfig, AwsK8sVariantConfig, GenericVariantConfig, TestConfig}; /// Run a set of tests for a given arch and variant #[derive(Debug, Parser)] @@ -31,9 +32,13 @@ pub(crate) struct Run { #[clap(long, env = "PUBLISH_INFRA_CONFIG_PATH", parse(from_os_str))] infra_config_path: PathBuf, + /// The path to `Test.toml` + #[clap(long, env = "TESTSYS_TEST_CONFIG_PATH", parse(from_os_str))] + test_config_path: PathBuf, + /// Use this named repo infrastructure from Infra.toml for upgrade/downgrade testing. - #[clap(long, env = "PUBLISH_REPO", default_value = "default")] - repo: String, + #[clap(long, env = "PUBLISH_REPO")] + repo: Option, /// The path to `amis.json` #[clap(long, env = "AMI_INPUT")] @@ -45,35 +50,12 @@ pub(crate) struct Run { #[clap(long, env = "TESTSYS_TARGET_REGION")] target_region: Option, - /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). Note: - /// This is not the name of the `testsys cluster` this is the name of the cluster that tests - /// should be run on. If no cluster name is provided, the bottlerocket cluster - /// naming convention `-` will be used. - #[clap(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] - target_cluster_name: Option, - - /// The custom kube conformance image that should be used by sonobuoy. This is only applicable - /// for k8s variants. It can be omitted for non-k8s variants and it can be omitted to use the - /// default sonobuoy conformance image. - #[clap(long)] - kube_conformance_image: Option, - - /// The role that should be assumed by the agents - #[clap(long, env = "TESTSYS_ASSUME_ROLE")] - assume_role: Option, - - /// Specify the instance type that should be used. This is only applicable for aws-* variants. - /// It can be omitted for non-aws variants and can be omitted to use default instance types. 
- #[clap(long, env = "TESTSYS_INSTANCE_TYPE")] - instance_type: Option, - - /// Add secrets to the testsys agents (`--secret aws-credentials=my-secret`) - #[clap(long, short, parse(try_from_str = parse_key_val), number_of_values = 1)] - secret: Vec<(String, SecretName)>, - #[clap(flatten)] agent_images: TestsysImages, + #[clap(flatten)] + config: CliConfig, + // Migrations /// Override the starting image used for migrations. The image will be pulled from available /// amis in the users account if no override is provided. @@ -101,16 +83,65 @@ pub(crate) struct Run { migration_target_version: Option, } +/// This is a CLI parsable version of `testsys_config::GenericVariantConfig`. +#[derive(Debug, Parser)] +struct CliConfig { + /// The repo containing images necessary for conformance testing. It may be omitted to use the + /// default conformance image registry. + #[clap(long, env = "TESTSYS_CONFORMANCE_REGISTRY")] + conformance_registry: Option, + + /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). Note: + /// This is not the name of the `testsys cluster` this is the name of the cluster that tests + /// should be run on. If no cluster name is provided, the bottlerocket cluster + /// naming convention `{{arch}}-{{variant}}` will be used. + #[clap(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] + target_cluster_name: Option, + + /// The image that should be used for conformance testing. It may be omitted to use the default + /// testing image. + #[clap(long, env = "TESTSYS_CONFORMANCE_IMAGE")] + conformance_image: Option, + + /// The role that should be assumed by the agents + #[clap(long, env = "TESTSYS_ASSUME_ROLE")] + assume_role: Option, + + /// Specify the instance type that should be used. This is only applicable for aws-* variants. + /// It can be omitted for non-aws variants and can be omitted to use default instance types. 
+ #[clap(long, env = "TESTSYS_INSTANCE_TYPE")] + instance_type: Option, + + /// Add secrets to the testsys agents (`--secret aws-credentials=my-secret`) + #[clap(long, short, parse(try_from_str = parse_key_val), number_of_values = 1)] + secret: Vec<(String, SecretName)>, +} + +impl From for GenericVariantConfig { + fn from(val: CliConfig) -> Self { + GenericVariantConfig { + cluster_names: val.target_cluster_name.into_iter().collect(), + instance_type: val.instance_type, + secrets: val.secret.into_iter().collect(), + agent_role: val.assume_role, + conformance_image: val.conformance_image, + conformance_registry: val.conformance_registry, + } + } +} + impl Run { pub(crate) async fn run(self, client: TestManager) -> Result<()> { let variant = Variant::new(&self.variant).context("The provided variant cannot be interpreted.")?; debug!("Using variant '{}'", variant); - let secrets = if self.secret.is_empty() { - None - } else { - Some(self.secret.into_iter().collect()) - }; + + // Use Test.toml or default + let test_config = TestConfig::from_path_or_default(&self.test_config_path) + .context("Unable to read test config")?; + + let test_opts = test_config.test.as_ref().cloned().unwrap_or_default(); + // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true) .context("Unable to read infra config")?; @@ -133,7 +164,12 @@ impl Run { let repo_config = infra_config .repo .unwrap_or_default() - .get(&self.repo) + .get( + &self + .repo + .or(test_opts.repo) + .unwrap_or_else(|| "default".to_string()), + ) .and_then(|repo| { if let (Some(metadata_base_url), Some(targets_url)) = (&repo.metadata_base_url, &repo.targets_url) @@ -150,21 +186,32 @@ impl Run { } }); + let images = vec![ + Some(self.agent_images.into()), + Some(test_opts.testsys_images), + test_opts + .testsys_image_registry + .map(testsys_config::TestsysImages::new), + Some(testsys_config::TestsysImages::public_images()), + ] + .into_iter() + .flatten() + .fold(Default::default(), testsys_config::TestsysImages::merge); + let crds = match variant.family() { "aws-k8s" => { debug!("Variant is in 'aws-k8s' family"); let bottlerocket_ami = ami(&self.ami_input, ®ion)?; debug!("Using ami '{}'", bottlerocket_ami); + let config: AwsK8sVariantConfig = test_config + .reduced_config(&variant, &self.arch, Some(self.config.into())) + .into(); let aws_k8s = AwsK8s { arch: self.arch, variant: self.variant, region, - assume_role: self.assume_role, - instance_type: self.instance_type, + config, ami: bottlerocket_ami.to_string(), - secrets, - kube_conformance_image: self.kube_conformance_image, - target_cluster_name: self.target_cluster_name, tuf_repo: repo_config, starting_version: self.migration_starting_version, starting_image_id: self.starting_image_id, @@ -173,23 +220,22 @@ impl Run { migrate_starting_commit: self.migration_starting_commit, }; debug!("Creating crds for aws-k8s testing"); - aws_k8s - .create_crds(self.test_flavor, &self.agent_images) - .await? + + aws_k8s.create_crds(self.test_flavor, &images).await? 
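// Precedence of the merged `images` used above: `TestsysImages::merge` keeps `self`'s value
// (`self.field.or(other.field)`), and the fold seeds an empty default and merges the sources in
// order. An agent image passed on the command line (or via its TESTSYS_* environment variable)
// therefore overrides agent images set explicitly in `Test.toml` (e.g. `eks-resource-agent-image`),
// which override images derived from the registry configured in `Test.toml`, with the public ECR
// defaults filling anything still unset.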
} "aws-ecs" => { debug!("Variant is in 'aws-ecs' family"); let bottlerocket_ami = ami(&self.ami_input, ®ion)?; debug!("Using ami '{}'", bottlerocket_ami); + let config: AwsEcsVariantConfig = test_config + .reduced_config(&variant, &self.arch, Some(self.config.into())) + .into(); let aws_ecs = AwsEcs { arch: self.arch, variant: self.variant, region, - assume_role: self.assume_role, - instance_type: self.instance_type, + config, ami: bottlerocket_ami.to_string(), - secrets, - target_cluster_name: self.target_cluster_name, tuf_repo: repo_config, starting_version: self.migration_starting_version, starting_image_id: self.starting_image_id, @@ -198,9 +244,7 @@ impl Run { capabilities: None, }; debug!("Creating crds for aws-ecs testing"); - aws_ecs - .create_crds(self.test_flavor, &self.agent_images) - .await? + aws_ecs.create_crds(self.test_flavor, &images).await? } other => { return Err(anyhow!( @@ -266,55 +310,47 @@ pub(crate) struct Image { pub(crate) id: String, } +/// This is a CLI parsable version of `testsys_config::TestsysImages` #[derive(Debug, Parser)] pub(crate) struct TestsysImages { /// EKS resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "eks-resource-agent-image", - env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/eks-resource-agent:v0.0.2" + env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE" )] - pub(crate) eks_resource: String, + pub(crate) eks_resource: Option, /// ECS resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "ecs-resource-agent-image", - env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/ecs-resource-agent:v0.0.2" + env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE" )] - pub(crate) ecs_resource: String, + pub(crate) ecs_resource: Option, /// EC2 resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "ec2-resource-agent-image", - env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/ec2-resource-agent:v0.0.2" + env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE" )] - pub(crate) ec2_resource: String, + pub(crate) ec2_resource: Option, /// Sonobuoy test agent URI. If not provided the latest released test agent will be used. #[clap( long = "sonobuoy-test-agent-image", - env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/sonobuoy-test-agent:v0.0.2" + env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE" )] - pub(crate) sonobuoy_test: String, + pub(crate) sonobuoy_test: Option, /// ECS test agent URI. If not provided the latest released test agent will be used. - #[clap( - long = "ecs-test-agent-image", - env = "TESTSYS_ECS_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/ecs-test-agent:v0.0.2" - )] - pub(crate) ecs_test: String, + #[clap(long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE")] + pub(crate) ecs_test: Option, /// Migration test agent URI. If not provided the latest released test agent will be used. #[clap( long = "migration-test-agent-image", - env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/migration-test-agent:v0.0.2" + env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE" )] - pub(crate) migration_test: String, + pub(crate) migration_test: Option, /// Images pull secret. 
This is the name of a Kubernetes secret that will be used to /// pull the container image from a private registry. For example, if you created a pull secret @@ -323,3 +359,17 @@ pub(crate) struct TestsysImages { #[clap(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")] pub(crate) secret: Option, } + +impl From for testsys_config::TestsysImages { + fn from(val: TestsysImages) -> Self { + testsys_config::TestsysImages { + eks_resource_agent_image: val.eks_resource, + ecs_resource_agent_image: val.ecs_resource, + ec2_resource_agent_image: val.ec2_resource, + sonobuoy_test_agent_image: val.sonobuoy_test, + ecs_test_agent_image: val.ecs_test, + migration_test_agent_image: val.migration_test, + testsys_agent_pull_secret: val.secret, + } + } +} From ca560d692032cb74483571d2ea0af601f6c8edea Mon Sep 17 00:00:00 2001 From: ecpullen Date: Thu, 27 Oct 2022 17:11:57 +0000 Subject: [PATCH 0781/1356] testsys: Accept `TESTSYS_STARTING_IMAGE_ID` testsys the cli was able to handle using the `starting_image_id` argument, but with `cargo make`, the env variable `TESTSYS_STARTING_COMMIT` is always passed in, so the conflicting args made `TESTSYS_STARTING_IMAGE_ID` un-usable. This fix removes the conflicts with, and uses the `TESTSYS_STARTING_IMAGE_ID` whenever it is provided. --- tools/testsys/src/aws_resources.rs | 14 ++++++-------- tools/testsys/src/run.rs | 6 +----- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 7b981019..5ff00688 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -94,20 +94,18 @@ impl AwsK8s { cluster_name: &str, testsys_images: &TestsysImages, ) -> Result> { - let ami = self - .starting_image_id - .as_ref() - .unwrap_or( - &get_ami_id( + let ami = if let Some(ami) = self.starting_image_id.to_owned() { + ami + } else { + get_ami_id( format!( "bottlerocket-{}-{}-{}-{}", self.variant, self.arch, self.starting_version.as_ref().context("The starting version must be provided for migration testing")?, self.migrate_starting_commit.as_ref().context("The commit for the starting version must be provided if the starting image id is not")? ), & self.arch, self.region.to_string(), ) - .await?, - ) - .to_string(); + .await? + }; let eks = self.eks_crd(cluster_name, testsys_images)?; let ec2 = self.ec2_crd(cluster_name, testsys_images, Some(ami))?; let instance_provider = ec2 diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index f6eeb76a..472cbd6d 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -70,11 +70,7 @@ pub(crate) struct Run { /// The commit id of the starting version for migrations. This is required for all migrations /// tests unless `starting-image-id` is provided. This is the version that will be created and /// migrated to `migration-target-version`. - #[clap( - long, - env = "TESTSYS_STARTING_COMMIT", - conflicts_with = "starting-image-id" - )] + #[clap(long, env = "TESTSYS_STARTING_COMMIT")] migration_starting_commit: Option, /// The target version for migrations. This is required for all migration tests. This is the From fddd3cab2b27ae02fbd8858ba1105770e792de6a Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Thu, 27 Oct 2022 16:06:21 +0000 Subject: [PATCH 0782/1356] build: fix EFI file system to fit partition size rpm2img creates the various file systems comprising a Bottlerocket image, among them the FAT file system that serves as the EFI System Partition (ESP). 
rpm2img invokes mkfs.vfat to create the ESP, but accidentally causes it to size the file system structures for double the size of the actual ESP. The mistake is mostly harmless as proven by working images and current mode of operation that is essentially read-only. However, it can lead to unexpected problems when trying to write to the ESP. Depending on the attempted action, failure modes include receiving SIGBUS, receiving EIO, receiving EINVAL, corrupting files, seeing messages like "lost async page write" or "attempt to access beyond end of device" in the kernel log. The cause of the bug is a somewhat unfortunate interpretation of the file system size by mkfs.vfat. While its second argument is referred to as a block count, it is not actually related to the requested sector size, but always expressed in units of 1 KiB. rpm2img expressed the file system size in units of sectors (512 bytes), thereby oversizing the file system by a factor of two. Signed-off-by: Markus Boehme --- tools/rpm2img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/rpm2img b/tools/rpm2img index 68241245..b62beb92 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -195,7 +195,7 @@ fi mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count="${partsize[EFI-A]}" -mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 2048)) +mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 1024)) mmd -i "${EFI_IMAGE}" ::/EFI mmd -i "${EFI_IMAGE}" ::/EFI/BOOT mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT From 13a4485aa1562d4d371c927f1d6e86bbe1ade93c Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 6 Sep 2022 19:30:54 +0000 Subject: [PATCH 0783/1356] testsys: Enable multiple tests on a cluster TestSys uses labels on the crd's to determine which ec2 resources rely on a specific cluster. If a new instance provider requires a cluster that is already created, TestSys marks the new instance provider as conflicting with all other ones which prevents the new instances from being created until the other instance providers have been deleted. --- tools/testsys/src/aws_resources.rs | 143 ++++++++++++++++++++++++----- tools/testsys/src/run.rs | 11 ++- 2 files changed, 127 insertions(+), 27 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 5ff00688..b44a3b42 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -12,7 +12,9 @@ use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; use k8s_openapi::serde_json::Value; use log::debug; use maplit::btreemap; +use model::clients::{AllowNotFound, CrdClient}; use model::constants::NAMESPACE; +use model::test_manager::{SelectionParams, TestManager}; use model::{ Agent, Configuration, Crd, DestructionPolicy, Resource, ResourceSpec, SecretName, Test, TestSpec, @@ -40,6 +42,7 @@ impl AwsK8s { /// Create the necessary test and resource crds for the specified test type. pub(crate) async fn create_crds( &self, + client: &TestManager, test: TestType, testsys_images: &TestsysImages, ) -> Result> { @@ -56,18 +59,56 @@ impl AwsK8s { self.kube_arch(), self.kube_variant(), )?; + // Check for existing cluster crd + let cluster_exists = client + .resource_client() + .get(&cluster_name) + .await + .allow_not_found(|_| ())? + .is_some(); + if !cluster_exists { + debug!("Cluster crd does not exist"); + crds.push(self.eks_crd(cluster_name, testsys_images, &test)?) 
+ } + // Check for conflicting resources (ones that use the same cluster) + let conflicting_resources: Vec = if cluster_exists { + client + .list(&SelectionParams::Label(format!( + "testsys/cluster={}, testsys/type=instances", + cluster_name + ))) + .await? + .into_iter() + // Retrieve the name from each resource + .filter_map(|crd| crd.name()) + .collect() + } else { + Default::default() + }; + crds.append(&mut match &test { TestType::Conformance => self.sonobuoy_test_crds( + &test, testsys_images, SonobuoyMode::CertifiedConformance, cluster_name, + &conflicting_resources, + )?, + TestType::Quick => self.sonobuoy_test_crds( + &test, + testsys_images, + SonobuoyMode::Quick, + cluster_name, + &conflicting_resources, )?, - TestType::Quick => { - self.sonobuoy_test_crds(testsys_images, SonobuoyMode::Quick, cluster_name)? - } TestType::Migration => { - self.migration_test_crds(cluster_name, testsys_images) - .await? + self.migration_test_crds( + cluster_name, + &test, + &conflicting_resources, + testsys_images, + ) + .await? } }) } @@ -76,14 +117,30 @@ impl AwsK8s { fn sonobuoy_test_crds( &self, + test_type: &TestType, testsys_images: &TestsysImages, sonobuoy_mode: SonobuoyMode, cluster_name: &str, + conflicting_resources: &[String], ) -> Result> { let crds = vec![ - self.eks_crd(cluster_name, testsys_images)?, - self.ec2_crd(cluster_name, testsys_images, None)?, - self.sonobuoy_crd("-test", cluster_name, sonobuoy_mode, None, testsys_images)?, + self.ec2_crd( + cluster_name, + "test", + test_type, + conflicting_resources, + testsys_images, + None, + )?, + self.sonobuoy_crd( + "-test", + cluster_name, + test_type, + "test", + sonobuoy_mode, + None, + testsys_images, + )?, ]; Ok(crds) } @@ -92,6 +149,8 @@ impl AwsK8s { async fn migration_test_crds( &self, cluster_name: &str, + test_type: &TestType, + conflicting_resources: &[String], testsys_images: &TestsysImages, ) -> Result> { let ami = if let Some(ami) = self.starting_image_id.to_owned() { @@ -106,8 +165,14 @@ impl AwsK8s { ) .await? }; - let eks = self.eks_crd(cluster_name, testsys_images)?; - let ec2 = self.ec2_crd(cluster_name, testsys_images, Some(ami))?; + let ec2 = self.ec2_crd( + cluster_name, + "migration", + test_type, + conflicting_resources, + testsys_images, + Some(ami), + )?; let instance_provider = ec2 .name() .expect("The EC2 instance provider crd is missing a name."); @@ -116,6 +181,8 @@ impl AwsK8s { let initial = self.sonobuoy_crd( "-1-initial", cluster_name, + test_type, + "migration", SonobuoyMode::Quick, None, testsys_images, @@ -134,6 +201,8 @@ impl AwsK8s { let migrated = self.sonobuoy_crd( "-3-migrated", cluster_name, + test_type, + "migration", SonobuoyMode::Quick, Some(depends_on.clone()), testsys_images, @@ -152,12 +221,13 @@ impl AwsK8s { let last = self.sonobuoy_crd( "-5-final", cluster_name, + test_type, + "migration", SonobuoyMode::Quick, Some(depends_on.clone()), testsys_images, )?; Ok(vec![ - eks, ec2, initial, start_migrate, @@ -168,10 +238,22 @@ impl AwsK8s { } /// Labels help filter test results with `testsys status`. - fn labels(&self) -> BTreeMap { + fn labels( + &self, + cluster_name: S1, + testsys_type: S2, + test_type: &TestType, + ) -> BTreeMap + where + S1: Into, + S2: Into, + { btreemap! 
{ "testsys/arch".to_string() => self.arch.to_string(), "testsys/variant".to_string() => self.variant.to_string(), + "testsys/cluster".to_string() => cluster_name.into(), + "testsys/type".to_string() => testsys_type.into(), + "testsys/test".to_string() => test_type.to_string(), } } @@ -188,7 +270,12 @@ impl AwsK8s { format!("{}-{}", self.kube_arch(), self.kube_variant()) } - fn eks_crd(&self, cluster_name: &str, testsys_images: &TestsysImages) -> Result { + fn eks_crd( + &self, + cluster_name: &str, + testsys_images: &TestsysImages, + test_type: &TestType, + ) -> Result { let cluster_version = K8sVersion::parse( Variant::new(&self.variant) .context("The provided variant cannot be interpreted.")? @@ -200,7 +287,7 @@ impl AwsK8s { metadata: ObjectMeta { name: Some(cluster_name.to_string()), namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), + labels: Some(self.labels(cluster_name, "cluster", test_type)), ..Default::default() }, spec: ResourceSpec { @@ -240,6 +327,9 @@ impl AwsK8s { fn ec2_crd( &self, cluster_name: &str, + resource_name_suffix: &str, + test_type: &TestType, + conflicting_resources: &[String], testsys_images: &TestsysImages, override_ami: Option, ) -> Result { @@ -269,14 +359,17 @@ impl AwsK8s { let ec2_resource = Resource { metadata: ObjectMeta { - name: Some(format!("{}-instances", cluster_name)), + name: Some(format!( + "{}-instances-{}", + cluster_name, resource_name_suffix + )), namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), + labels: Some(self.labels(cluster_name, "instances", test_type)), ..Default::default() }, spec: ResourceSpec { depends_on: Some(vec![cluster_name.to_string()]), - conflicts_with: None, + conflicts_with: Some(conflicting_resources.into()), agent: Agent { name: "ec2-provider".to_string(), image: testsys_images @@ -290,28 +383,31 @@ impl AwsK8s { secrets: Some(self.config.secrets.clone()), capabilities: None, }, - destruction_policy: DestructionPolicy::OnDeletion, + destruction_policy: DestructionPolicy::OnTestSuccess, }, status: None, }; Ok(Crd::Resource(ec2_resource)) } + #[allow(clippy::too_many_arguments)] fn sonobuoy_crd( &self, test_name_suffix: &str, cluster_name: &str, + test_type: &TestType, + ec2_resource_name_suffix: &str, sonobuoy_mode: SonobuoyMode, depends_on: Option>, testsys_images: &TestsysImages, ) -> Result { - let ec2_resource_name = format!("{}-instances", cluster_name); + let ec2_resource_name = format!("{}-instances-{}", cluster_name, ec2_resource_name_suffix); let test_name = format!("{}{}", cluster_name, test_name_suffix); let sonobuoy = Test { metadata: ObjectMeta { name: Some(test_name), namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), + labels: Some(self.labels(cluster_name, "test", test_type)), ..Default::default() }, spec: TestSpec { @@ -393,7 +489,8 @@ impl Migration for AwsK8s { btreemap! 
{ "testsys/arch".to_string() => self.arch.to_string(), "testsys/variant".to_string() => self.variant.to_string(), - "testsys/flavor".to_string() => "updown".to_string(), + "testsys/type".to_string() => "test".to_string(), + "testsys/test".to_string() => TestType::Migration.to_string(), } } } @@ -604,7 +701,7 @@ impl AwsEcs { secrets: Some(self.config.secrets.clone()), capabilities: None, }, - destruction_policy: DestructionPolicy::Never, + destruction_policy: DestructionPolicy::OnTestSuccess, }, status: None, }; @@ -658,7 +755,7 @@ impl AwsEcs { secrets: Some(self.config.secrets.clone()), capabilities: None, }, - destruction_policy: DestructionPolicy::OnDeletion, + destruction_policy: DestructionPolicy::OnTestSuccess, }, status: None, }; diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 472cbd6d..81d2c6f1 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -7,8 +7,8 @@ use log::{debug, info}; use model::test_manager::TestManager; use model::SecretName; use pubsys_config::InfraConfig; -use serde::Deserialize; -use serde_plain::derive_fromstr_from_deserialize; +use serde::{Deserialize, Serialize}; +use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; @@ -217,7 +217,9 @@ impl Run { }; debug!("Creating crds for aws-k8s testing"); - aws_k8s.create_crds(self.test_flavor, &images).await? + aws_k8s + .create_crds(&client, self.test_flavor, &images) + .await? } "aws-ecs" => { debug!("Variant is in 'aws-ecs' family"); @@ -282,7 +284,7 @@ fn parse_key_val(s: &str) -> Result<(String, SecretName)> { Ok((key.to_string(), SecretName::new(value)?)) } -#[derive(Debug, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub(crate) enum TestType { /// Conformance testing is a full integration test that asserts that Bottlerocket is working for @@ -300,6 +302,7 @@ pub(crate) enum TestType { } derive_fromstr_from_deserialize!(TestType); +derive_display_from_serialize!(TestType); #[derive(Clone, Debug, Deserialize)] pub(crate) struct Image { From 25f0676682bf65e7b56f6b185aa3bbe5aa9d6e17 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Mon, 31 Oct 2022 19:15:57 +0000 Subject: [PATCH 0784/1356] testsys: Update to v0.0.3 --- tools/Cargo.lock | 60 +++++++++++++++++++++--------- tools/deny.toml | 3 ++ tools/testsys-config/Cargo.toml | 2 +- tools/testsys-config/src/lib.rs | 2 +- tools/testsys/Cargo.toml | 6 +-- tools/testsys/src/aws_resources.rs | 47 ++++++++++++++--------- tools/testsys/src/install.rs | 2 +- 7 files changed, 81 insertions(+), 41 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 8fef72b8..e9974565 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -608,12 +608,16 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.0.2" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.2#dfd5bc90a481beb05e7a64ac4b20c86fd2491d9d" +version = "0.0.3" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.3#6c904544b08e4b0f5916c6a2f5166bfe7950841e" dependencies = [ + "builder-derive", + "configuration-derive", "model", "serde", + "serde_json", "serde_plain", + "serde_yaml", ] [[package]] @@ -634,6 +638,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "builder-derive" +version = "0.0.1" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.3#6c904544b08e4b0f5916c6a2f5166bfe7950841e" +dependencies = [ + 
"proc-macro2", + "quote", + "serde", + "syn", +] + [[package]] name = "buildsys" version = "0.1.0" @@ -804,6 +819,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "configuration-derive" +version = "0.0.1" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.3#6c904544b08e4b0f5916c6a2f5166bfe7950841e" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "console" version = "0.15.2" @@ -1556,9 +1580,9 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ae2c04fcee6b01b04e3aadd56bb418932c8e0a9d8a93f48bc68c6bdcdb559d" +checksum = "6d9455388f4977de4d0934efa9f7d36296295537d774574113a20f6082de03da" dependencies = [ "base64", "bytes", @@ -1573,9 +1597,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.74.0" +version = "0.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a527a8001a61d8d470dab27ac650889938760c243903e7cd90faaf7c60a34bdd" +checksum = "9bb19108692aeafebb108fd0a1c381c06ac4c03859652599420975165e939b8a" dependencies = [ "k8s-openapi", "kube-client", @@ -1585,9 +1609,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.74.0" +version = "0.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0d48f42df4e8342e9f488c4b97e3759d0042c4e7ab1a853cc285adb44409480" +checksum = "97e1a80ecd1b1438a2fc004549e155d47250b9e01fbfcf4cfbe9c8b56a085593" dependencies = [ "base64", "bytes", @@ -1623,9 +1647,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.74.0" +version = "0.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91f56027f862fdcad265d2e9616af416a355e28a1c620bb709083494753e070d" +checksum = "f4d780f2bb048eeef64a4c6b2582d26a0fe19e30b4d3cc9e081616e1779c5d47" dependencies = [ "chrono", "form_urlencoded", @@ -1641,9 +1665,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.74.0" +version = "0.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d74121eb41af4480052901f31142d8d9bbdf1b7c6b856da43bcb02f5b1b177" +checksum = "98459d53b2841237392cd6959956185b2df15c19d32c3b275ed6ca7b7ee1adae" dependencies = [ "darling", "proc-macro2", @@ -1748,8 +1772,8 @@ dependencies = [ [[package]] name = "model" -version = "0.0.2" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.2#dfd5bc90a481beb05e7a64ac4b20c86fd2491d9d" +version = "0.0.3" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.3#6c904544b08e4b0f5916c6a2f5166bfe7950841e" dependencies = [ "async-recursion", "async-trait", @@ -2477,9 +2501,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.11" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307" +checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" dependencies = [ "dyn-clone", "schemars_derive", @@ -2489,9 +2513,9 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.11" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9" +checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" dependencies = [ "proc-macro2", "quote", diff --git a/tools/deny.toml b/tools/deny.toml index 03982790..e8551a4d 100644 
--- a/tools/deny.toml +++ b/tools/deny.toml @@ -68,6 +68,9 @@ skip-tree = [ # aws-smithy-client uses an older hyper-rustls { name = "aws-smithy-client", version = "0.48.0" }, + + # testsys v0.0.3 included wildcard deps will be fixed in v0.0.4 + { name = "bottlerocket-types", version="0.0.3" }, ] [sources] diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index a3287ad9..ccda8161 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -13,7 +13,7 @@ home = "0.5" lazy_static = "1.4" log = "0.4" maplit="1.0" -model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.3", tag = "v0.0.3"} serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.8.17" snafu = "0.7" diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index c2c9f163..0e003d88 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -279,7 +279,7 @@ pub struct TestsysImages { pub testsys_agent_pull_secret: Option, } -const AGENT_VERSION: &str = "v0.0.2"; +const AGENT_VERSION: &str = "v0.0.3"; impl TestsysImages { /// Create an images config for a specific registry. diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 30003f73..4dfdafa3 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -11,15 +11,15 @@ anyhow = "1.0" aws-config = "0.48" aws-sdk-ec2 = "0.18" base64 = "0.13" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.3", tag = "v0.0.3"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } env_logger = "0.9" futures = "0.3.8" -k8s-openapi = { version = "0.15", features = ["v1_20", "api"], default-features = false } +k8s-openapi = { version = "0.16", features = ["v1_20", "api"], default-features = false } log = "0.4" maplit = "1.0.2" -model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.2", tag = "v0.0.2"} +model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.3", tag = "v0.0.3"} pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } serde = { version = "1", features = ["derive"] } serde_json = "1" diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index b44a3b42..c8b265c4 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -2,7 +2,7 @@ use crate::run::TestType; use anyhow::{anyhow, Context, Result}; use bottlerocket_types::agent_config::{ ClusterType, CreationPolicy, Ec2Config, EcsClusterConfig, EcsTestConfig, EksClusterConfig, - K8sVersion, MigrationConfig, SonobuoyConfig, SonobuoyMode, TufRepoConfig, + EksctlConfig, K8sVersion, MigrationConfig, SonobuoyConfig, SonobuoyMode, TufRepoConfig, }; use aws_sdk_ec2::model::{Filter, Image}; @@ -304,18 +304,20 @@ impl AwsK8s { timeout: None, configuration: Some( EksClusterConfig { - cluster_name: cluster_name.to_string(), creation_policy: Some(CreationPolicy::IfNotExists), - region: Some(self.region.clone()), - zones: None, - version: Some(cluster_version), assume_role: self.config.assume_role.clone(), + config: EksctlConfig::Args { + 
cluster_name: cluster_name.to_string(), + region: Some(self.region.clone()), + zones: None, + version: Some(cluster_version), + }, } .into_map() .context("Unable to convert eks config to map")?, ), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, destruction_policy: DestructionPolicy::Never, }, @@ -336,11 +338,11 @@ impl AwsK8s { let mut ec2_config = Ec2Config { node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), instance_count: Some(2), - instance_type: self.config.instance_type.clone(), + instance_types: self.config.instance_type.iter().cloned().collect(), cluster_name: format!("${{{}.clusterName}}", cluster_name), region: format!("${{{}.region}}", cluster_name), instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), - subnet_id: format!("${{{}.privateSubnetId}}", cluster_name), + subnet_ids: Default::default(), cluster_type: ClusterType::Eks, endpoint: Some(format!("${{{}.endpoint}}", cluster_name)), certificate: Some(format!("${{{}.certificate}}", cluster_name)), @@ -357,6 +359,11 @@ impl AwsK8s { Value::String(format!("${{{}.securityGroups}}", cluster_name)), ); + ec2_config.insert( + "subnetIds".to_owned(), + Value::String(format!("${{{}.privateSubnetIds}}", cluster_name)), + ); + let ec2_resource = Resource { metadata: ObjectMeta { name: Some(format!( @@ -381,7 +388,7 @@ impl AwsK8s { timeout: None, configuration: Some(ec2_config), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, destruction_policy: DestructionPolicy::OnTestSuccess, }, @@ -451,7 +458,7 @@ sigStorageRegistry: {e2e_registry}"# .context("Unable to convert sonobuoy config to `Map`")?, ), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, }, status: None, @@ -694,12 +701,13 @@ impl AwsEcs { region: Some(self.region.clone()), assume_role: self.config.assume_role.clone(), vpc: None, + iam_instance_profile_name: None, } .into_map() .context("Unable to convert ECS config to map")?, ), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, destruction_policy: DestructionPolicy::OnTestSuccess, }, @@ -714,14 +722,14 @@ impl AwsEcs { testsys_images: &TestsysImages, override_ami: Option, ) -> Result { - let ec2_config = Ec2Config { + let mut ec2_config = Ec2Config { node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), instance_count: Some(2), - instance_type: self.config.instance_type.clone(), + instance_types: self.config.instance_type.iter().cloned().collect(), cluster_name: format!("${{{}.clusterName}}", cluster_name), region: format!("${{{}.region}}", cluster_name), instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), - subnet_id: format!("${{{}.publicSubnetId}}", cluster_name), + subnet_ids: Default::default(), cluster_type: ClusterType::Ecs, endpoint: None, certificate: None, @@ -732,6 +740,11 @@ impl AwsEcs { .into_map() .context("Unable to create EC2 config")?; + ec2_config.insert( + "subnetIds".to_owned(), + Value::String(format!("${{{}.privateSubnetIds}}", cluster_name)), + ); + let ec2_resource = Resource { metadata: ObjectMeta { name: Some(format!("{}-instances", cluster_name)), @@ -753,7 +766,7 @@ impl AwsEcs { timeout: None, configuration: Some(ec2_config), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, destruction_policy: DestructionPolicy::OnTestSuccess, }, @@ -797,14 +810,13 @@ impl AwsEcs { region: Some(self.region.clone()), cluster_name: 
cluster_name.to_string(), task_count: 1, - subnet: format!("${{{}.publicSubnetId}}", cluster_name), task_definition_name_and_revision: None, } .into_map() .context("Unable to convert sonobuoy config to `Map`")?, ), secrets: Some(self.config.secrets.clone()), - capabilities: None, + ..Default::default() }, }, status: None, @@ -931,6 +943,7 @@ trait Migration { configuration: Some(migration_config), secrets: migration.secrets.clone(), capabilities: migration.capabilities, + ..Default::default() }, }, status: None, diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs index b87ef3f2..ce54f611 100644 --- a/tools/testsys/src/install.rs +++ b/tools/testsys/src/install.rs @@ -21,7 +21,7 @@ pub(crate) struct Install { #[clap( long = "controller-uri", env = "TESTSYS_CONTROLLER_IMAGE", - default_value = "public.ecr.aws/bottlerocket-test-system/controller:v0.0.2" + default_value = "public.ecr.aws/bottlerocket-test-system/controller:v0.0.3" )] controller_uri: String, } From 9a10955d0b3ed565f6a038063031039352bb8774 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 24 Sep 2022 23:49:00 +0000 Subject: [PATCH 0785/1356] buildsys: track build artifacts in subdirectories Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 113 ++++++++++++++++++++-------- tools/buildsys/src/builder/error.rs | 23 +++++- 2 files changed, 104 insertions(+), 32 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 93915226..d203daf1 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -14,6 +14,7 @@ use rand::Rng; use regex::Regex; use sha2::{Digest, Sha512}; use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::HashSet; use std::env; use std::fs::{self, File}; use std::num::NonZeroU16; @@ -372,32 +373,41 @@ fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result const MARKER_EXTENSION: &str = ".buildsys_marker"; /// Copy build artifacts to the output directory. -/// Currently we expect a "flat" structure where all files are in the same directory. /// Before we copy each file, we create a corresponding marker file to record its existence. fn copy_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> where P: AsRef, { - fn is_artifact(entry: &DirEntry) -> bool { - entry.file_type().is_file() + fn has_artifacts(entry: &DirEntry) -> bool { + let is_dir = entry.path().is_dir(); + let is_file = entry.file_type().is_file(); + let is_not_marker = is_file && entry .file_name() .to_str() .map(|s| !s.ends_with(MARKER_EXTENSION)) - .unwrap_or(false) + .unwrap_or(false); + is_dir || is_not_marker } - for artifact_file in find_files(&build_dir, is_artifact) { + for artifact_file in find_files(&build_dir, has_artifacts) { let mut marker_file = artifact_file.clone().into_os_string(); marker_file.push(MARKER_EXTENSION); File::create(&marker_file).context(error::FileCreateSnafu { path: &marker_file })?; let mut output_file: PathBuf = output_dir.as_ref().into(); - output_file.push( - artifact_file - .file_name() - .context(error::BadFilenameSnafu { path: &output_file })?, - ); + output_file.push(artifact_file.strip_prefix(&build_dir).context( + error::StripPathPrefixSnafu { + path: &marker_file, + prefix: build_dir.as_ref(), + }, + )?); + + let parent_dir = output_file + .parent() + .context(error::BadDirectorySnafu { path: &output_file })?; + fs::create_dir_all(&parent_dir) + .context(error::DirectoryCreateSnafu { path: &parent_dir })?; fs::rename(&artifact_file, &output_file).context(error::FileRenameSnafu { old_path: &artifact_file, @@ -411,34 +421,77 @@ where /// Remove build artifacts from the output directory. /// Any marker file we find could have a corresponding file that should be cleaned up. /// We also clean up the marker files so they do not accumulate across builds. +/// For the same reason, if a directory is empty after build artifacts, marker files, and other +/// empty directories have been removed, then that directory will also be removed. fn clean_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> where P: AsRef, { - fn is_marker(entry: &DirEntry) -> bool { - entry - .file_name() - .to_str() - .map(|s| s.ends_with(MARKER_EXTENSION)) - .unwrap_or(false) - } + let build_dir = build_dir.as_ref(); + let output_dir = output_dir.as_ref(); - for marker_file in find_files(&build_dir, is_marker) { - let mut output_file: PathBuf = output_dir.as_ref().into(); - output_file.push( - marker_file + fn has_markers(entry: &DirEntry) -> bool { + let is_dir = entry.path().is_dir(); + let is_file = entry.file_type().is_file(); + let is_marker = is_file + && entry .file_name() - .context(error::BadFilenameSnafu { path: &marker_file })?, - ); + .to_str() + .map(|s| s.ends_with(MARKER_EXTENSION)) + .unwrap_or(false); + is_dir || is_marker + } - output_file.set_extension(""); - if output_file.exists() { - std::fs::remove_file(&output_file) - .context(error::FileRemoveSnafu { path: &output_file })?; + fn cleanup(path: &Path, top: &Path, dirs: &mut HashSet) -> Result<()> { + if !path.exists() { + return Ok(()); } + std::fs::remove_file(&path).context(error::FileRemoveSnafu { path })?; + let mut parent = path.parent(); + while let Some(p) = parent { + if p == top || dirs.contains(p) { + break; + } + dirs.insert(p.into()); + parent = p.parent() + } + Ok(()) + } + + fn is_empty_dir(path: &Path) -> Result { + Ok(path.is_dir() + && path + .read_dir() + .context(error::DirectoryReadSnafu { path })? + .next() + .is_none()) + } + + let mut clean_dirs: HashSet = HashSet::new(); - std::fs::remove_file(&marker_file) - .context(error::FileRemoveSnafu { path: &marker_file })?; + for marker_file in find_files(&build_dir, has_markers) { + let mut output_file: PathBuf = output_dir.into(); + output_file.push(marker_file.strip_prefix(&build_dir).context( + error::StripPathPrefixSnafu { + path: &marker_file, + prefix: build_dir, + }, + )?); + output_file.set_extension(""); + cleanup(&output_file, output_dir, &mut clean_dirs)?; + cleanup(&marker_file, build_dir, &mut clean_dirs)?; + } + + // Clean up directories in reverse order, so that empty child directories don't stop an + // otherwise empty parent directory from being removed. + let mut clean_dirs = clean_dirs.into_iter().collect::>(); + clean_dirs.sort_by(|a, b| b.cmp(a)); + + for clean_dir in clean_dirs { + if is_empty_dir(&clean_dir)? 
{ + std::fs::remove_dir(&clean_dir) + .context(error::DirectoryRemoveSnafu { path: &clean_dir })?; + } } Ok(()) @@ -456,11 +509,11 @@ where .follow_links(false) .same_file_system(true) .min_depth(1) - .max_depth(1) .into_iter() .filter_entry(filter) .flat_map(|e| e.context(error::DirectoryWalkSnafu)) .map(|e| e.into_path()) + .filter(|e| e.is_file()) } /// Retrieve a BUILDSYS_* variable that we expect to be set in the environment, diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs index 6fccfd62..fe527590 100644 --- a/tools/buildsys/src/builder/error.rs +++ b/tools/buildsys/src/builder/error.rs @@ -16,8 +16,8 @@ pub(crate) enum Error { source: std::io::Error, }, - #[snafu(display("Failed to get filename for '{}'", path.display()))] - BadFilename { path: PathBuf }, + #[snafu(display("Failed to get parent directory for '{}'", path.display()))] + BadDirectory { path: PathBuf }, #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] DirectoryCreate { @@ -25,6 +25,18 @@ pub(crate) enum Error { source: std::io::Error, }, + #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] + DirectoryRemove { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to read directory '{}': {}", path.display(), source))] + DirectoryRead { + path: PathBuf, + source: std::io::Error, + }, + #[snafu(display("Failed to walk directory to find marker files: {}", source))] DirectoryWalk { source: walkdir::Error }, @@ -53,6 +65,13 @@ pub(crate) enum Error { source: std::env::VarError, }, + #[snafu(display("Failed to strip prefix '{}' from path '{}': {}", prefix.display(), path.display(), source))] + StripPathPrefix { + path: PathBuf, + prefix: PathBuf, + source: std::path::StripPrefixError, + }, + #[snafu(display("Unsupported architecture '{}'", arch))] UnsupportedArch { arch: String, From 4794a011b88ff15915526a3ce04666dfc5b2f5f2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 25 Sep 2022 17:25:23 +0000 Subject: [PATCH 0786/1356] buildsys: track symlinks in output Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index d203daf1..0a1a6d43 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -387,7 +387,8 @@ where .to_str() .map(|s| !s.ends_with(MARKER_EXTENSION)) .unwrap_or(false); - is_dir || is_not_marker + let is_symlink = entry.file_type().is_symlink(); + is_dir || is_not_marker || is_symlink } for artifact_file in find_files(&build_dir, has_artifacts) { @@ -443,7 +444,7 @@ where } fn cleanup(path: &Path, top: &Path, dirs: &mut HashSet) -> Result<()> { - if !path.exists() { + if !path.exists() && !path.is_symlink() { return Ok(()); } std::fs::remove_file(&path).context(error::FileRemoveSnafu { path })?; @@ -513,7 +514,7 @@ where .filter_entry(filter) .flat_map(|e| e.context(error::DirectoryWalkSnafu)) .map(|e| e.into_path()) - .filter(|e| e.is_file()) + .filter(|e| e.is_file() || e.is_symlink()) } /// Retrieve a BUILDSYS_* variable that we expect to be set in the environment, From c8e9ddbfac414cc8878f254b2b8c36b504e8ce69 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 25 Sep 2022 22:21:10 +0000 Subject: [PATCH 0787/1356] build: refactor image outputs and symlink handling The larger goal of this change is to make it easier to archive the contents of a specific build, to allow it to be saved and restored at a later point in time 
or on a different machine. Now that `buildsys` can track and clean up symlinks from builds, move all symlink generation into the image build process. Since `buildsys` can also keep track of artifacts in subdirectories, switch to versioned directories to store the output of image builds. Previously, "latest" was a directory containing friendly symlinks to the most recent set of images. Now it is a symlink to the most recent versioned directory, and the friendly symlinks are created during the image build. Signed-off-by: Ben Cressey --- tools/rpm2img | 62 ++++++++++++++++++++++++++++++++++++++------ tools/rpm2kmodkit | 12 +++++++++ tools/rpm2migrations | 10 +++++++ 3 files changed, 76 insertions(+), 8 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index b62beb92..a9d801ce 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# shellcheck disable=SC2034 set -eu -o pipefail shopt -qs failglob @@ -38,16 +39,39 @@ case "${PARTITION_PLAN}" in ;; esac +# Store output artifacts in a versioned directory. +OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" mkdir -p "${OUTPUT_DIR}" FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" +SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}" +VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}" +FRIENDLY_VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-v${VERSION_ID}" -OS_IMAGE_BASENAME="${FILENAME_PREFIX}" -DATA_IMAGE_BASENAME="${FILENAME_PREFIX}-data" +OS_IMAGE_NAME="${FILENAME_PREFIX}" +OS_IMAGE_SYMLINK="${SYMLINK_PREFIX}" +OS_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}" +OS_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}" + +DATA_IMAGE_NAME="${FILENAME_PREFIX}-data" +DATA_IMAGE_SYMLINK="${SYMLINK_PREFIX}-data" +DATA_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-data" +DATA_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-data" BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" +BOOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-boot.ext4.lz4" +BOOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" +BOOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" + VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" +VERITY_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.verity.lz4" +VERITY_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" +VERITY_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" + ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" +ROOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.ext4.lz4" +ROOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" +ROOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" OS_IMAGE="$(mktemp)" BOOT_IMAGE="$(mktemp)" @@ -332,21 +356,39 @@ esac sgdisk -v "${OS_IMAGE}" [ -s "${DATA_IMAGE}" ] && sgdisk -v "${DATA_IMAGE}" +symlink_image() { + local ext what + ext="${1}" + what="${2}" + ext="${ext:+.$ext}" + target="${what^^}_NAME" + for link in symlink versioned_symlink friendly_versioned_symlink ; do + link="${what^^}_${link^^}" + ln -s "${!target}${ext}" "${OUTPUT_DIR}/${!link}${ext}" + done +} + if [[ ${OUTPUT_FMT} == "raw" ]]; then - lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.img.lz4" + lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_NAME}.img.lz4" + symlink_image "img.lz4" "os_image" if [ -s "${DATA_IMAGE}" ] ; then - lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.img.lz4" + lz4 -vc "${DATA_IMAGE}" 
>"${OUTPUT_DIR}/${DATA_IMAGE_NAME}.img.lz4" + symlink_image "img.lz4" "data_image" fi elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then - qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.qcow2" + qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.qcow2" + symlink_image "qcow2" "os_image" if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.qcow2" + qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.qcow2" + symlink_image "qcow2" "data_image" fi elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then # Stream optimization is required for creating an Open Virtual Appliance (OVA) - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_BASENAME}.vmdk" + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.vmdk" + symlink_image "vmdk" "os_image" if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_BASENAME}.vmdk" + qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.vmdk" + symlink_image "vmdk" "data_image" fi fi @@ -354,6 +396,10 @@ lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" +symlink_image "" "boot_image" +symlink_image "" "verity_image" +symlink_image "" "root_image" + find "${OUTPUT_DIR}" -type f -print -exec chown 1000:1000 {} \; # Clean up temporary files to reduce size of layer. diff --git a/tools/rpm2kmodkit b/tools/rpm2kmodkit index 1c0c8871..079d98ce 100755 --- a/tools/rpm2kmodkit +++ b/tools/rpm2kmodkit @@ -12,11 +12,17 @@ for opt in "$@"; do esac done +# Store output artifacts in a versioned directory. +OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" + # Use a friendly name for the top-level directory inside the archive. KMOD_KIT="${VARIANT}-${ARCH}-kmod-kit-v${VERSION_ID}" # Use the build ID within the filename, to align with our build's expectations. KMOD_KIT_FULL="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-kmod-kit" +KMOD_KIT_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-kmod-kit" +KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-kmod-kit" +KMOD_KIT_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-kmod-kit" EXTRACT_DIR="$(mktemp -d)" KIT_DIR="$(mktemp -d)" @@ -43,4 +49,10 @@ tar cf "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" "${KMOD_KIT}" xz -T0 "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" popd >/dev/null +# Create friendly symlinks. +ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK}.tar.xz" +ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_VERSIONED_SYMLINK}.tar.xz" +ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_SYMLINK}.tar.xz" +ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT}.tar.xz" + rm -rf "${EXTRACT_DIR}" "${KIT_DIR}" diff --git a/tools/rpm2migrations b/tools/rpm2migrations index da4bbc26..498bc637 100755 --- a/tools/rpm2migrations +++ b/tools/rpm2migrations @@ -11,9 +11,14 @@ for opt in "$@"; do esac done +# Store output artifacts in a versioned directory. 
+OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" mkdir -p "${OUTPUT_DIR}" MIGRATIONS_ARCHIVE="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-migrations.tar" +MIGRATIONS_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-migrations.tar" +MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-migrations.tar" +MIGRATIONS_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-migrations.tar" ROOT_TEMP="$(mktemp -d)" SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" MIGRATIONS_DIR="${ROOT_TEMP}/${SYS_ROOT}/usr/share/migrations" @@ -43,4 +48,9 @@ else fi popd +# Create friendly symlinks. +ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK}" +ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_VERSIONED_SYMLINK}" +ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_SYMLINK}" + rm -rf "${ROOT_TEMP}" From fc8619426fd15d156097204c2552870e6c8a9d28 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 10 Oct 2022 15:04:21 +0000 Subject: [PATCH 0788/1356] buildsys: turn manifest parser into library crate This allows metadata about the variant to be parsed by other tools in a consistent way. In particular, `pubsys` will need access to fields related to published image sizes and whether the Secure Boot feature is enabled. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 2 +- tools/buildsys/src/cache.rs | 2 +- tools/buildsys/src/gomod.rs | 2 +- tools/buildsys/src/lib.rs | 1 + tools/buildsys/src/main.rs | 10 ++- tools/buildsys/src/manifest.rs | 101 ++++++++++++++------------- tools/buildsys/src/manifest/error.rs | 4 +- 7 files changed, 62 insertions(+), 60 deletions(-) create mode 100644 tools/buildsys/src/lib.rs diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 0a1a6d43..c1811c9e 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -22,7 +22,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use crate::manifest::{GrubFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; +use buildsys::manifest::{GrubFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index bd746eb2..d8bb53a3 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -13,7 +13,7 @@ It implements a two-tier approach to retrieval: files are first pulled from the pub(crate) mod error; use error::Result; -use super::manifest; +use buildsys::manifest; use sha2::{Digest, Sha512}; use snafu::{ensure, OptionExt, ResultExt}; use std::fs::{self, File}; diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index 71c85d93..ae5b606b 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -18,7 +18,7 @@ when the docker-go script is invoked. 
pub(crate) mod error; use error::Result; -use super::manifest; +use buildsys::manifest; use duct::cmd; use snafu::{ensure, OptionExt, ResultExt}; use std::io::Write; diff --git a/tools/buildsys/src/lib.rs b/tools/buildsys/src/lib.rs new file mode 100644 index 00000000..640fc648 --- /dev/null +++ b/tools/buildsys/src/lib.rs @@ -0,0 +1 @@ +pub mod manifest; diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 3c5e4035..9cf3a5e6 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -11,15 +11,13 @@ The implementation is closely tied to the top-level Dockerfile. mod builder; mod cache; mod gomod; -mod manifest; mod project; mod spec; -use crate::gomod::GoMod; -use crate::manifest::BundleModule; use builder::{PackageBuilder, VariantBuilder}; +use buildsys::manifest::{BundleModule, ManifestInfo, SupportedArch}; use cache::LookasideCache; -use manifest::{ManifestInfo, SupportedArch}; +use gomod::GoMod; use project::ProjectInfo; use serde::Deserialize; use snafu::{ensure, ResultExt}; @@ -35,7 +33,7 @@ mod error { #[snafu(visibility(pub(super)))] pub(super) enum Error { ManifestParse { - source: super::manifest::error::Error, + source: buildsys::manifest::Error, }, SpecParse { @@ -144,7 +142,7 @@ fn build_package() -> Result<()> { // If manifest has package.metadata.build-package.variant-sensitive set, then track the // appropriate environment variable for changes. if let Some(sensitivity) = manifest.variant_sensitive() { - use manifest::{SensitivityType::*, VariantSensitivity::*}; + use buildsys::manifest::{SensitivityType::*, VariantSensitivity::*}; fn emit_variant_env(suffix: Option<&str>) { if let Some(suffix) = suffix { println!( diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index fa791913..6918774e 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -182,84 +182,89 @@ grub-features = [ ``` */ -pub(crate) mod error; -use error::Result; +mod error; use serde::Deserialize; -use snafu::ResultExt; +use snafu::{ResultExt, Snafu}; use std::collections::HashSet; use std::fmt; use std::fs; use std::path::{Path, PathBuf}; +#[derive(Debug, Snafu)] +pub struct Error(error::Error); +type Result = std::result::Result; + /// The nested structures here are somewhat complex, but they make it trivial /// to deserialize the structure we expect to find in the manifest. #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] -pub(crate) struct ManifestInfo { +pub struct ManifestInfo { package: Package, } impl ManifestInfo { /// Extract the settings we understand from `Cargo.toml`. - pub(crate) fn new>(path: P) -> Result { + pub fn new>(path: P) -> Result { let path = path.as_ref(); let manifest_data = fs::read_to_string(path).context(error::ManifestFileReadSnafu { path })?; - toml::from_str(&manifest_data).context(error::ManifestFileLoadSnafu { path }) + let manifest = + toml::from_str(&manifest_data).context(error::ManifestFileLoadSnafu { path })?; + Ok(manifest) } /// Convenience method to return the list of source groups. - pub(crate) fn source_groups(&self) -> Option<&Vec> { + pub fn source_groups(&self) -> Option<&Vec> { self.build_package().and_then(|b| b.source_groups.as_ref()) } /// Convenience method to return the list of external files. - pub(crate) fn external_files(&self) -> Option<&Vec> { + pub fn external_files(&self) -> Option<&Vec> { self.build_package().and_then(|b| b.external_files.as_ref()) } /// Convenience method to return the package name override, if any. 
- pub(crate) fn package_name(&self) -> Option<&String> { + pub fn package_name(&self) -> Option<&String> { self.build_package().and_then(|b| b.package_name.as_ref()) } /// Convenience method to find whether the package is sensitive to variant changes. - pub(crate) fn variant_sensitive(&self) -> Option<&VariantSensitivity> { + pub fn variant_sensitive(&self) -> Option<&VariantSensitivity> { self.build_package() .and_then(|b| b.variant_sensitive.as_ref()) } /// Convenience method to return the list of included packages. - pub(crate) fn included_packages(&self) -> Option<&Vec> { + pub fn included_packages(&self) -> Option<&Vec> { self.build_variant() .and_then(|b| b.included_packages.as_ref()) } /// Convenience method to return the image format override, if any. - pub(crate) fn image_format(&self) -> Option<&ImageFormat> { + pub fn image_format(&self) -> Option<&ImageFormat> { self.build_variant().and_then(|b| b.image_format.as_ref()) } /// Convenience method to return the image layout, if specified. - pub(crate) fn image_layout(&self) -> Option<&ImageLayout> { + pub fn image_layout(&self) -> Option<&ImageLayout> { self.build_variant().and_then(|b| b.image_layout.as_ref()) } /// Convenience method to return the supported architectures for this variant. - pub(crate) fn supported_arches(&self) -> Option<&HashSet> { + pub fn supported_arches(&self) -> Option<&HashSet> { self.build_variant() .and_then(|b| b.supported_arches.as_ref()) } /// Convenience method to return the kernel parameters for this variant. - pub(crate) fn kernel_parameters(&self) -> Option<&Vec> { + pub fn kernel_parameters(&self) -> Option<&Vec> { self.build_variant() .and_then(|b| b.kernel_parameters.as_ref()) } /// Convenience method to return the GRUB features for this variant. - pub(crate) fn grub_features(&self) -> Option<&Vec> { + pub fn grub_features(&self) -> Option<&Vec> { self.build_variant().and_then(|b| b.grub_features.as_ref()) } @@ -295,25 +300,25 @@ struct Metadata { #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] #[allow(dead_code)] -pub(crate) struct BuildPackage { - pub(crate) external_files: Option>, - pub(crate) package_name: Option, - pub(crate) releases_url: Option, - pub(crate) source_groups: Option>, - pub(crate) variant_sensitive: Option, +pub struct BuildPackage { + pub external_files: Option>, + pub package_name: Option, + pub releases_url: Option, + pub source_groups: Option>, + pub variant_sensitive: Option, } #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] #[serde(untagged)] -pub(crate) enum VariantSensitivity { +pub enum VariantSensitivity { Any(bool), Specific(SensitivityType), } #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] -pub(crate) enum SensitivityType { +pub enum SensitivityType { Platform, Runtime, Family, @@ -322,18 +327,18 @@ pub(crate) enum SensitivityType { #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] -pub(crate) struct BuildVariant { - pub(crate) included_packages: Option>, - pub(crate) image_format: Option, - pub(crate) image_layout: Option, - pub(crate) supported_arches: Option>, - pub(crate) kernel_parameters: Option>, - pub(crate) grub_features: Option>, +pub struct BuildVariant { + pub included_packages: Option>, + pub image_format: Option, + pub image_layout: Option, + pub supported_arches: Option>, + pub kernel_parameters: Option>, + pub grub_features: Option>, } #[derive(Deserialize, Debug)] #[serde(rename_all = "lowercase")] -pub(crate) enum ImageFormat { +pub enum ImageFormat { Qcow2, Raw, Vmdk, @@ 
-341,13 +346,13 @@ pub(crate) enum ImageFormat { #[derive(Deserialize, Debug, Copy, Clone)] #[serde(rename_all = "kebab-case")] -pub(crate) struct ImageLayout { +pub struct ImageLayout { #[serde(default = "ImageLayout::default_os_image_size_gib")] - pub(crate) os_image_size_gib: u32, + pub os_image_size_gib: u32, #[serde(default = "ImageLayout::default_data_image_size_gib")] - pub(crate) data_image_size_gib: u32, + pub data_image_size_gib: u32, #[serde(default = "ImageLayout::default_partition_plan")] - pub(crate) partition_plan: PartitionPlan, + pub partition_plan: PartitionPlan, } /// These are the historical defaults for all variants, before we added support @@ -382,21 +387,21 @@ impl Default for ImageLayout { #[derive(Deserialize, Debug, Copy, Clone)] #[serde(rename_all = "lowercase")] -pub(crate) enum PartitionPlan { +pub enum PartitionPlan { Split, Unified, } #[derive(Deserialize, Debug, PartialEq, Eq, Hash)] #[serde(rename_all = "lowercase")] -pub(crate) enum SupportedArch { +pub enum SupportedArch { X86_64, Aarch64, } /// Map a Linux architecture into the corresponding Docker architecture. impl SupportedArch { - pub(crate) fn goarch(&self) -> &'static str { + pub fn goarch(&self) -> &'static str { match self { SupportedArch::X86_64 => "amd64", SupportedArch::Aarch64 => "arm64", @@ -406,7 +411,7 @@ impl SupportedArch { #[derive(Deserialize, Debug, PartialEq, Eq, Hash)] #[serde(rename_all = "kebab-case")] -pub(crate) enum GrubFeature { +pub enum GrubFeature { SetPrivateVar, } @@ -420,19 +425,19 @@ impl fmt::Display for GrubFeature { #[derive(Deserialize, Debug)] #[serde(rename_all = "lowercase")] -pub(crate) enum BundleModule { +pub enum BundleModule { Go, } #[derive(Deserialize, Debug)] #[serde(rename_all = "kebab-case")] -pub(crate) struct ExternalFile { - pub(crate) path: Option, - pub(crate) sha512: String, - pub(crate) url: String, - pub(crate) bundle_modules: Option>, - pub(crate) bundle_root_path: Option, - pub(crate) bundle_output_path: Option, +pub struct ExternalFile { + pub path: Option, + pub sha512: String, + pub url: String, + pub bundle_modules: Option>, + pub bundle_root_path: Option, + pub bundle_output_path: Option, } impl fmt::Display for SupportedArch { diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs index a2f2056c..aad287e5 100644 --- a/tools/buildsys/src/manifest/error.rs +++ b/tools/buildsys/src/manifest/error.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] -pub(crate) enum Error { +pub(super) enum Error { #[snafu(display("Failed to read manifest file '{}': {}", path.display(), source))] ManifestFileRead { path: PathBuf, source: io::Error }, @@ -14,5 +14,3 @@ pub(crate) enum Error { source: toml::de::Error, }, } - -pub(super) type Result = std::result::Result; From d250c254a7e65e751f04e5cecf9ecfb341c62e39 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 15 Oct 2022 17:41:28 +0000 Subject: [PATCH 0789/1356] buildsys: add publish image size hint to manifests Allow manifests to specify a hint for the published image sizes, for the common case where additional storage space is desired on the data volume. A helper function is provided to calculate the sizes for the OS and data volumes, based on the image layout and the publish size hint. 
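As a quick illustration of how the helper resolves the hint (a sketch only, using the default layout values documented in this change):

```
[package.metadata.build-variant.image-layout]
os-image-size-gib = 2
data-image-size-gib = 1
publish-image-size-hint-gib = 22
partition-plan = "split"

# With the "split" plan, publish_image_sizes_gib() keeps the OS volume at its
# built size and gives the rest of the hint to the data volume: (2, 20).
# With the "unified" plan it would return (22, -1), since there is no separate
# data volume. If the hint were below the 2 + 1 = 3 GiB minimum, it would be
# ignored and the minimum used instead.
```
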
Signed-off-by: Ben Cressey --- tools/buildsys/src/manifest.rs | 71 ++++++++++++++++++++++++---- tools/buildsys/src/manifest/error.rs | 3 ++ 2 files changed, 65 insertions(+), 9 deletions(-) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 6918774e..72288ad6 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -141,6 +141,13 @@ overhead for the GPT labels and partition alignment. The data partition will be automatically resized to fill the disk on boot, so it is usually not necessary to increase this value. +`publish-image-size-hint-gib` is the desired size of the published image in GiB. +When the `split` layout is used, the "os" image volume will remain at the built +size, and any additional space will be allocated to the "data" image volume. +When the `unified` layout is used, this value will be used directly for the +single "os" image volume. The hint will be ignored if the combined size of the +"os" and "data" images exceeds the specified value. + `partition-plan` is the desired strategy for image partitioning. This can be `split` (the default) for "os" and "data" images backed by separate volumes, or `unified` to have "os" and "data" share the same volume. @@ -148,6 +155,7 @@ volumes, or `unified` to have "os" and "data" share the same volume. [package.metadata.build-variant.image-layout] os-image-size-gib = 2 data-image-size-gib = 1 +publish-image-size-hint-gib = 22 partition-plan = "split" ``` @@ -186,8 +194,10 @@ mod error; use serde::Deserialize; use snafu::{ResultExt, Snafu}; +use std::cmp::max; use std::collections::HashSet; -use std::fmt; +use std::convert::TryFrom; +use std::fmt::{self, Display}; use std::fs; use std::path::{Path, PathBuf}; @@ -248,7 +258,7 @@ impl ManifestInfo { /// Convenience method to return the image layout, if specified. pub fn image_layout(&self) -> Option<&ImageLayout> { - self.build_variant().and_then(|b| b.image_layout.as_ref()) + self.build_variant().map(|b| &b.image_layout) } /// Convenience method to return the supported architectures for this variant. @@ -330,7 +340,8 @@ pub enum SensitivityType { pub struct BuildVariant { pub included_packages: Option>, pub image_format: Option, - pub image_layout: Option, + #[serde(default)] + pub image_layout: ImageLayout, pub supported_arches: Option>, pub kernel_parameters: Option>, pub grub_features: Option>, @@ -344,35 +355,76 @@ pub enum ImageFormat { Vmdk, } +#[derive(Deserialize, Debug, Copy, Clone)] +/// Constrain specified image sizes to a plausible range, from 0 - 65535 GiB. +pub struct ImageSize(u16); + +impl Display for ImageSize { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + #[derive(Deserialize, Debug, Copy, Clone)] #[serde(rename_all = "kebab-case")] pub struct ImageLayout { #[serde(default = "ImageLayout::default_os_image_size_gib")] - pub os_image_size_gib: u32, + pub os_image_size_gib: ImageSize, #[serde(default = "ImageLayout::default_data_image_size_gib")] - pub data_image_size_gib: u32, + pub data_image_size_gib: ImageSize, + #[serde(default = "ImageLayout::default_publish_image_size_hint_gib")] + publish_image_size_hint_gib: ImageSize, #[serde(default = "ImageLayout::default_partition_plan")] pub partition_plan: PartitionPlan, } /// These are the historical defaults for all variants, before we added support /// for customizing these properties. 
-static DEFAULT_OS_IMAGE_SIZE_GIB: u32 = 2; -static DEFAULT_DATA_IMAGE_SIZE_GIB: u32 = 1; +static DEFAULT_OS_IMAGE_SIZE_GIB: ImageSize = ImageSize(2); +static DEFAULT_DATA_IMAGE_SIZE_GIB: ImageSize = ImageSize(1); +static DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB: ImageSize = ImageSize(22); static DEFAULT_PARTITION_PLAN: PartitionPlan = PartitionPlan::Split; impl ImageLayout { - fn default_os_image_size_gib() -> u32 { + fn default_os_image_size_gib() -> ImageSize { DEFAULT_OS_IMAGE_SIZE_GIB } - fn default_data_image_size_gib() -> u32 { + fn default_data_image_size_gib() -> ImageSize { DEFAULT_DATA_IMAGE_SIZE_GIB } + fn default_publish_image_size_hint_gib() -> ImageSize { + DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB + } + fn default_partition_plan() -> PartitionPlan { DEFAULT_PARTITION_PLAN } + + // At publish time we will need specific sizes for the OS image and the (optional) data image. + // The sizes returned by this function depend on the image layout, and whether the publish + // image hint is larger than the required minimum size. + pub fn publish_image_sizes_gib(&self) -> (i32, i32) { + let os_image_base_size_gib = self.os_image_size_gib.0; + let data_image_base_size_gib = self.data_image_size_gib.0; + let publish_image_size_hint_gib = self.publish_image_size_hint_gib.0; + + let min_publish_image_size_gib = os_image_base_size_gib + data_image_base_size_gib; + let publish_image_size_gib = max(publish_image_size_hint_gib, min_publish_image_size_gib); + + match self.partition_plan { + PartitionPlan::Split => { + let os_image_publish_size_gib = os_image_base_size_gib; + let data_image_publish_size_gib = publish_image_size_gib - os_image_base_size_gib; + ( + os_image_publish_size_gib.into(), + data_image_publish_size_gib.into(), + ) + } + PartitionPlan::Unified => (publish_image_size_gib.into(), -1), + } + } } impl Default for ImageLayout { @@ -380,6 +432,7 @@ impl Default for ImageLayout { Self { os_image_size_gib: Self::default_os_image_size_gib(), data_image_size_gib: Self::default_data_image_size_gib(), + publish_image_size_hint_gib: Self::default_publish_image_size_hint_gib(), partition_plan: Self::default_partition_plan(), } } diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs index aad287e5..5830aa65 100644 --- a/tools/buildsys/src/manifest/error.rs +++ b/tools/buildsys/src/manifest/error.rs @@ -13,4 +13,7 @@ pub(super) enum Error { path: PathBuf, source: toml::de::Error, }, + + #[snafu(display("Invalid image size {}; must be between 1 and 1024", value))] + InvalidImageSize { value: i32 }, } From a95b577daeb7080b4d01983fab52c31831955565 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 10 Oct 2022 15:22:46 +0000 Subject: [PATCH 0790/1356] buildsys: generate OVAs during image builds The larger goal of this change is to make image builds more hermetic, rather than relying on additional scripts to run afterwards to create files and symlinks. That allows the artifacts in the directory to be archived and extracted elsewhere more easily. The main historical blocker for generating OVAs along with VMDKs was that the published image sizes were specified through environment variables rather than in the variant manifest. Now that the manifest provides these values, they can be passed through to image builds. The bulk of this change consists of moving the OVA logic around, and dropping the unnecessary size checks. As part of this change, some OVF template fields are also renamed to standardize on the "os volume" designation for the primary full disk image. 
Previously this was called the "root volume" in places, which can lead to ambiguity because there is also a "root image" used for in-place updates. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 12 ++++++ tools/rpm2img | 77 +++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index c1811c9e..9e113593 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -139,8 +139,12 @@ impl VariantBuilder { os_image_size_gib, data_image_size_gib, partition_plan, + .. } = image_layout; + let (os_image_publish_size_gib, data_image_publish_size_gib) = + image_layout.publish_image_sizes_gib(); + let mut args = Vec::new(); args.build_arg("PACKAGES", packages.join(" ")); args.build_arg("ARCH", &arch); @@ -160,6 +164,14 @@ impl VariantBuilder { ); args.build_arg("OS_IMAGE_SIZE_GIB", format!("{}", os_image_size_gib)); args.build_arg("DATA_IMAGE_SIZE_GIB", format!("{}", data_image_size_gib)); + args.build_arg( + "OS_IMAGE_PUBLISH_SIZE_GIB", + format!("{}", os_image_publish_size_gib), + ); + args.build_arg( + "DATA_IMAGE_PUBLISH_SIZE_GIB", + format!("{}", data_image_publish_size_gib), + ); args.build_arg( "PARTITION_PLAN", match partition_plan { diff --git a/tools/rpm2img b/tools/rpm2img index a9d801ce..7a0518a0 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -10,6 +10,7 @@ shopt -qs failglob OUTPUT_FMT="raw" BUILDER_ARCH="$(uname -m)" +OVF_TEMPLATE="" for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" @@ -19,7 +20,10 @@ for opt in "$@"; do --output-fmt=*) OUTPUT_FMT="${optarg}" ;; --os-image-size-gib=*) OS_IMAGE_SIZE_GIB="${optarg}" ;; --data-image-size-gib=*) DATA_IMAGE_SIZE_GIB="${optarg}" ;; + --os-image-publish-size-gib=*) OS_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; + --data-image-publish-size-gib=*) DATA_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; --partition-plan=*) PARTITION_PLAN="${optarg}" ;; + --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; esac done @@ -39,6 +43,28 @@ case "${PARTITION_PLAN}" in ;; esac +# Fail fast if the OVF template doesn't exist, or doesn't match the layout. +if [ "${OUTPUT_FMT}" == "vmdk" ] ; then + if [ ! -s "${OVF_TEMPLATE}" ] ; then + echo "required OVF template not found: ${OVF_TEMPLATE}" >&2 + exit 1 + fi + + if [ "${PARTITION_PLAN}" == "split" ] ; then + if ! grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then + echo "Missing data disk in OVF template, which is required for 'split' layout." >&2 + exit 1 + fi + fi + + if [ "${PARTITION_PLAN}" == "unified" ] ; then + if grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then + echo "Incorrect data disk in OVF template, which is not supported for 'unified' layout." >&2 + exit 1 + fi + fi +fi + # Store output artifacts in a versioned directory. OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" mkdir -p "${OUTPUT_DIR}" @@ -392,6 +418,57 @@ elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then fi fi +# Now create the OVA if needed. +if [ "${OUTPUT_FMT}" == "vmdk" ] ; then + os_vmdk="${OS_IMAGE_NAME}.vmdk" + data_vmdk="${DATA_IMAGE_NAME}.vmdk" + ovf="${OS_IMAGE_NAME}.ovf" + ova_dir="$(mktemp -d)" + + # The manifest expects disk sizes in bytes. 
+ bytes_in_gib="$((1024 * 1024 * 1024))" + os_disk_bytes="$((OS_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" + data_disk_bytes="$((DATA_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" + sed "${OVF_TEMPLATE}" \ + -e "s/{{OS_DISK}}/${os_vmdk}/g" \ + -e "s/{{DATA_DISK}}/${data_vmdk}/g" \ + -e "s/{{OS_DISK_BYTES}}/${os_disk_bytes}/g" \ + -e "s/{{DATA_DISK_BYTES}}/${data_disk_bytes}/g" \ + > "${ova_dir}/${ovf}" + + # Make sure we replaced all the '{{...}}' fields with real values. + if grep -F -e '{{' -e '}}' "${ova_dir}/${ovf}" ; then + echo "Failed to fully render the OVF template" >&2 + exit 1 + fi + + # Create the manifest file with the hashes of the VMDKs and the OVF. + manifest="${OS_IMAGE_NAME}.mf" + pushd "${OUTPUT_DIR}" >/dev/null + sha256sum --tag "${os_vmdk}" > "${ova_dir}/${manifest}" + if [ -s "${DATA_IMAGE}" ] ; then + sha256sum --tag "${data_vmdk}" >> "${ova_dir}/${manifest}" + fi + popd >/dev/null + pushd "${ova_dir}" >/dev/null + sha256sum --tag "${ovf}" >> "${manifest}" + popd >/dev/null + + # According to the OVF spec: + # https://www.dmtf.org/sites/default/files/standards/documents/DSP0243_2.1.1.pdf, + # the OVF must be first in the tar bundle. Manifest is next, and then the + # files must fall in the same order as listed in the References section of the + # OVF file + ova="${OS_IMAGE_NAME}.ova" + tar -cf "${OUTPUT_DIR}/${ova}" -C "${ova_dir}" "${ovf}" "${manifest}" + tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${os_vmdk}" + if [ -s "${DATA_IMAGE}" ] ; then + tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${data_vmdk}" + fi + + symlink_image "ova" "os_image" +fi + lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" From c6fd24d698de802e158894c0dfc81b709a5ee0de Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 10 Oct 2022 15:24:19 +0000 Subject: [PATCH 0791/1356] pubsys: use variant manifest for AMI volume sizes Switch pubsys over to use the variant manifest to determine published AMI volume sizes, rather than relying on environment variables. This aligns AMIs and OVAs around the variant manifest as the source of truth for properties related to publication, which is a necessary step for Secure Boot support. As part of this migration, some fields and variables are renamed to standardize on the "os volume" designation for the primary full disk image. Previously this was called the "root volume" in places, which can lead to ambiguity because there is also a "root image" used for in-place updates. 
Signed-off-by: Ben Cressey --- tools/Cargo.lock | 1 + tools/pubsys/Cargo.toml | 1 + tools/pubsys/src/aws/ami/mod.rs | 16 ++++----- tools/pubsys/src/aws/ami/register.rs | 50 ++++++++++++++++++++-------- 4 files changed, 45 insertions(+), 23 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index e9974565..c87c5200 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2184,6 +2184,7 @@ dependencies = [ "aws-sdk-sts", "aws-smithy-types", "aws-types", + "buildsys", "chrono", "clap 3.2.22", "coldsnap", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 903909a0..05392b55 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -16,6 +16,7 @@ aws-sdk-ssm = "0.18.0" aws-sdk-sts = "0.18.0" aws-smithy-types = "0.48.0" aws-types = "0.48.0" +buildsys = { path = "../buildsys", version = "0.1.0" } chrono = "0.4" clap = "3.1" coldsnap = { version = "0.4", default-features = false, features = ["aws-sdk-rust-rustls"] } diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 6742eb39..da776866 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -34,21 +34,17 @@ use wait::wait_for_ami; #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] pub(crate) struct AmiArgs { - /// Path to the image containing the root volume - #[structopt(short = "r", long, parse(from_os_str))] - root_image: PathBuf, + /// Path to the image containing the os volume + #[structopt(short = "o", long, parse(from_os_str))] + os_image: PathBuf, /// Path to the image containing the data volume #[structopt(short = "d", long, parse(from_os_str))] data_image: Option, - /// Desired root volume size in gibibytes - #[structopt(long)] - root_volume_size: Option, - - /// Desired data volume size in gibibytes - #[structopt(long)] - data_volume_size: Option, + /// Path to the variant manifest + #[structopt(short = "v", long, parse(from_os_str))] + variant_manifest: PathBuf, /// The architecture of the machine image #[structopt(short = "a", long, parse(try_from_str = parse_arch))] diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index ace09424..e125a63c 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -4,6 +4,7 @@ use aws_sdk_ec2::model::{ ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType, }; use aws_sdk_ec2::{Client as Ec2Client, Region}; +use buildsys::manifest; use coldsnap::{SnapshotUploader, SnapshotWaiter}; use log::{debug, info, warn}; use snafu::{ensure, OptionExt, ResultExt}; @@ -32,23 +33,37 @@ async fn _register_image( ec2_client: &Ec2Client, cleanup_snapshot_ids: &mut Vec, ) -> Result { + let variant_manifest = manifest::ManifestInfo::new(&ami_args.variant_manifest).context( + error::LoadVariantManifestSnafu { + path: &ami_args.variant_manifest, + }, + )?; + + let image_layout = variant_manifest + .image_layout() + .context(error::MissingImageLayoutSnafu { + path: &ami_args.variant_manifest, + })?; + + let (os_volume_size, data_volume_size) = image_layout.publish_image_sizes_gib(); + debug!("Uploading images into EBS snapshots in {}", region); let uploader = SnapshotUploader::new(ebs_client); - let root_snapshot = - snapshot_from_image(&ami_args.root_image, &uploader, None, ami_args.no_progress) + let os_snapshot = + snapshot_from_image(&ami_args.os_image, &uploader, None, ami_args.no_progress) .await .context(error::SnapshotSnafu { - path: &ami_args.root_image, + path: 
&ami_args.os_image, region: region.as_ref(), })?; - cleanup_snapshot_ids.push(root_snapshot.clone()); + cleanup_snapshot_ids.push(os_snapshot.clone()); let mut data_snapshot = None; if let Some(data_image) = &ami_args.data_image { let snapshot = snapshot_from_image(data_image, &uploader, None, ami_args.no_progress) .await .context(error::SnapshotSnafu { - path: &ami_args.root_image, + path: &ami_args.os_image, region: region.as_ref(), })?; cleanup_snapshot_ids.push(snapshot.clone()); @@ -58,7 +73,7 @@ async fn _register_image( info!("Waiting for snapshots to become available in {}", region); let waiter = SnapshotWaiter::new(ec2_client.clone()); waiter - .wait(&root_snapshot, Default::default()) + .wait(&os_snapshot, Default::default()) .await .context(error::WaitSnapshotSnafu { snapshot_type: "root", @@ -74,30 +89,30 @@ async fn _register_image( } // Prepare parameters for AMI registration request - let root_bdm = BlockDeviceMapping::builder() + let os_bdm = BlockDeviceMapping::builder() .set_device_name(Some(ROOT_DEVICE_NAME.to_string())) .set_ebs(Some( EbsBlockDevice::builder() .set_delete_on_termination(Some(true)) - .set_snapshot_id(Some(root_snapshot.clone())) + .set_snapshot_id(Some(os_snapshot.clone())) .set_volume_type(Some(VolumeType::from(VOLUME_TYPE))) - .set_volume_size(ami_args.root_volume_size) + .set_volume_size(Some(os_volume_size)) .build(), )) .build(); let mut data_bdm = None; if let Some(ref data_snapshot) = data_snapshot { - let mut bdm = root_bdm.clone(); + let mut bdm = os_bdm.clone(); bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); if let Some(ebs) = bdm.ebs.as_mut() { ebs.snapshot_id = Some(data_snapshot.clone()); - ebs.volume_size = ami_args.data_volume_size; + ebs.volume_size = Some(data_volume_size); } data_bdm = Some(bdm); } - let mut block_device_mappings = vec![root_bdm]; + let mut block_device_mappings = vec![os_bdm]; if let Some(data_bdm) = data_bdm { block_device_mappings.push(data_bdm); } @@ -125,7 +140,7 @@ async fn _register_image( region: region.as_ref(), })?; - let mut snapshot_ids = vec![root_snapshot]; + let mut snapshot_ids = vec![os_snapshot]; if let Some(data_snapshot) = data_snapshot { snapshot_ids.push(data_snapshot); } @@ -250,6 +265,15 @@ mod error { source: SdkError, }, + #[snafu(display("Failed to load variant manifest from {}: {}", path.display(), source))] + LoadVariantManifest { + path: PathBuf, + source: buildsys::manifest::Error, + }, + + #[snafu(display("Could not find image layout for {}", path.display()))] + MissingImageLayout { path: PathBuf }, + #[snafu(display("Image response in {} did not include image ID", region))] MissingImageId { region: String }, From 2b8547b8f5f8cdd799f2b0115735acbbd9e6b2a6 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 18 Oct 2022 21:10:51 +0000 Subject: [PATCH 0792/1356] build: refactor grub features as image features Presently there is only one image feature flag, indicating whether GRUB has the `search` module built-in along with a config file that sets the "private" variable. For Secure Boot, we will end up with additional image-level feature flags. One will be for GRUB, to indicate whether to enforce a signed config file, and the other will be to enable UEFI Secure Boot. Since not all of these flags will necessarily be tied to GRUB, take this opportunity to migrate to a generic "image features" structure to track the flags before any more are added. 
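A minimal sketch of the new manifest shape, using the single flag defined so far; how the resulting build arg is wired into the image scripts goes through the Dockerfile, which is not part of this excerpt:

```
[package.metadata.build-variant.image-features]
grub-set-private-var = true

# buildsys deserializes this map, keeps only the flags set to true, and passes
# each enabled flag to the variant image build as a build arg with the value
# "1" (e.g. GRUB_SET_PRIVATE_VAR=1). Flags set to false are simply omitted.
```
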
Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 20 ++++------ tools/buildsys/src/main.rs | 4 +- tools/buildsys/src/manifest.rs | 56 +++++++++++++++++----------- tools/buildsys/src/manifest/error.rs | 3 ++ tools/rpm2img | 6 ++- 5 files changed, 51 insertions(+), 38 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 9e113593..4515eb7c 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -22,7 +22,7 @@ use std::path::{Path, PathBuf}; use std::process::Output; use walkdir::{DirEntry, WalkDir}; -use buildsys::manifest::{GrubFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; +use buildsys::manifest::{ImageFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; /* There's a bug in BuildKit that can lead to a build failure during parallel @@ -124,7 +124,7 @@ impl VariantBuilder { image_format: Option<&ImageFormat>, image_layout: Option<&ImageLayout>, kernel_parameters: Option<&Vec>, - grub_features: Option<&Vec>, + image_features: Option>, ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); @@ -186,17 +186,11 @@ impl VariantBuilder { .unwrap_or_else(|| "".to_string()), ); - args.build_arg( - "GRUB_FEATURES", - grub_features - .map(|v| { - v.iter() - .map(|f| f.to_string()) - .collect::>() - .join(" ") - }) - .unwrap_or_else(|| "".to_string()), - ); + if let Some(image_features) = image_features { + for image_feature in image_features.iter() { + args.build_arg(format!("{}", image_feature), "1"); + } + } // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index 9cf3a5e6..dba6ded0 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -231,13 +231,13 @@ fn build_variant() -> Result<()> { let image_format = manifest.image_format(); let image_layout = manifest.image_layout(); let kernel_parameters = manifest.kernel_parameters(); - let grub_features = manifest.grub_features(); + let image_features = manifest.image_features(); VariantBuilder::build( packages, image_format, image_layout, kernel_parameters, - grub_features, + image_features, ) .context(error::BuildAttemptSnafu)?; } else { diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 72288ad6..a3eee8f8 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -175,18 +175,15 @@ kernel-parameters = [ "console=ttyS42", ] -`grub-features` is a list of supported grub features. -This list allows us to conditionally use or exclude certain grub features in specific variants. -The only supported value at this time is `set-private-var`. -This value means that the grub config for the current variant includes the command to find the -BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub to -consume. -Adding this value to `grub-features` enables the use of Boot Config. +`image-features` is a map of image feature flags, which can be enabled or disabled. This allows us +to conditionally use or exclude certain firmware-level features in variants. + +`grub-set-private-var` means that the grub image for the current variant includes the command to +find the BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub +config file to consume. This feature flag is a prerequisite for Boot Config support. 
``` -[package.metadata.build-variant] -grub-features = [ - "set-private-var", -] +[package.metadata.build-variant.image-features] +grub-set-private-var = true ``` */ @@ -195,7 +192,7 @@ mod error; use serde::Deserialize; use snafu::{ResultExt, Snafu}; use std::cmp::max; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::fmt::{self, Display}; use std::fs; @@ -273,9 +270,16 @@ impl ManifestInfo { .and_then(|b| b.kernel_parameters.as_ref()) } - /// Convenience method to return the GRUB features for this variant. - pub fn grub_features(&self) -> Option<&Vec> { - self.build_variant().and_then(|b| b.grub_features.as_ref()) + /// Convenience method to return the enabled image features for this variant. + pub fn image_features(&self) -> Option> { + self.build_variant().and_then(|b| { + b.image_features.as_ref().and_then(|m| { + m.iter() + .filter(|(_k, v)| **v) + .map(|(k, _v)| Some(k)) + .collect() + }) + }) } /// Helper methods to navigate the series of optional struct fields. @@ -344,7 +348,7 @@ pub struct BuildVariant { pub image_layout: ImageLayout, pub supported_arches: Option>, pub kernel_parameters: Option>, - pub grub_features: Option>, + pub image_features: Option>, } #[derive(Deserialize, Debug)] @@ -463,15 +467,25 @@ impl SupportedArch { } #[derive(Deserialize, Debug, PartialEq, Eq, Hash)] -#[serde(rename_all = "kebab-case")] -pub enum GrubFeature { - SetPrivateVar, +#[serde(try_from = "String")] +pub enum ImageFeature { + GrubSetPrivateVar, +} + +impl TryFrom for ImageFeature { + type Error = Error; + fn try_from(s: String) -> Result { + match s.as_str() { + "grub-set-private-var" => Ok(ImageFeature::GrubSetPrivateVar), + _ => error::ParseImageFeatureSnafu { what: s }.fail()?, + } + } } -impl fmt::Display for GrubFeature { +impl fmt::Display for ImageFeature { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - GrubFeature::SetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"), + ImageFeature::GrubSetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"), } } } diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs index 5830aa65..788cbb1a 100644 --- a/tools/buildsys/src/manifest/error.rs +++ b/tools/buildsys/src/manifest/error.rs @@ -14,6 +14,9 @@ pub(super) enum Error { source: toml::de::Error, }, + #[snafu(display("Failed to parse image feature '{}'", what))] + ParseImageFeature { what: String }, + #[snafu(display("Invalid image size {}; must be between 1 and 1024", value))] InvalidImageSize { value: i32 }, } diff --git a/tools/rpm2img b/tools/rpm2img index 7a0518a0..ebc33db2 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -12,6 +12,8 @@ OUTPUT_FMT="raw" BUILDER_ARCH="$(uname -m)" OVF_TEMPLATE="" +GRUB_SET_PRIVATE_VAR="no" + for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" case "${opt}" in @@ -24,6 +26,7 @@ for opt in "$@"; do --data-image-publish-size-gib=*) DATA_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; --partition-plan=*) PARTITION_PLAN="${optarg}" ;; --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; + --with-grub-set-private-var=*) GRUB_SET_PRIVATE_VAR="${optarg}" ;; esac done @@ -305,9 +308,8 @@ veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" dd if="${VERITY_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" # write GRUB config -GRUB_FEATURES_ARRAY=("${GRUB_FEATURES}") # If GRUB_SET_PRIVATE_VAR is set, include the parameters that support Boot Config -if printf '%s\n' "${GRUB_FEATURES_ARRAY[@]}" | grep -Fxq 
'GRUB_SET_PRIVATE_VAR' ; then +if [ "${GRUB_SET_PRIVATE_VAR}" == "yes" ] ; then BOOTCONFIG='bootconfig' INITRD='initrd ($private)/bootconfig.data' else From 9836eed0120bf7f3ca1ad32846e67e5f8d2e17ba Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 18 Oct 2022 21:19:12 +0000 Subject: [PATCH 0793/1356] build: pass image features to package builds This allows package builds to depend on image feature flags, which are set as build conditionals in the spec file. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 8 +++++++- tools/buildsys/src/main.rs | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 4515eb7c..86768fa5 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -74,7 +74,7 @@ pub(crate) struct PackageBuilder; impl PackageBuilder { /// Build RPMs for the specified package. - pub(crate) fn build(package: &str) -> Result { + pub(crate) fn build(package: &str, image_features: Option>) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); let arch = getenv("BUILDSYS_ARCH")?; let goarch = serde_plain::from_str::(&arch) @@ -109,6 +109,12 @@ impl PackageBuilder { arch = arch, ); + if let Some(image_features) = image_features { + for image_feature in image_features.iter() { + args.build_arg(format!("{}", image_feature), "1"); + } + } + build(BuildType::Package, package, &arch, args, &tag, &output_dir)?; Ok(Self) diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index dba6ded0..cf64c24e 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -131,9 +131,12 @@ fn build_package() -> Result<()> { let root_dir: PathBuf = getenv("BUILDSYS_ROOT_DIR")?.into(); let variant = getenv("BUILDSYS_VARIANT")?; let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file); + println!("cargo:rerun-if-changed={}", variant_manifest_path.display()); + let variant_manifest = ManifestInfo::new(variant_manifest_path).context(error::ManifestParseSnafu)?; supported_arch(&variant_manifest)?; + let image_features = variant_manifest.image_features(); let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into(); let manifest = @@ -212,7 +215,7 @@ fn build_package() -> Result<()> { println!("cargo:rerun-if-changed={}", f.display()); } - PackageBuilder::build(&package).context(error::BuildAttemptSnafu)?; + PackageBuilder::build(&package, image_features).context(error::BuildAttemptSnafu)?; Ok(()) } From 8960b9cea4b1029f58fad2678d954aa96957efb1 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 7 Oct 2022 21:25:16 +0000 Subject: [PATCH 0794/1356] packages: Add AWS config Adds configuration settings for controlling AWS credentials and configuration. This enables setting the `~/.aws/config` and `~/.aws/credentials` file contents so anything using an aws client can be configured to use something other than the default instance role. This adds the AwsSettings to the non-AWS k8s variants in preparation of their use for configuring credential providers. Signed-off-by: Sean McGinnis --- README.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c62de765..fd303b81 100644 --- a/README.md +++ b/README.md @@ -951,10 +951,26 @@ They can be overridden for testing purposes in [the same way as other settings]( ##### AWS-specific settings -AWS-specific settings are automatically set based on calls to the Instance MetaData Service (IMDS). 
+* `settings.aws.config`: The base64 encoded content to use for AWS configuration (e.g. `base64 -w0 ~/.aws/config`). +* `settings.aws.credentials`: The base64 encoded content to use for AWS credentials (e.g. `base64 -w0 ~/.aws/credentials`). +* `settings.aws.profile`: The profile name to use from the provided `config` and `credentials` settings. + + For example: + + ```toml + [settings.aws] + profile = "myprofile" + ``` + + **Note**: If `settings.aws.profile` is not set, the setting will fall back to the "default" profile. + + **Note:** The `config`, `credentials`, and `profile` settings are optional and do not need to be set when using an Instance Profile while running on an AWS instance. * `settings.aws.region`: This is set to the AWS region in which the instance is running, for example `us-west-2`. + The `region` setting is automatically inferred based on calls to the Instance MetaData Service (IMDS) when running within AWS. + It does not need to be explicitly set unless you have a reason to override this default value. + ### Logs You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. From 076a72ac025ef0410f41b84db8a08b5e93ff6923 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 3 Nov 2022 17:46:49 +0000 Subject: [PATCH 0795/1356] buildsys: fix package rebuilds for feature flags The previous approach for detecting changes to image feature flags that should trigger a rebuild was incorrect. It would cause packages to be rebuilt whenever the variant manifest that was the target at the time of last rebuild changed. This would happen regardless of whether image feature flags changed, whether the same flags were set for the current variant manifest, or whether the flags were even used by the package. Fixing this requires two prerequisites. First, the current image feature flags must be set in the environment prior to invoking `cargo build`, or it will be too late to emit the `cargo:rerun-if-env-changed` directives, since the environment that `cargo` runs in is not influenced by changes from the build script. Second, each package must declare which image feature flags it cares about, if any, so that the correct cargo directive can be emitted. The environment does not carry this information, since the absence of a particular environment variable can also mean that a package needs to be rebuilt to disable a previously-enabled feature. With this functionality in place, each package build can emit the directives needed for correct change detection. For robustness, only the feature flags tracked by the package are passed through to the build, meaning the feature will always be treated as "disabled" by bconds in the spec file.
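As a rough sketch of how the two declarations fit together, using the existing `grub-set-private-var` flag (the section names and the environment variable prefix follow the buildsys and bottlerocket-variant changes below):

```toml
# In the variant's Cargo.toml: the flag is enabled for this variant, so
# bottlerocket-variant emits an export of BUILDSYS_VARIANT_IMAGE_FEATURE_GRUB_SET_PRIVATE_VAR=1.
[package.metadata.build-variant.image-features]
grub-set-private-var = true

# In the package's Cargo.toml: the package declares that it tracks this flag, so buildsys
# emits the matching cargo:rerun-if-env-changed directive and passes the flag through to
# the package build; flags the package does not track are filtered out.
[package.metadata.build-package]
package-features = [ "grub-set-private-var" ]
```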
Fixes: eb44f13a7877 ("build: pass image features to package builds") Signed-off-by: Ben Cressey --- .../src/bin/bottlerocket-variant/main.rs | 24 +++++++++++++-- tools/buildsys/src/builder.rs | 7 +++-- tools/buildsys/src/main.rs | 25 ++++++++++++++-- tools/buildsys/src/manifest.rs | 30 ++++++++++++++----- 4 files changed, 71 insertions(+), 15 deletions(-) diff --git a/tools/buildsys/src/bin/bottlerocket-variant/main.rs b/tools/buildsys/src/bin/bottlerocket-variant/main.rs index 77f9e5e2..d994a983 100644 --- a/tools/buildsys/src/bin/bottlerocket-variant/main.rs +++ b/tools/buildsys/src/bin/bottlerocket-variant/main.rs @@ -1,5 +1,7 @@ use bottlerocket_variant::Variant; +use buildsys::manifest::ManifestInfo; use snafu::ResultExt; +use std::path::PathBuf; use std::{env, process}; // Returning a Result from main makes it print a Debug representation of the error, but with Snafu @@ -12,10 +14,12 @@ fn main() { } } -/// Read `BUILDSYS_VARIANT` from the environment, parse into its components, -/// and emit related environment variables to set. +/// Read `BUILDSYS_VARIANT` from the environment, parse into its components, and emit related +/// environment variables to set (or export). Do the same for features defined in the variant +/// manifest. fn run() -> Result<()> { - let variant = Variant::new(getenv("BUILDSYS_VARIANT")?).context(error::VariantParseSnafu)?; + let env = getenv("BUILDSYS_VARIANT")?; + let variant = Variant::new(&env).context(error::VariantParseSnafu)?; println!("BUILDSYS_VARIANT_PLATFORM={}", variant.platform()); println!("BUILDSYS_VARIANT_RUNTIME={}", variant.runtime()); println!("BUILDSYS_VARIANT_FAMILY={}", variant.family()); @@ -23,6 +27,16 @@ fn run() -> Result<()> { "BUILDSYS_VARIANT_FLAVOR={}", variant.variant_flavor().unwrap_or("''") ); + let manifest = PathBuf::from(getenv("BUILDSYS_ROOT_DIR")?) + .join("variants") + .join(&env) + .join("Cargo.toml"); + let variant_manifest = ManifestInfo::new(manifest).context(error::ManifestParseSnafu)?; + if let Some(image_features) = variant_manifest.image_features() { + for image_feature in image_features { + println!("export BUILDSYS_VARIANT_IMAGE_FEATURE_{}=1", image_feature); + } + } Ok(()) } @@ -41,6 +55,10 @@ mod error { source: bottlerocket_variant::error::Error, }, + ManifestParse { + source: buildsys::manifest::Error, + }, + #[snafu(display("Missing environment variable '{}'", var))] Environment { var: String, diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 86768fa5..d6aabe27 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -74,7 +74,10 @@ pub(crate) struct PackageBuilder; impl PackageBuilder { /// Build RPMs for the specified package. 
- pub(crate) fn build(package: &str, image_features: Option>) -> Result { + pub(crate) fn build( + package: &str, + image_features: Option>, + ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); let arch = getenv("BUILDSYS_ARCH")?; let goarch = serde_plain::from_str::(&arch) @@ -130,7 +133,7 @@ impl VariantBuilder { image_format: Option<&ImageFormat>, image_layout: Option<&ImageLayout>, kernel_parameters: Option<&Vec>, - image_features: Option>, + image_features: Option>, ) -> Result { let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs index cf64c24e..5ea01121 100644 --- a/tools/buildsys/src/main.rs +++ b/tools/buildsys/src/main.rs @@ -131,16 +131,35 @@ fn build_package() -> Result<()> { let root_dir: PathBuf = getenv("BUILDSYS_ROOT_DIR")?.into(); let variant = getenv("BUILDSYS_VARIANT")?; let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file); - println!("cargo:rerun-if-changed={}", variant_manifest_path.display()); - let variant_manifest = ManifestInfo::new(variant_manifest_path).context(error::ManifestParseSnafu)?; supported_arch(&variant_manifest)?; - let image_features = variant_manifest.image_features(); + let mut image_features = variant_manifest.image_features(); let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into(); let manifest = ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; + let package_features = manifest.package_features(); + + // For any package feature specified in the package manifest, track the corresponding + // environment variable for changes to the ambient set of image features for the current + // variant. + if let Some(package_features) = &package_features { + for package_feature in package_features { + println!( + "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_IMAGE_FEATURE_{}", + package_feature + ); + } + } + + // Keep only the image features that the package has indicated that it tracks, if any. + if let Some(image_features) = &mut image_features { + match package_features { + Some(package_features) => image_features.retain(|k| package_features.contains(k)), + None => image_features.clear(), + } + } // If manifest has package.metadata.build-package.variant-sensitive set, then track the // appropriate environment variable for changes. diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index a3eee8f8..cf1498f2 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -105,6 +105,18 @@ variant-sensitive = "runtime" variant-sensitive = "family" ``` +`package-features` is a list of image features that the package tracks. This is +useful when the way the package is built changes based on whether a particular +image feature is enabled for the current variant, rather than when the variant +tuple changes. + +``` +[package.metadata.build-package] +package-features = [ + "grub-set-private-var", +] +``` + `releases-url` is ignored by buildsys, but can be used by packager maintainers to indicate a good URL for checking whether the software has had a new release. ``` @@ -242,6 +254,12 @@ impl ManifestInfo { .and_then(|b| b.variant_sensitive.as_ref()) } + /// Convenience method to return the image features tracked by this package. 
+ pub fn package_features(&self) -> Option> { + self.build_package() + .and_then(|b| b.package_features.as_ref().map(|m| m.iter().collect())) + } + /// Convenience method to return the list of included packages. pub fn included_packages(&self) -> Option<&Vec> { self.build_variant() @@ -271,14 +289,11 @@ impl ManifestInfo { } /// Convenience method to return the enabled image features for this variant. - pub fn image_features(&self) -> Option> { + pub fn image_features(&self) -> Option> { self.build_variant().and_then(|b| { - b.image_features.as_ref().and_then(|m| { - m.iter() - .filter(|(_k, v)| **v) - .map(|(k, _v)| Some(k)) - .collect() - }) + b.image_features + .as_ref() + .map(|m| m.iter().filter(|(_k, v)| **v).map(|(k, _v)| k).collect()) }) } @@ -320,6 +335,7 @@ pub struct BuildPackage { pub releases_url: Option, pub source_groups: Option>, pub variant_sensitive: Option, + pub package_features: Option>, } #[derive(Deserialize, Debug)] From d029e69223d68453d9ccffb890b4dfc52ad6f8ee Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 1 Sep 2022 11:05:00 -0500 Subject: [PATCH 0796/1356] kubelet: add image credential provider settings Add kubelet config option `credential-providers` to allow configuring image credential provider settings. Mention of the new settings have been added to the README, but more detailed documentation on how to use the feature will be necessary. Those docs will be added in a future commit. Signed-off-by: Sean McGinnis --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index fd303b81..56f223d1 100644 --- a/README.md +++ b/README.md @@ -421,6 +421,26 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. +* `settings.kubernetes.credential-providers`: Contains a collection of Kubelet image credential provider settings. + Each name under `credential-providers` is the name of the plugin to configure. + + Example user data for configuring the `ecr-credential-provider` credential provider plugin: + + ```toml + [settings.kubernetes.credential-providers.ecr-credential-provider] + enabled = true + # (optional - defaults to "12h") + cache-duration = "30m" + image-patterns = [ + # One or more URL paths to match an image prefix. Supports globbing of subdomains. + "*.dkr.ecr.us-east-2.amazonaws.com", + "*.dkr.ecr.us-west-2.amazonaws.com" + ] + ``` + + **Note:** `ecr-credential-provider` is currently the only supported provider. + To manage its AWS credentials, see the `settings.aws.config` and `settings.aws.credentials` settings. + * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. * `settings.kubernetes.event-qps`: The maximum event creations per second. 
* `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. From ecf6ca54f078d029f4da67f895f2a6744409076d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Sat, 29 Oct 2022 01:57:51 +0000 Subject: [PATCH 0797/1356] feature: Enable configuration of Kubelet TLS certs This enables the ability to provide a TLS public and private key to be used by the kubelet process for HTTPS communication. This corresponds to the `--tls-cert-file` and `--tls-key-file` arguments (or the `tlsCertFile` and `tlsPrivateKeyFile` config settings). Signed-off-by: Sean McGinnis --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 56f223d1..931ac2b8 100644 --- a/README.md +++ b/README.md @@ -476,6 +476,8 @@ The following settings are optional and allow you to further configure your clus ephemeral-storage= "1Gi" ``` +* `settings.kubernetes.server-certificate`: The base64 encoded content of an x509 certificate for the Kubelet web server, which is used for retrieving logs and executing commands. +* `settings.kubernetes.server-key`: The base64 encoded content of an x509 private key for the Kubelet web server. * `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. * `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. From 3b37b7c8b7aa68c0befb110dcf87fc5075aa7dfe Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 28 Oct 2022 18:41:05 +0000 Subject: [PATCH 0798/1356] tools: fix ShellCheck warnings --- tools/docker-go | 1 + tools/partyplanner | 3 ++- tools/start-local-vm | 14 ++++++++++---- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/tools/docker-go b/tools/docker-go index 7935ae3c..d0b625c4 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -30,6 +30,7 @@ required_arg() { fi } +# shellcheck disable=SC2124 # TODO: improve command interface (#2534) parse_args() { while [ ${#} -gt 0 ] ; do case "${1}" in diff --git a/tools/partyplanner b/tools/partyplanner index edd852f8..259ee4bf 100755 --- a/tools/partyplanner +++ b/tools/partyplanner @@ -1,4 +1,5 @@ -#!/bin/bash +#!/usr/bin/env bash +# shellcheck disable=SC2034 # Variables are used externally by rpm2img ############################################################################### # Section 1: partition type GUIDs diff --git a/tools/start-local-vm b/tools/start-local-vm index c71c033b..4203b55e 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# shellcheck disable=SC2054 # Arrays are formatted for passing args to other tools shopt -s nullglob @@ -14,13 +15,17 @@ current_images=() boot_image= data_image= -readonly repo_root=$(git rev-parse --show-toplevel) - bail() { >&2 echo "$@" exit 1 } +if ! git_toplevel=$(git rev-parse --show-toplevel); then + bail "Failed to get the root of the repo." +else + readonly repo_root="${git_toplevel}" +fi + show_usage() { echo "\ usage: ${0##*/} [--arch BUILDSYS_ARCH] [--variant BUILDSYS_VARIANT] @@ -116,8 +121,9 @@ parse_args() { [[ -n ${arch} ]] || usage_error 'Architecture needs to be set via either --arch or BUILDSYS_ARCH.' [[ -n ${variant} ]] || usage_error 'Variant needs to be set via either --variant or BUILDSYS_VARIANT.' 
- local host_arch=$(uname -m) - [[ ${arch} = ${host_arch} ]] || bail "Architecture needs to match host architecture (${host_arch}) for hardware virtualization." + declare -l host_arch + host_arch=$(uname -m) + [[ ${arch} == "${host_arch}" ]] || bail "Architecture needs to match host architecture (${host_arch}) for hardware virtualization." for path in "${!extra_files[@]}"; do [[ -e ${path} ]] || bail "Cannot find local file '${path}' to inject." From 33f5b804494e96415a6524dd9e308542a8e32601 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 26 Oct 2022 00:35:43 +0000 Subject: [PATCH 0799/1356] ecs: add additional API configurations With this, 4 additional configurations for the ECS agent are supported through the API. There are two configuration files used to set up the ECS agent: - /etc/ecs/ecs.config.json - /etc/ecs/ecs.config We favor the former to add new configurations, and we only use the latter in special cases, i.e. when the configurations to be added aren't modeled as part of the struct that represents the agent's configuration, or when special deserialization is used to parse the configurations. The configurations added in this change are as follows: ECS_CONTAINER_STOP_TIMEOUT: supported through the container-stop-timeout API; this configuration is rendered in the /etc/ecs/ecs.config file since this configuration is of type Duration (1m, 1s, 1h). This type must be parsed by calling the time.ParseDuration function which isn't called under the hood by the serialization libraries used in the ECS agent. ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION: supported through the task-cleanup-wait API; this configuration is of the same type as the previous configuration and was rendered following the same reasoning. ECS_RESERVED_MEMORY: supported through the reserved-memory API; this configuration is rendered in /etc/ecs/ecs.config.json since the configuration's type can be deserialized without additional helper functions. ECS_TASK_METADATA_RPS_LIMIT: this configuration represents a comma-separated string with two values used to set the throttling rates in the metadata service exposed by the ECS agent. These values don't have to be set together, since the ECS agent will use default values if either is missing. Thus, this configuration is supported through the metadata-service-rps and metadata-service-burst APIs. Both configurations are rendered in the /etc/ecs/ecs.config.json file, since the configurations' type can be deserialized without additional helper functions. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 931ac2b8..f92d2911 100644 --- a/README.md +++ b/README.md @@ -533,6 +533,8 @@ These settings can be changed at any time. * `settings.ecs.allow-privileged-containers`: Whether launching privileged containers is allowed on the container instance. If this value is set to false, privileged containers are not permitted. Bottlerocket sets this value to false by default.
* `settings.ecs.image-pull-behavior`: The behavior used to customize the [pull image process](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html#ecs-agent-availparam) for your container instances. Supported values are `default`, `always`, `once`, `prefer-cached`, and the default is `default`. @@ -541,6 +543,13 @@ These settings can be changed at any time. Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. * `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. +* `settings.ecs.metadata-service-rps`: The steady state rate limit of the throttling configurations set for the task metadata service. +* `settings.ecs.metadata-service-burst`: The burst rate limit of the throttling configurations set for the task metadata service. +* `settings.ecs.reserved-memory`: The amount of memory, in MiB, reserved for critical system processes. +* `settings.ecs.task-cleanup-wait`: Time to wait before the task's containers are removed after they are stopped. +Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. + + **Note**: `metadata-service-rps` and `metadata-service-burst` directly map to the values set by the `ECS_TASK_METADATA_RPS_LIMIT` environment variable. #### CloudFormation signal helper settings From f3390dda7946db528a8866f743995e0a9a1c7717 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 4 Nov 2022 15:16:30 +0000 Subject: [PATCH 0800/1356] diff-kernel-config: pass new required env vars for package build Commit 24a7161a ("build: pass variant-related variables into builds") started requiring three new environment variables to be set for a direct package build to denote the platform, runtime, and family of a variant. Adapt diff-kernel-config to pass those when building the kernel packages to avoid build failures. Signed-off-by: Markus Boehme --- tools/diff-kernel-config | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 3be78fba..c052a236 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -171,6 +171,11 @@ for state in before after; do debug_id="state=${state} arch=${arch} variant=${variant}" + IFS=- read -ra variant_parts <<<"${variant}" + variant_platform="${variant_parts[0]}" + variant_runtime="${variant_parts[1]}" + variant_family="${variant_platform}-${variant_runtime}" + # # Run build # @@ -178,6 +183,9 @@ for state in before after; do cargo make \ -e BUILDSYS_ARCH="${arch}" \ -e BUILDSYS_VARIANT="${variant}" \ + -e BUILDSYS_VARIANT_PLATFORM="${variant_platform}" \ + -e BUILDSYS_VARIANT_RUNTIME="${variant_runtime}" \ + -e BUILDSYS_VARIANT_FAMILY="${variant_family}" \ -e PACKAGE="kernel-${kver/./_}" \ build-package \ || bail "Build failed for ${debug_id}" From 1043cc81e75ac8aabc9fe2db7b513723979fb03c Mon Sep 17 00:00:00 2001 From: Richard Kelly Date: Fri, 7 Oct 2022 22:24:39 +0000 Subject: [PATCH 0801/1356] Add support for AWS Organizations to Pubsys Updated Pubsys grant image to be able to modify images attributes to add permissions for AWS Organizations and Organizational Units. EBS Snapshots do not currently support organizations so they have not been changed. 
--- tools/pubsys/src/aws/ami/mod.rs | 15 ++++-- tools/pubsys/src/aws/publish_ami/mod.rs | 70 ++++++++++++------------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index da776866..b6099d00 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -5,7 +5,7 @@ mod register; mod snapshot; pub(crate) mod wait; -use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots}; +use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots, ModifyOptions}; use crate::aws::{client::build_client_config, parse_arch, region_from_string}; use crate::Args; use aws_sdk_ebs::Client as EbsClient; @@ -225,9 +225,15 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> info!("Granting access to target accounts so we can copy the AMI"); let account_id_vec: Vec<_> = account_ids.into_iter().collect(); + let modify_options = ModifyOptions { + user_ids: account_id_vec, + group_names: Vec::new(), + organization_arns: Vec::new(), + organizational_unit_arns: Vec::new(), + }; + modify_snapshots( - Some(account_id_vec.clone()), - None, + &modify_options, &OperationType::Add, &ids_of_image.snapshot_ids, &base_ec2_client, @@ -240,8 +246,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> })?; modify_image( - Some(account_id_vec.clone()), - None, + &modify_options, &OperationType::Add, &ids_of_image.image_id, &base_ec2_client, diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index b5017e98..69f412f8 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -22,6 +22,22 @@ use std::iter::FromIterator; use std::path::PathBuf; use structopt::{clap, StructOpt}; +#[derive(Debug, StructOpt)] +pub(crate) struct ModifyOptions { + /// User IDs to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + pub(crate) user_ids: Vec, + /// Group names to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + pub(crate) group_names: Vec, + /// Organization arns to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + pub(crate) organization_arns: Vec, + /// Organizational unit arns to give/remove access + #[structopt(long, use_delimiter = true, group = "who")] + pub(crate) organizational_unit_arns: Vec, +} + /// Grants or revokes permissions to Bottlerocket AMIs #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] @@ -44,12 +60,8 @@ pub(crate) struct PublishArgs { #[structopt(long, group = "mode")] revoke: bool, - /// User IDs to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] - user_ids: Vec, - /// Group names to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] - group_names: Vec, + #[structopt(flatten)] + modify_opts: ModifyOptions, } /// Common entrypoint from main() @@ -170,8 +182,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { info!("Updating snapshot permissions - {}", description); modify_regional_snapshots( - Some(publish_args.user_ids.clone()), - Some(publish_args.group_names.clone()), + &publish_args.modify_opts, &operation, &snapshots, &ec2_clients, @@ -184,8 +195,7 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { .map(|(region, image)| (region, image.id)) .collect(); modify_regional_images( - Some(publish_args.user_ids.clone()), - 
Some(publish_args.group_names.clone()), + &publish_args.modify_opts, &operation, &ami_ids, &ec2_clients, @@ -300,8 +310,7 @@ async fn get_regional_snapshots( /// Modify createVolumePermission for the given users/groups on the given snapshots. The /// `operation` should be "add" or "remove" to allow/deny permission. pub(crate) async fn modify_snapshots( - user_ids: Option>, - group_names: Option>, + modify_opts: &ModifyOptions, operation: &OperationType, snapshot_ids: &[String], ec2_client: &Ec2Client, @@ -312,8 +321,8 @@ pub(crate) async fn modify_snapshots( let response_future = ec2_client .modify_snapshot_attribute() .set_attribute(Some(SnapshotAttributeName::CreateVolumePermission)) - .set_user_ids(user_ids.clone()) - .set_group_names(group_names.clone()) + .set_user_ids(Some(modify_opts.user_ids.clone())) + .set_group_names(Some(modify_opts.group_names.clone())) .set_operation_type(Some(operation.clone())) .set_snapshot_id(Some(snapshot_id.clone())) .send(); @@ -342,8 +351,7 @@ pub(crate) async fn modify_snapshots( /// Modify createVolumePermission for the given users/groups, across all of the snapshots in the /// given regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. pub(crate) async fn modify_regional_snapshots( - user_ids: Option>, - group_names: Option>, + modify_opts: &ModifyOptions, operation: &OperationType, snapshots: &HashMap>, clients: &HashMap, @@ -352,14 +360,8 @@ pub(crate) async fn modify_regional_snapshots( let mut requests = Vec::new(); for (region, snapshot_ids) in snapshots { let ec2_client = &clients[region]; - let modify_snapshot_future = modify_snapshots( - user_ids.clone(), - group_names.clone(), - operation, - snapshot_ids, - ec2_client, - region, - ); + let modify_snapshot_future = + modify_snapshots(modify_opts, operation, snapshot_ids, ec2_client, region); // Store the region and snapshot ID so we can include it in errors let info_future = ready((region.clone(), snapshot_ids.clone())); @@ -411,8 +413,7 @@ pub(crate) async fn modify_regional_snapshots( /// Modify launchPermission for the given users/groups on the given images. The `operation` /// should be "add" or "remove" to allow/deny permission. pub(crate) async fn modify_image( - user_ids: Option>, - user_groups: Option>, + modify_opts: &ModifyOptions, operation: &OperationType, image_id: &str, ec2_client: &Ec2Client, @@ -422,8 +423,10 @@ pub(crate) async fn modify_image( .set_attribute(Some( ImageAttributeName::LaunchPermission.as_ref().to_string(), )) - .set_user_ids(user_ids.clone()) - .set_user_groups(user_groups.clone()) + .set_user_ids(Some(modify_opts.user_ids.clone())) + .set_user_groups(Some(modify_opts.group_names.clone())) + .set_organization_arns(Some(modify_opts.organization_arns.clone())) + .set_organizational_unit_arns(Some(modify_opts.organizational_unit_arns.clone())) .set_operation_type(Some(operation.clone())) .set_image_id(Some(image_id.to_string())) .send() @@ -433,8 +436,7 @@ pub(crate) async fn modify_image( /// Modify launchPermission for the given users/groups, across all of the images in the given /// regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. 
pub(crate) async fn modify_regional_images( - user_ids: Option>, - user_groups: Option>, + modify_opts: &ModifyOptions, operation: &OperationType, images: &HashMap, clients: &HashMap, @@ -443,13 +445,7 @@ pub(crate) async fn modify_regional_images( for (region, image_id) in images { let ec2_client = &clients[region]; - let modify_image_future = modify_image( - user_ids.clone(), - user_groups.clone(), - operation, - image_id, - ec2_client, - ); + let modify_image_future = modify_image(modify_opts, operation, image_id, ec2_client); // Store the region and image ID so we can include it in errors let info_future = ready((region.as_ref().to_string(), image_id.clone())); From 92266075ba6fdaa608766ad2c5305dd38afcaa5c Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Thu, 6 Oct 2022 15:54:06 +0000 Subject: [PATCH 0802/1356] kernel: build code to boot from IDE disks for aws variants Relevant code to boot from IDE disks needs to be built-in, not a module, as without disk drivers the kernel cannot load any modules. SCSI code needs to be pulled in since libata treats IDE disks as a SCSI device. Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket-aws | 5 +++++ packages/kernel-5.15/config-bottlerocket-aws | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-aws b/packages/kernel-5.10/config-bottlerocket-aws index e69de29b..5cba3885 100644 --- a/packages/kernel-5.10/config-bottlerocket-aws +++ b/packages/kernel-5.10/config-bottlerocket-aws @@ -0,0 +1,5 @@ +# Support boot from IDE disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y diff --git a/packages/kernel-5.15/config-bottlerocket-aws b/packages/kernel-5.15/config-bottlerocket-aws index e69de29b..5cba3885 100644 --- a/packages/kernel-5.15/config-bottlerocket-aws +++ b/packages/kernel-5.15/config-bottlerocket-aws @@ -0,0 +1,5 @@ +# Support boot from IDE disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y From b850c6faed5c9c128f5b71695ae8f7620c51a54a Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 14 Oct 2022 08:28:20 +0000 Subject: [PATCH 0803/1356] kernel: build mlx5 driver for aws variants Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket-aws | 8 ++++++++ packages/kernel-5.15/config-bottlerocket-aws | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-aws b/packages/kernel-5.10/config-bottlerocket-aws index 5cba3885..6b4ed404 100644 --- a/packages/kernel-5.10/config-bottlerocket-aws +++ b/packages/kernel-5.10/config-bottlerocket-aws @@ -3,3 +3,11 @@ CONFIG_ATA=y CONFIG_ATA_PIIX=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m diff --git a/packages/kernel-5.15/config-bottlerocket-aws b/packages/kernel-5.15/config-bottlerocket-aws index 5cba3885..6b4ed404 100644 --- a/packages/kernel-5.15/config-bottlerocket-aws +++ b/packages/kernel-5.15/config-bottlerocket-aws @@ -3,3 +3,11 @@ CONFIG_ATA=y CONFIG_ATA_PIIX=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m From 3ccbcd1cbe2b8dbde638cb55634566d3100efc48 Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Wed, 9 Nov 2022 18:23:12 +0000 Subject: [PATCH 0804/1356] testsys: added comment on how to use k8s secret --- 
tools/testsys/src/run.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 81d2c6f1..f6eb56cf 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -108,7 +108,7 @@ struct CliConfig { #[clap(long, env = "TESTSYS_INSTANCE_TYPE")] instance_type: Option, - /// Add secrets to the testsys agents (`--secret aws-credentials=my-secret`) + /// Add secrets to the testsys agents (`--secret awsCredentials=my-secret`) #[clap(long, short, parse(try_from_str = parse_key_val), number_of_values = 1)] secret: Vec<(String, SecretName)>, } From 9b89186dd0cb232c4c7be66057cc076413b71b99 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 4 Nov 2022 15:33:11 +0000 Subject: [PATCH 0805/1356] diff-kernel-config: handle absolute paths for output directory When extracting the kernel config of a just built kernel diff-kernel-config would always treat the given output directory as a relative path due to a gratuitous "./" in the path it assembles. This would try to write to the wrong path when the script was actually invoked with an absolute path, most likely making it fail because parent directories wouldn't exist. Correctly handle absolute paths by dropping the "./". Other parts of the code dealing with the output directory already don't care whether it's an absolute or relative path. Signed-off-by: Markus Boehme --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index c052a236..cb38fc26 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -211,7 +211,7 @@ for state in before after; do # Extract kernel config # - config_path=./${output_dir}/config-${arch}-${kver}-${variant}-${state} + config_path=${output_dir}/config-${arch}-${kver}-${variant}-${state} rpm2cpio "${kernel_rpm}" \ | cpio --quiet --extract --to-stdout ./boot/config >"${config_path}" [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" From 05ab7a1958647f0fc0d65e3e5f51a9def587fe4d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 4 Nov 2022 17:45:42 +0000 Subject: [PATCH 0806/1356] packages: Add aws_signing_helper for IAM Roles Anywhere This adds a new package to place the `aws_signing_helper` binary in the /usr/bin PATH to enable its use for k8s credential provider support of IAM Roles Anywhere. This adds documentation to our README settings docs to give an example of how to configure IAM Roles Anywhere support with the k8s image credential provider plugin. Signed-off-by: Sean McGinnis --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index f92d2911..87d68075 100644 --- a/README.md +++ b/README.md @@ -441,6 +441,23 @@ The following settings are optional and allow you to further configure your clus **Note:** `ecr-credential-provider` is currently the only supported provider. To manage its AWS credentials, see the `settings.aws.config` and `settings.aws.credentials` settings. + The `ecr-credential-provider` plugin can also be used for AWS IAM Roles Anywhere support. + IAM Roles Anywhere is configured using the `settings.aws.config` setting. 
+ The content of that setting needs to configure the `credential_process` using the `aws_signing_helper` using your IAM Roles Anywhere settings, similar to the following: + + ```ini + [default] + region = us-west-2 + credential_process = aws_signing_helper credential-process \ + --certificate /var/lib/kubelet/pki/kubelet-client-current.pem \ + --private-key /var/lib/kubelet/pki/kubelet-client-current.pem \ + --profile-arn [profile ARN] + --role-arn [role ARN] + --trust-anchor-arn [trust anchor ARN] + ``` + + See the [Roles Anywhere documentation](https://docs.aws.amazon.com/rolesanywhere/latest/userguide/credential-helper.html) for more details on the `aws_signing_helper` arguments. + * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. * `settings.kubernetes.event-qps`: The maximum event creations per second. * `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. From 2dffdc056c3362f3991c3917582a23b596d9d944 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Mon, 31 Oct 2022 17:50:24 +0000 Subject: [PATCH 0807/1356] testsys: Refactor code base Changes the way TestSys crd's are created while keeping the same interface. --- tools/Cargo.lock | 50 +- tools/testsys-config/src/lib.rs | 53 -- tools/testsys/Cargo.toml | 4 +- tools/testsys/src/aws_ecs.rs | 175 +++++ tools/testsys/src/aws_k8s.rs | 157 ++++ tools/testsys/src/aws_resources.rs | 1179 ++++++---------------------- tools/testsys/src/crds.rs | 402 ++++++++++ tools/testsys/src/delete.rs | 10 +- tools/testsys/src/error.rs | 70 ++ tools/testsys/src/install.rs | 6 +- tools/testsys/src/logs.rs | 17 +- tools/testsys/src/main.rs | 18 +- tools/testsys/src/restart_test.rs | 7 +- tools/testsys/src/run.rs | 199 ++--- tools/testsys/src/secret.rs | 19 +- tools/testsys/src/sonobuoy.rs | 91 +++ tools/testsys/src/status.rs | 11 +- tools/testsys/src/uninstall.rs | 6 +- 18 files changed, 1295 insertions(+), 1179 deletions(-) create mode 100644 tools/testsys/src/aws_ecs.rs create mode 100644 tools/testsys/src/aws_k8s.rs create mode 100644 tools/testsys/src/crds.rs create mode 100644 tools/testsys/src/error.rs create mode 100644 tools/testsys/src/sonobuoy.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c87c5200..2edda2dd 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -44,12 +44,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "anyhow" -version = "1.0.65" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" - [[package]] name = "argh" version = "0.1.9" @@ -1395,6 +1389,24 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-openssl" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" +dependencies = [ + "http", + "hyper", + "linked_hash_set", + "once_cell", + "openssl", + "openssl-sys", + "parking_lot", + "tokio", + "tokio-openssl", + "tower-layer", +] + [[package]] name = "hyper-rustls" version = "0.22.1" @@ -1622,6 +1634,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-openssl", "hyper-timeout", "hyper-tls", "jsonpath_lib", @@ -1694,6 +1707,15 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lock_api" version = "0.4.9" @@ -2916,7 +2938,7 @@ dependencies = [ name = "testsys" version = "0.1.0" dependencies = [ - "anyhow", + "async-trait", "aws-config", "aws-sdk-ec2", "base64", @@ -2926,6 +2948,7 @@ dependencies = [ "env_logger", "futures", "k8s-openapi", + "kube-client", "log", "maplit", "model", @@ -2933,6 +2956,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", + "snafu", "term_size", "testsys-config", "tokio", @@ -3097,6 +3121,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index 0e003d88..e37088c6 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -102,35 +102,6 @@ pub struct Test { pub testsys_image_registry: Option, } -#[derive(Debug, Default)] -pub struct AwsK8sVariantConfig { - /// The names of all clusters this variant should be tested over. This is particularly useful - /// for testing Bottlerocket on ipv4 and ipv6 clusters. - pub cluster_names: Vec, - /// The instance type that instances should be launched with - pub instance_type: Option, - /// The secrets needed by the agents - pub secrets: BTreeMap, - /// The role that should be assumed for this particular variant - pub assume_role: Option, - /// The kubernetes conformance image that should be used for this variant - pub kube_conformance_image: Option, - /// The e2e repo containing sonobuoy images - pub e2e_repo_registry: Option, -} - -#[derive(Debug, Default)] -pub struct AwsEcsVariantConfig { - /// The names of all clusters this variant should be tested over - pub cluster_names: Vec, - /// The instance type that instances should be launched with - pub instance_type: Option, - /// The secrets needed by the agents - pub secrets: BTreeMap, - /// The role that should be assumed for this particular variant - pub assume_role: Option, -} - /// Create a vec of relevant keys for this variant ordered from most specific to least specific. fn config_keys(variant: &Variant) -> Vec { let (family_flavor, platform_flavor) = variant @@ -228,30 +199,6 @@ impl GenericVariantConfig { } } -impl From for AwsK8sVariantConfig { - fn from(val: GenericVariantConfig) -> Self { - Self { - cluster_names: val.cluster_names, - instance_type: val.instance_type, - secrets: val.secrets, - assume_role: val.agent_role, - kube_conformance_image: val.conformance_image, - e2e_repo_registry: val.conformance_registry, - } - } -} - -impl From for AwsEcsVariantConfig { - fn from(val: GenericVariantConfig) -> Self { - Self { - cluster_names: val.cluster_names, - instance_type: val.instance_type, - secrets: val.secrets, - assume_role: val.agent_role, - } - } -} - /// Fill in the templated cluster name with `arch` and `variant`. 
pub fn rendered_cluster_name(cluster_name: String, arch: S1, variant: S2) -> Result where diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 4dfdafa3..dd488629 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" publish = false [dependencies] -anyhow = "1.0" +async-trait = "0.1" aws-config = "0.48" aws-sdk-ec2 = "0.18" base64 = "0.13" @@ -17,6 +17,7 @@ clap = { version = "3", features = ["derive", "env"] } env_logger = "0.9" futures = "0.3.8" k8s-openapi = { version = "0.16", features = ["v1_20", "api"], default-features = false } +kube-client = { version = "0.75"} log = "0.4" maplit = "1.0.2" model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.3", tag = "v0.0.3"} @@ -24,6 +25,7 @@ pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_plain = "1" +snafu = "0.7" term_size = "0.3" testsys-config = { path = "../testsys-config/", version = "0.1.0" } tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs new file mode 100644 index 00000000..49c658da --- /dev/null +++ b/tools/testsys/src/aws_ecs.rs @@ -0,0 +1,175 @@ +use crate::aws_resources::{ami, ami_name, ec2_crd, get_ami_id, migration_crd}; +use crate::crds::{ + BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, + TestInput, +}; +use crate::error::{self, Result}; +use bottlerocket_types::agent_config::{ClusterType, EcsClusterConfig, EcsTestConfig}; +use log::debug; +use maplit::btreemap; +use model::{Crd, DestructionPolicy}; +use snafu::OptionExt; + +/// A `CrdCreator` responsible for creating crd related to `aws-ecs` variants. +pub(crate) struct AwsEcsCreator { + pub(crate) region: String, + pub(crate) ami_input: String, + pub(crate) migrate_starting_commit: Option, +} + +#[async_trait::async_trait] +impl CrdCreator for AwsEcsCreator { + /// Determine the AMI from `amis.json`. + fn image_id(&self, _: &CrdInput) -> Result { + ami(&self.ami_input, &self.region) + } + + /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. + async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { + get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version + .as_ref() + .context(error::InvalidSnafu{ + what: "The starting version must be provided for migration testing" + })?, self.migrate_starting_commit + .as_ref() + .context(error::InvalidSnafu{ + what: "The commit for the starting version must be provided if the starting image id is not" + })?) + , &crd_input.arch, + & self.region, + ) + .await + } + + /// Create an ECS cluster CRD with the `cluster_name` in `cluster_input`. + async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { + debug!("Creating ECS cluster CRD"); + // Create labels that will be used for identifying existing CRDs for an ECS cluster. + let labels = cluster_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => "cluster".to_string(), + "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(), + "testsys/region".to_string() => self.region.clone() + }); + + // Check if the cluster already has a CRD in the TestSys cluster. + if let Some(cluster_crd) = cluster_input + .crd_input + .existing_crds( + &labels, + &["testsys/cluster", "testsys/type", "testsys/region"], + ) + .await? 
+ .pop() + { + // Return the name of the existing CRD for the cluster. + debug!("ECS cluster CRD already exists with name '{}'", cluster_crd); + return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); + } + + // Create the CRD for ECS cluster creation. + let ecs_crd = EcsClusterConfig::builder() + .cluster_name(cluster_input.cluster_name) + .region(Some(self.region.to_owned())) + .assume_role(cluster_input.crd_input.config.agent_role.clone()) + .destruction_policy(DestructionPolicy::OnTestSuccess) + .image( + cluster_input + .crd_input + .images + .ecs_resource_agent_image + .as_ref() + .expect("The default ecs resource provider image uri is missing."), + ) + .set_image_pull_secret( + cluster_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .set_secrets(Some(cluster_input.crd_input.config.secrets.clone())) + .build(cluster_input.cluster_name) + .map_err(|e| error::Error::Build { + what: "ECS cluster CRD".to_string(), + error: e.to_string(), + })?; + + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(ecs_crd)))) + } + + /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by + /// `cluster_crd`. + async fn bottlerocket_crd<'a>( + &self, + bottlerocket_input: BottlerocketInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( + ec2_crd(bottlerocket_input, ClusterType::Ecs, &self.region).await?, + )))) + } + + async fn migration_crd<'a>( + &self, + migration_input: MigrationInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( + migration_input, + )?)))) + } + + async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { + let cluster_resource_name = test_input + .cluster_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + let bottlerocket_resource_name = test_input + .bottlerocket_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + + // Create labels that are used to help filter status. + let labels = test_input.crd_input.labels(btreemap! 
{ + "testsys/type".to_string() => test_input.test_type.to_string(), + "testsys/cluster".to_string() => cluster_resource_name.to_string(), + }); + + let test_crd = EcsTestConfig::builder() + .cluster_name_template(cluster_resource_name, "clusterName") + .region(Some(self.region.to_owned())) + .task_count(1) + .assume_role(test_input.crd_input.config.agent_role.to_owned()) + .resources(bottlerocket_resource_name) + .resources(cluster_resource_name) + .set_depends_on(Some(test_input.prev_tests)) + .set_retries(Some(5)) + .image( + test_input + .crd_input + .images + .ecs_test_agent_image + .to_owned() + .expect("The default ECS testing image is missing"), + ) + .set_image_pull_secret( + test_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .keep_running(true) + .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) + .set_labels(Some(labels)) + .build(format!( + "{}-test{}", + cluster_resource_name, + test_input.name_suffix.unwrap_or_default() + )) + .map_err(|e| error::Error::Build { + what: "ECS test CRD".to_string(), + error: e.to_string(), + })?; + + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(test_crd)))) + } +} diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs new file mode 100644 index 00000000..dc171317 --- /dev/null +++ b/tools/testsys/src/aws_k8s.rs @@ -0,0 +1,157 @@ +use crate::aws_resources::{ami, ami_name, ec2_crd, get_ami_id, migration_crd}; +use crate::crds::{ + BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, + TestInput, +}; +use crate::error::{self, Result}; +use crate::sonobuoy::sonobuoy_crd; +use bottlerocket_types::agent_config::{ + ClusterType, CreationPolicy, EksClusterConfig, EksctlConfig, K8sVersion, +}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; +use maplit::btreemap; +use model::constants::NAMESPACE; +use model::{Agent, Configuration, Crd, DestructionPolicy, Resource, ResourceSpec}; +use snafu::{OptionExt, ResultExt}; +use std::str::FromStr; + +/// A `CrdCreator` responsible for creating crd related to `aws-k8s` variants. +pub(crate) struct AwsK8sCreator { + pub(crate) region: String, + pub(crate) ami_input: String, + pub(crate) migrate_starting_commit: Option, +} + +#[async_trait::async_trait] +impl CrdCreator for AwsK8sCreator { + /// Determine the AMI from `amis.json`. + fn image_id(&self, _: &CrdInput) -> Result { + ami(&self.ami_input, &self.region) + } + + /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. + async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { + get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version + .as_ref() + .context(error::InvalidSnafu{ + what: "The starting version must be provided for migration testing" + })?, self.migrate_starting_commit + .as_ref() + .context(error::InvalidSnafu{ + what: "The commit for the starting version must be provided if the starting image id is not" + })?) + , &crd_input.arch, + & self.region, + ) + .await + } + + /// Create an EKS cluster CRD with the `cluster_name` in `cluster_input`. + async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { + let labels = cluster_input.crd_input.labels(btreemap! 
{ + "testsys/type".to_string() => "cluster".to_string(), + "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(), + "testsys/region".to_string() => self.region.clone() + }); + + // Check if the cluster already has a crd + if let Some(cluster_crd) = cluster_input + .crd_input + .existing_crds( + &labels, + &["testsys/cluster", "testsys/type", "testsys/region"], + ) + .await? + .pop() + { + return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); + } + + let cluster_version = + K8sVersion::from_str(cluster_input.crd_input.variant.version().context( + error::MissingSnafu { + item: "K8s version".to_string(), + what: "aws-k8s variant".to_string(), + }, + )?) + .map_err(|_| error::Error::K8sVersion { + version: cluster_input.crd_input.variant.to_string(), + })?; + + let eks_crd = Resource { + metadata: ObjectMeta { + name: Some(cluster_input.cluster_name.to_string()), + namespace: Some(NAMESPACE.into()), + labels: Some(labels), + ..Default::default() + }, + spec: ResourceSpec { + depends_on: None, + conflicts_with: None, + agent: Agent { + name: "eks-provider".to_string(), + image: cluster_input + .crd_input + .images + .eks_resource_agent_image + .to_owned() + .expect("Missing default image for EKS resource agent"), + pull_secret: cluster_input + .crd_input + .images + .testsys_agent_pull_secret + .clone(), + keep_running: false, + timeout: None, + configuration: Some( + EksClusterConfig { + creation_policy: Some(CreationPolicy::IfNotExists), + assume_role: cluster_input.crd_input.config.agent_role.clone(), + config: EksctlConfig::Args { + cluster_name: cluster_input.cluster_name.to_string(), + region: Some(self.region.clone()), + zones: None, + version: Some(cluster_version), + }, + } + .into_map() + .context(error::IntoMapSnafu { + what: "eks crd config".to_string(), + })?, + ), + secrets: Some(cluster_input.crd_input.config.secrets.clone()), + ..Default::default() + }, + destruction_policy: DestructionPolicy::Never, + }, + status: None, + }; + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(eks_crd)))) + } + + /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by + /// `cluster_crd`. 
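+    /// The EC2 provider depends on the cluster CRD and fills its configuration (cluster name,
+    /// region, instance profile, subnets, endpoint, and so on) from that resource's outputs via
+    /// templated fields.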
+ async fn bottlerocket_crd<'a>( + &self, + bottlerocket_input: BottlerocketInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( + ec2_crd(bottlerocket_input, ClusterType::Eks, &self.region).await?, + )))) + } + + async fn migration_crd<'a>( + &self, + migration_input: MigrationInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( + migration_input, + )?)))) + } + + async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( + test_input, + )?)))) + } +} diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index c8b265c4..1bf45241 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -1,954 +1,39 @@ -use crate::run::TestType; -use anyhow::{anyhow, Context, Result}; -use bottlerocket_types::agent_config::{ - ClusterType, CreationPolicy, Ec2Config, EcsClusterConfig, EcsTestConfig, EksClusterConfig, - EksctlConfig, K8sVersion, MigrationConfig, SonobuoyConfig, SonobuoyMode, TufRepoConfig, -}; - +use crate::crds::{BottlerocketInput, MigrationDirection, MigrationInput}; +use crate::error::{self, Result}; use aws_sdk_ec2::model::{Filter, Image}; use aws_sdk_ec2::Region; -use bottlerocket_variant::Variant; -use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; -use k8s_openapi::serde_json::Value; -use log::debug; +use bottlerocket_types::agent_config::{ClusterType, Ec2Config, MigrationConfig}; use maplit::btreemap; -use model::clients::{AllowNotFound, CrdClient}; -use model::constants::NAMESPACE; -use model::test_manager::{SelectionParams, TestManager}; -use model::{ - Agent, Configuration, Crd, DestructionPolicy, Resource, ResourceSpec, SecretName, Test, - TestSpec, -}; -use std::collections::BTreeMap; -use testsys_config::{ - rendered_cluster_name, AwsEcsVariantConfig, AwsK8sVariantConfig, TestsysImages, -}; - -pub(crate) struct AwsK8s { - pub(crate) arch: String, - pub(crate) variant: String, - pub(crate) region: String, - pub(crate) ami: String, - pub(crate) config: AwsK8sVariantConfig, - pub(crate) tuf_repo: Option, - pub(crate) starting_version: Option, - pub(crate) migrate_starting_commit: Option, - pub(crate) starting_image_id: Option, - pub(crate) migrate_to_version: Option, - pub(crate) capabilities: Option>, -} - -impl AwsK8s { - /// Create the necessary test and resource crds for the specified test type. - pub(crate) async fn create_crds( - &self, - client: &TestManager, - test: TestType, - testsys_images: &TestsysImages, - ) -> Result> { - let mut crds = Vec::new(); - let target_cluster_names = if self.config.cluster_names.is_empty() { - debug!("No cluster names were provided using default name"); - vec![self.default_cluster_name()] - } else { - self.config.cluster_names.clone() - }; - for template_cluster_name in target_cluster_names { - let cluster_name = &rendered_cluster_name( - template_cluster_name, - self.kube_arch(), - self.kube_variant(), - )?; - // Check for existing cluster crd - let cluster_exists = client - .resource_client() - .get(&cluster_name) - .await - .allow_not_found(|_| ())? - .is_some(); - if !cluster_exists { - debug!("Cluster crd does not exist"); - crds.push(self.eks_crd(cluster_name, testsys_images, &test)?) 
- } - // Check for conflicting resources (ones that use the same cluster) - let conflicting_resources: Vec = if cluster_exists { - client - .list(&SelectionParams::Label(format!( - "testsys/cluster={}, testsys/type=instances", - cluster_name - ))) - .await? - .into_iter() - // Retrieve the name from each resource - .filter_map(|crd| crd.name()) - .collect() - } else { - Default::default() - }; - - crds.append(&mut match &test { - TestType::Conformance => self.sonobuoy_test_crds( - &test, - testsys_images, - SonobuoyMode::CertifiedConformance, - cluster_name, - &conflicting_resources, - )?, - TestType::Quick => self.sonobuoy_test_crds( - &test, - testsys_images, - SonobuoyMode::Quick, - cluster_name, - &conflicting_resources, - )?, - TestType::Migration => { - self.migration_test_crds( - cluster_name, - &test, - &conflicting_resources, - testsys_images, - ) - .await? - } - }) - } - Ok(crds) - } - - fn sonobuoy_test_crds( - &self, - test_type: &TestType, - testsys_images: &TestsysImages, - sonobuoy_mode: SonobuoyMode, - cluster_name: &str, - conflicting_resources: &[String], - ) -> Result> { - let crds = vec![ - self.ec2_crd( - cluster_name, - "test", - test_type, - conflicting_resources, - testsys_images, - None, - )?, - self.sonobuoy_crd( - "-test", - cluster_name, - test_type, - "test", - sonobuoy_mode, - None, - testsys_images, - )?, - ]; - Ok(crds) - } - - /// Creates `Test` crds for migration testing. - async fn migration_test_crds( - &self, - cluster_name: &str, - test_type: &TestType, - conflicting_resources: &[String], - testsys_images: &TestsysImages, - ) -> Result> { - let ami = if let Some(ami) = self.starting_image_id.to_owned() { - ami - } else { - get_ami_id( - format!( - "bottlerocket-{}-{}-{}-{}", - self.variant, self.arch, self.starting_version.as_ref().context("The starting version must be provided for migration testing")?, self.migrate_starting_commit.as_ref().context("The commit for the starting version must be provided if the starting image id is not")? - ), & self.arch, - self.region.to_string(), - ) - .await? 
- }; - let ec2 = self.ec2_crd( - cluster_name, - "migration", - test_type, - conflicting_resources, - testsys_images, - Some(ami), - )?; - let instance_provider = ec2 - .name() - .expect("The EC2 instance provider crd is missing a name."); - let mut depends_on = Vec::new(); - // Start with a `quick` test to make sure instances launched properly - let initial = self.sonobuoy_crd( - "-1-initial", - cluster_name, - test_type, - "migration", - SonobuoyMode::Quick, - None, - testsys_images, - )?; - depends_on.push(initial.name().context("Crd missing name")?); - // Migrate instances to the target version - let start_migrate = self.migration_crd( - format!("{}-2-migrate", cluster_name), - instance_provider.clone(), - MigrationVersion::Migrated, - Some(depends_on.clone()), - testsys_images, - )?; - // A `quick` test to validate the migration - depends_on.push(start_migrate.name().context("Crd missing name")?); - let migrated = self.sonobuoy_crd( - "-3-migrated", - cluster_name, - test_type, - "migration", - SonobuoyMode::Quick, - Some(depends_on.clone()), - testsys_images, - )?; - // Migrate instances to the starting version - depends_on.push(migrated.name().context("Crd missing name")?); - let end_migrate = self.migration_crd( - format!("{}-4-migrate", cluster_name), - instance_provider, - MigrationVersion::Starting, - Some(depends_on.clone()), - testsys_images, - )?; - // A final quick test to validate the migration back to the starting version - depends_on.push(end_migrate.name().context("Crd missing name")?); - let last = self.sonobuoy_crd( - "-5-final", - cluster_name, - test_type, - "migration", - SonobuoyMode::Quick, - Some(depends_on.clone()), - testsys_images, - )?; - Ok(vec![ - ec2, - initial, - start_migrate, - migrated, - end_migrate, - last, - ]) - } - - /// Labels help filter test results with `testsys status`. - fn labels( - &self, - cluster_name: S1, - testsys_type: S2, - test_type: &TestType, - ) -> BTreeMap - where - S1: Into, - S2: Into, - { - btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - "testsys/cluster".to_string() => cluster_name.into(), - "testsys/type".to_string() => testsys_type.into(), - "testsys/test".to_string() => test_type.to_string(), - } - } - - fn kube_arch(&self) -> String { - self.arch.replace('_', "-") - } - - fn kube_variant(&self) -> String { - self.variant.replace('.', "") - } - - /// Bottlerocket cluster naming convention. - fn default_cluster_name(&self) -> String { - format!("{}-{}", self.kube_arch(), self.kube_variant()) - } - - fn eks_crd( - &self, - cluster_name: &str, - testsys_images: &TestsysImages, - test_type: &TestType, - ) -> Result { - let cluster_version = K8sVersion::parse( - Variant::new(&self.variant) - .context("The provided variant cannot be interpreted.")? 
- .version() - .context("aws-k8s variant is missing k8s version")?, - ) - .map_err(|e| anyhow!(e))?; - let eks_crd = Resource { - metadata: ObjectMeta { - name: Some(cluster_name.to_string()), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels(cluster_name, "cluster", test_type)), - ..Default::default() - }, - spec: ResourceSpec { - depends_on: None, - conflicts_with: None, - agent: Agent { - name: "eks-provider".to_string(), - image: testsys_images - .eks_resource_agent_image - .to_owned() - .expect("Missing default image for EKS resource agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: false, - timeout: None, - configuration: Some( - EksClusterConfig { - creation_policy: Some(CreationPolicy::IfNotExists), - assume_role: self.config.assume_role.clone(), - config: EksctlConfig::Args { - cluster_name: cluster_name.to_string(), - region: Some(self.region.clone()), - zones: None, - version: Some(cluster_version), - }, - } - .into_map() - .context("Unable to convert eks config to map")?, - ), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - destruction_policy: DestructionPolicy::Never, - }, - status: None, - }; - Ok(Crd::Resource(eks_crd)) - } - - fn ec2_crd( - &self, - cluster_name: &str, - resource_name_suffix: &str, - test_type: &TestType, - conflicting_resources: &[String], - testsys_images: &TestsysImages, - override_ami: Option, - ) -> Result { - let mut ec2_config = Ec2Config { - node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), - instance_count: Some(2), - instance_types: self.config.instance_type.iter().cloned().collect(), - cluster_name: format!("${{{}.clusterName}}", cluster_name), - region: format!("${{{}.region}}", cluster_name), - instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), - subnet_ids: Default::default(), - cluster_type: ClusterType::Eks, - endpoint: Some(format!("${{{}.endpoint}}", cluster_name)), - certificate: Some(format!("${{{}.certificate}}", cluster_name)), - cluster_dns_ip: Some(format!("${{{}.clusterDnsIp}}", cluster_name)), - security_groups: vec![], - assume_role: self.config.assume_role.clone(), - } - .into_map() - .context("Unable to create ec2 config")?; - - // TODO - we have change the raw map to reference/template a non string field. 
- ec2_config.insert( - "securityGroups".to_owned(), - Value::String(format!("${{{}.securityGroups}}", cluster_name)), - ); - - ec2_config.insert( - "subnetIds".to_owned(), - Value::String(format!("${{{}.privateSubnetIds}}", cluster_name)), - ); - - let ec2_resource = Resource { - metadata: ObjectMeta { - name: Some(format!( - "{}-instances-{}", - cluster_name, resource_name_suffix - )), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels(cluster_name, "instances", test_type)), - ..Default::default() - }, - spec: ResourceSpec { - depends_on: Some(vec![cluster_name.to_string()]), - conflicts_with: Some(conflicting_resources.into()), - agent: Agent { - name: "ec2-provider".to_string(), - image: testsys_images - .ec2_resource_agent_image - .to_owned() - .expect("Missing default image for EC2 resource agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: false, - timeout: None, - configuration: Some(ec2_config), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - destruction_policy: DestructionPolicy::OnTestSuccess, - }, - status: None, - }; - Ok(Crd::Resource(ec2_resource)) - } - - #[allow(clippy::too_many_arguments)] - fn sonobuoy_crd( - &self, - test_name_suffix: &str, - cluster_name: &str, - test_type: &TestType, - ec2_resource_name_suffix: &str, - sonobuoy_mode: SonobuoyMode, - depends_on: Option>, - testsys_images: &TestsysImages, - ) -> Result { - let ec2_resource_name = format!("{}-instances-{}", cluster_name, ec2_resource_name_suffix); - let test_name = format!("{}{}", cluster_name, test_name_suffix); - let sonobuoy = Test { - metadata: ObjectMeta { - name: Some(test_name), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels(cluster_name, "test", test_type)), - ..Default::default() - }, - spec: TestSpec { - resources: vec![ec2_resource_name, cluster_name.to_string()], - depends_on, - retries: Some(5), - agent: Agent { - name: "sonobuoy-test-agent".to_string(), - image: testsys_images - .sonobuoy_test_agent_image - .to_owned() - .expect("Missing default image for Sonobuoy test agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: true, - timeout: None, - configuration: Some( - SonobuoyConfig { - kubeconfig_base64: format!("${{{}.encodedKubeconfig}}", cluster_name), - plugin: "e2e".to_string(), - mode: sonobuoy_mode, - kubernetes_version: None, - kube_conformance_image: self.config.kube_conformance_image.clone(), - e2e_repo_config_base64: self.config.e2e_repo_registry.as_ref().map( - |e2e_registry| { - base64::encode(format!( - r#"buildImageRegistry: {e2e_registry} -dockerGluster: {e2e_registry} -dockerLibraryRegistry: {e2e_registry} -e2eRegistry: {e2e_registry} -e2eVolumeRegistry: {e2e_registry} -gcRegistry: {e2e_registry} -gcEtcdRegistry: {e2e_registry} -promoterE2eRegistry: {e2e_registry} -sigStorageRegistry: {e2e_registry}"# - )) - }, - ), - assume_role: self.config.assume_role.clone(), - } - .into_map() - .context("Unable to convert sonobuoy config to `Map`")?, - ), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - }, - status: None, - }; - - Ok(Crd::Test(sonobuoy)) - } -} - -/// In order to easily create migration tests for `aws-k8s` variants we need to implement -/// `Migration` for it. 
-impl Migration for AwsK8s { - fn migration_config(&self) -> Result { - Ok(MigrationsConfig { - tuf_repo: self - .tuf_repo - .to_owned() - .context("Tuf repo metadata is required for upgrade downgrade testing.")?, - starting_version: self - .starting_version - .to_owned() - .context("You must provide a starting version for upgrade downgrade testing.")?, - migrate_to_version: self - .migrate_to_version - .to_owned() - .context("You must provide a target version for upgrade downgrade testing.")?, - region: self.region.to_string(), - secrets: Some(self.config.secrets.clone()), - capabilities: self.capabilities.clone(), - assume_role: self.config.assume_role.clone(), - }) - } - - fn migration_labels(&self) -> BTreeMap { - btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - "testsys/type".to_string() => "test".to_string(), - "testsys/test".to_string() => TestType::Migration.to_string(), +use model::{DestructionPolicy, Resource, Test}; +use serde::Deserialize; +use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::HashMap; +use std::fs::File; + +/// Get the AMI for the given `region` from the `ami_input` file. +pub(crate) fn ami(ami_input: &str, region: &str) -> Result { + let file = File::open(ami_input).context(error::IOSnafu { + what: "Unable to open amis.json", + })?; + // Convert the `ami_input` file to a `HashMap` that maps regions to AMI id. + let amis: HashMap = + serde_json::from_reader(file).context(error::SerdeJsonSnafu { + what: format!("Unable to deserialize '{}'", ami_input), + })?; + // Make sure there are some AMIs present in the `ami_input` file. + ensure!( + !amis.is_empty(), + error::InvalidSnafu { + what: format!("{} is empty", ami_input) } - } -} - -/// All information required to test ECS variants of Bottlerocket are captured in the `AwsEcs` -/// struct for migration testing, either `starting_version` and `migration_starting_commit`, or -/// `starting_image_id` must be set. TestSys supports `quick` and `migration` testing on ECS -/// variants. -pub(crate) struct AwsEcs { - /// The architecture to test (`x86_64`,`aarch64') - pub(crate) arch: String, - /// The variant to test (`aws-ecs-1`) - pub(crate) variant: String, - /// The region testing should be performed in - pub(crate) region: String, - /// Configuration for the variant - pub(crate) config: AwsEcsVariantConfig, - /// The ami that should be used for quick testing - pub(crate) ami: String, - - // Migrations - /// The TUF repos for migration testing. If no TUF repos are used, the default Bottlerocket - /// repos will be used - pub(crate) tuf_repo: Option, - /// The starting version for migration testing - pub(crate) starting_version: Option, - /// The AMI id of the starting version for migration testing - pub(crate) starting_image_id: Option, - /// The short commit SHA of the starting version - pub(crate) migrate_starting_commit: Option, - /// The target version for Bottlerocket migrations - pub(crate) migrate_to_version: Option, - /// Additional capabilities that need to be enabled on the agent's pods - pub(crate) capabilities: Option>, -} - -impl AwsEcs { - /// Create the necessary test and resource crds for the specified test type. 
- pub(crate) async fn create_crds( - &self, - test: TestType, - testsys_images: &TestsysImages, - ) -> Result> { - let mut crds = Vec::new(); - let target_cluster_names = if self.config.cluster_names.is_empty() { - debug!("No cluster names were provided using default name"); - vec![self.default_cluster_name()] - } else { - self.config.cluster_names.clone() - }; - for template_cluster_name in target_cluster_names { - let cluster_name = &rendered_cluster_name( - template_cluster_name, - self.kube_arch(), - self.kube_variant(), - )?; - crds.append(&mut match test { - TestType::Conformance => { - return Err(anyhow!( - "Conformance testing for ECS variants is not supported." - )) - } - TestType::Quick => self.ecs_test_crds(cluster_name, testsys_images)?, - TestType::Migration => { - self.migration_test_crds(cluster_name, testsys_images) - .await? - } - }); - } - - Ok(crds) - } - - fn ecs_test_crds( - &self, - cluster_name: &str, - testsys_images: &TestsysImages, - ) -> Result> { - let crds = vec![ - self.ecs_crd(cluster_name, testsys_images)?, - self.ec2_crd(cluster_name, testsys_images, None)?, - self.ecs_test_crd(cluster_name, "-test", None, testsys_images)?, - ]; - Ok(crds) - } - - async fn migration_test_crds( - &self, - cluster_name: &str, - testsys_images: &TestsysImages, - ) -> Result> { - let ami = self - .starting_image_id - .as_ref() - .unwrap_or( - &get_ami_id( - format!( - "bottlerocket-{}-{}-{}-{}", - self.variant, - self.arch, - self.starting_version.as_ref().context("The starting version must be provided for migration testing")?, - self.migrate_starting_commit.as_ref().context("The commit for the starting version must be provided if the starting image id is not")? - ), & self.arch, - self.region.to_string(), - ) - .await?, - ) - .to_string(); - let ecs = self.ecs_crd(cluster_name, testsys_images)?; - let ec2 = self.ec2_crd(cluster_name, testsys_images, Some(ami))?; - let instance_provider = ec2 - .name() - .expect("The EC2 instance provider crd is missing a name."); - let mut depends_on = Vec::new(); - let initial = self.ecs_test_crd(cluster_name, "-1-initial", None, testsys_images)?; - depends_on.push(initial.name().context("Crd missing name")?); - let start_migrate = self.migration_crd( - format!("{}-2-migrate", cluster_name), - instance_provider.clone(), - MigrationVersion::Migrated, - Some(depends_on.clone()), - testsys_images, - )?; - depends_on.push(start_migrate.name().context("Crd missing name")?); - let migrated = self.ecs_test_crd( - cluster_name, - "-3-migrated", - Some(depends_on.clone()), - testsys_images, - )?; - depends_on.push(migrated.name().context("Crd missing name")?); - let end_migrate = self.migration_crd( - format!("{}-4-migrate", cluster_name), - instance_provider, - MigrationVersion::Starting, - Some(depends_on.clone()), - testsys_images, - )?; - depends_on.push(end_migrate.name().context("Crd missing name")?); - let last = self.ecs_test_crd( - cluster_name, - "-5-final", - Some(depends_on.clone()), - testsys_images, - )?; - Ok(vec![ - ecs, - ec2, - initial, - start_migrate, - migrated, - end_migrate, - last, - ]) - } - - /// Labels help filter test results with `testsys status`. - fn labels(&self) -> BTreeMap { - btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - } - } - - fn kube_arch(&self) -> String { - self.arch.replace('_', "-") - } - - fn kube_variant(&self) -> String { - self.variant.replace('.', "") - } - - /// Bottlerocket cluster naming convention. 
- fn default_cluster_name(&self) -> String { - format!("{}-{}", self.kube_arch(), self.kube_variant()) - } - - fn ecs_crd(&self, cluster_name: &str, testsys_images: &TestsysImages) -> Result { - let ecs_crd = Resource { - metadata: ObjectMeta { - name: Some(cluster_name.to_string()), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), - ..Default::default() - }, - spec: ResourceSpec { - depends_on: None, - conflicts_with: None, - agent: Agent { - name: "ecs-provider".to_string(), - image: testsys_images - .ecs_resource_agent_image - .to_owned() - .expect("Missing default image for ECS resource agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: false, - timeout: None, - configuration: Some( - EcsClusterConfig { - cluster_name: cluster_name.to_string(), - region: Some(self.region.clone()), - assume_role: self.config.assume_role.clone(), - vpc: None, - iam_instance_profile_name: None, - } - .into_map() - .context("Unable to convert ECS config to map")?, - ), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - destruction_policy: DestructionPolicy::OnTestSuccess, - }, - status: None, - }; - Ok(Crd::Resource(ecs_crd)) - } - - fn ec2_crd( - &self, - cluster_name: &str, - testsys_images: &TestsysImages, - override_ami: Option, - ) -> Result { - let mut ec2_config = Ec2Config { - node_ami: override_ami.unwrap_or_else(|| self.ami.clone()), - instance_count: Some(2), - instance_types: self.config.instance_type.iter().cloned().collect(), - cluster_name: format!("${{{}.clusterName}}", cluster_name), - region: format!("${{{}.region}}", cluster_name), - instance_profile_arn: format!("${{{}.iamInstanceProfileArn}}", cluster_name), - subnet_ids: Default::default(), - cluster_type: ClusterType::Ecs, - endpoint: None, - certificate: None, - cluster_dns_ip: None, - security_groups: vec![], - assume_role: self.config.assume_role.clone(), - } - .into_map() - .context("Unable to create EC2 config")?; - - ec2_config.insert( - "subnetIds".to_owned(), - Value::String(format!("${{{}.privateSubnetIds}}", cluster_name)), - ); - - let ec2_resource = Resource { - metadata: ObjectMeta { - name: Some(format!("{}-instances", cluster_name)), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), - ..Default::default() - }, - spec: ResourceSpec { - depends_on: Some(vec![cluster_name.to_string()]), - conflicts_with: None, - agent: Agent { - name: "ec2-provider".to_string(), - image: testsys_images - .ec2_resource_agent_image - .to_owned() - .expect("Missing default image for EC2 resource agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: false, - timeout: None, - configuration: Some(ec2_config), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - destruction_policy: DestructionPolicy::OnTestSuccess, - }, - status: None, - }; - Ok(Crd::Resource(ec2_resource)) - } - - fn ecs_test_crd( - &self, - cluster_name: &str, - test_name_suffix: &str, - depends_on: Option>, - testsys_images: &TestsysImages, - ) -> Result { - let ec2_resource_name = format!("{}-instances", cluster_name); - let test_name = format!("{}{}", cluster_name, test_name_suffix); - let ecs_test = Test { - metadata: ObjectMeta { - name: Some(test_name), - namespace: Some(NAMESPACE.into()), - labels: Some(self.labels()), - ..Default::default() - }, - spec: TestSpec { - resources: vec![ec2_resource_name, cluster_name.to_string()], - depends_on, - retries: Some(5), - agent: Agent { - name: 
"ecs-test-agent".to_string(), - image: testsys_images - .ecs_test_agent_image - .to_owned() - .expect("Missing default image for ECS test agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: true, - timeout: None, - configuration: Some( - EcsTestConfig { - assume_role: self.config.assume_role.clone(), - region: Some(self.region.clone()), - cluster_name: cluster_name.to_string(), - task_count: 1, - task_definition_name_and_revision: None, - } - .into_map() - .context("Unable to convert sonobuoy config to `Map`")?, - ), - secrets: Some(self.config.secrets.clone()), - ..Default::default() - }, - }, - status: None, - }; - - Ok(Crd::Test(ecs_test)) - } -} - -/// In order to easily create migration tests for `aws-ecs` variants we need to implement -/// `Migration` for it. -impl Migration for AwsEcs { - fn migration_config(&self) -> Result { - Ok(MigrationsConfig { - tuf_repo: self - .tuf_repo - .to_owned() - .context("Tuf repo metadata is required for upgrade downgrade testing.")?, - starting_version: self - .starting_version - .to_owned() - .context("You must provide a starting version for upgrade downgrade testing.")?, - migrate_to_version: self - .migrate_to_version - .to_owned() - .context("You must provide a target version for upgrade downgrade testing.")?, - region: self.region.to_string(), - secrets: Some(self.config.secrets.clone()), - capabilities: self.capabilities.clone(), - assume_role: self.config.assume_role.clone(), - }) - } - - fn migration_labels(&self) -> BTreeMap { - btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - "testsys/flavor".to_string() => "updown".to_string(), - } - } -} - -/// An enum to differentiate between upgrade and downgrade tests. -enum MigrationVersion { - ///`MigrationVersion::Starting` will create a migration to the starting version. - Starting, - ///`MigrationVersion::Migrated` will create a migration to the target version. - Migrated, -} - -/// A configuration containing all information needed to create a migration test for a given -/// variant. -struct MigrationsConfig { - tuf_repo: TufRepoConfig, - starting_version: String, - migrate_to_version: String, - region: String, - secrets: Option>, - capabilities: Option>, - assume_role: Option, -} - -/// Migration is a trait that should be implemented for all traits that use upgrade/downgrade -/// testing. It provides the infrastructure to easily create migration tests. -trait Migration { - /// Create a migration config that is used to create migration tests. - fn migration_config(&self) -> Result; - - /// Create the labels that should be used for the migration tests. - fn migration_labels(&self) -> BTreeMap; - - /// Create a migration test for a given arch/variant. - fn migration_crd( - &self, - test_name: String, - instance_provider: String, - migration_version: MigrationVersion, - depends_on: Option>, - testsys_images: &TestsysImages, - ) -> Result { - // Get the migration configuration for the given type. - let migration = self.migration_config()?; - - // Determine which version we are migrating to. - let version = match migration_version { - MigrationVersion::Starting => migration.starting_version, - MigrationVersion::Migrated => migration.migrate_to_version, - }; - - // Create the migration test crd. 
- let mut migration_config = MigrationConfig { - aws_region: migration.region, - instance_ids: Default::default(), - migrate_to_version: version, - tuf_repo: Some(migration.tuf_repo.clone()), - assume_role: migration.assume_role.clone(), - } - .into_map() - .context("Unable to convert migration config to map")?; - migration_config.insert( - "instanceIds".to_string(), - Value::String(format!("${{{}.ids}}", instance_provider)), - ); - Ok(Crd::Test(Test { - metadata: ObjectMeta { - name: Some(test_name), - namespace: Some(NAMESPACE.into()), - labels: Some(self.migration_labels()), - ..Default::default() - }, - spec: TestSpec { - resources: vec![instance_provider], - depends_on, - retries: None, - agent: Agent { - name: "migration-test-agent".to_string(), - image: testsys_images - .migration_test_agent_image - .to_owned() - .expect("Missing default image for migration test agent"), - pull_secret: testsys_images.testsys_agent_pull_secret.clone(), - keep_running: true, - timeout: None, - configuration: Some(migration_config), - secrets: migration.secrets.clone(), - capabilities: migration.capabilities, - ..Default::default() - }, - }, - status: None, - })) - } + ); + Ok(amis + .get(region) + .context(error::InvalidSnafu { + what: format!("AMI not found for region '{}'", region), + })? + .id + .clone()) } /// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). @@ -958,11 +43,14 @@ where S2: Into, S3: Into, { + // Create the `aws_config` that will be used to search EC2 for AMIs. + // TODO: Follow chain of assumed roles for creating config like pubsys uses. let config = aws_config::from_env() .region(Region::new(region.into())) .load() .await; let ec2_client = aws_sdk_ec2::Client::new(&config); + // Find all images named `name` on `arch` in the `region`. let describe_images = ec2_client .describe_images() .owners("self") @@ -984,12 +72,203 @@ where .await? .images; let images: Vec<&Image> = describe_images.iter().flatten().collect(); + // Make sure there is exactly 1 image that matches the parameters. if images.len() > 1 { - return Err(anyhow!("Multiple images were found")); + return Err(error::Error::Invalid { + what: "Unable to determine AMI. Multiple images were found".to_string(), + }); }; if let Some(image) = images.last().as_ref() { - Ok(image.image_id().context("No image id for AMI")?.to_string()) + Ok(image + .image_id() + .context(error::InvalidSnafu { + what: "No image id for AMI", + })? + .to_string()) } else { - Err(anyhow!("No images were found")) + Err(error::Error::Invalid { + what: "Unable to determine AMI. No images were found".to_string(), + }) } } + +/// Get the standard Bottlerocket AMI name. +pub(crate) fn ami_name(arch: &str, variant: &str, version: &str, commit_id: &str) -> String { + format!( + "bottlerocket-{}-{}-{}-{}", + variant, arch, version, commit_id + ) +} + +#[derive(Clone, Debug, Deserialize)] +pub(crate) struct AmiImage { + pub(crate) id: String, +} + +/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. +pub(crate) async fn ec2_crd<'a>( + bottlerocket_input: BottlerocketInput<'a>, + cluster_type: ClusterType, + region: &str, +) -> Result { + let cluster_name = bottlerocket_input + .cluster_crd_name + .as_ref() + .expect("A cluster provider is required"); + + // Create the labels for this EC2 provider. + let labels = bottlerocket_input.crd_input.labels(btreemap! 
{ + "testsys/type".to_string() => "instances".to_string(), + "testsys/cluster".to_string() => cluster_name.to_string(), + "testsys/region".to_string() => region.to_string() + }); + + // Find all resources using the same cluster. + let conflicting_resources = bottlerocket_input + .crd_input + .existing_crds( + &labels, + &["testsys/cluster", "testsys/type", "testsys/region"], + ) + .await?; + + let mut ec2_builder = Ec2Config::builder(); + ec2_builder + .node_ami(bottlerocket_input.image_id) + .instance_count(2) + .instance_types::>( + bottlerocket_input + .crd_input + .config + .instance_type + .iter() + .cloned() + .collect(), + ) + .cluster_name_template(cluster_name, "clusterName") + .region_template(cluster_name, "region") + .instance_profile_arn_template(cluster_name, "iamInstanceProfileArn") + .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) + .cluster_type(cluster_type.clone()) + .depends_on(cluster_name) + .image( + bottlerocket_input + .crd_input + .images + .ec2_resource_agent_image + .as_ref() + .expect("Missing default image for EC2 resource agent"), + ) + .set_image_pull_secret( + bottlerocket_input + .crd_input + .images + .testsys_agent_pull_secret + .clone(), + ) + .set_labels(Some(labels)) + .set_conflicts_with(conflicting_resources.into()) + .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) + .destruction_policy(DestructionPolicy::OnTestSuccess); + + // Add in the EKS specific configuration. + if cluster_type == ClusterType::Eks { + ec2_builder + .subnet_ids_template(cluster_name, "privateSubnetIds") + .endpoint_template(cluster_name, "endpoint") + .certificate_template(cluster_name, "certificate") + .cluster_dns_ip_template(cluster_name, "clusterDnsIp") + .security_groups_template(cluster_name, "securityGroups"); + } else { + // The default VPC doesn't attach private subnets to an ECS cluster, so public subnet ids + // are used instead. + ec2_builder + .subnet_ids_template(cluster_name, "publicSubnetIds") + // TODO If this is not set, the crd cannot be serialized since it is a `Vec` not + // `Option`. + .security_groups(Vec::new()); + } + + ec2_builder + .build(format!( + "{}-instances-{}", + cluster_name, bottlerocket_input.test_type + )) + .map_err(|e| error::Error::Build { + what: "EC2 instance provider CRD".to_string(), + error: e.to_string(), + }) +} + +/// Create a CRD for migrating Bottlerocket instances using SSM commands. +pub(crate) fn migration_crd(migration_input: MigrationInput) -> Result { + let cluster_resource_name = migration_input + .cluster_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + let bottlerocket_resource_name = migration_input + .bottlerocket_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + + let labels = migration_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => "migration".to_string(), + "testsys/cluster".to_string() => cluster_resource_name.to_string(), + }); + + // Determine which version should be migrated to from `migration_input`. + let migration_version = match migration_input.migration_direction { + MigrationDirection::Upgrade => migration_input + .crd_input + .migrate_to_version + .as_ref() + .context(error::InvalidSnafu { + what: "The target migration version is required", + }), + MigrationDirection::Downgrade => migration_input + .crd_input + .starting_version + .as_ref() + .context(error::InvalidSnafu { + what: "The starting migration version is required", + }), + }?; + + // Create the migration CRD. 
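+    // The migration test agent picks up the region from the cluster resource and the instance ids
+    // from the Bottlerocket (EC2) resource through templated fields, then migrates those instances
+    // to `migration_version`.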
+ MigrationConfig::builder() + .aws_region_template(cluster_resource_name, "region") + .instance_ids_template(bottlerocket_resource_name, "ids") + .migrate_to_version(migration_version) + .tuf_repo(migration_input.crd_input.tuf_metadata()) + .assume_role(migration_input.crd_input.config.agent_role.clone()) + .resources(bottlerocket_resource_name) + .resources(cluster_resource_name) + .set_depends_on(Some(migration_input.prev_tests)) + .image( + migration_input + .crd_input + .images + .migration_test_agent_image + .as_ref() + .expect("Missing default image for migration test agent"), + ) + .set_image_pull_secret( + migration_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .keep_running(true) + .set_secrets(Some(migration_input.crd_input.config.secrets.to_owned())) + .set_labels(Some(labels)) + .build(format!( + "{}{}", + cluster_resource_name, + migration_input.name_suffix.unwrap_or_default() + )) + .map_err(|e| error::Error::Build { + what: "migration CRD".to_string(), + error: e.to_string(), + }) +} diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs new file mode 100644 index 00000000..67f493e0 --- /dev/null +++ b/tools/testsys/src/crds.rs @@ -0,0 +1,402 @@ +use crate::error::{self, Result}; +use crate::run::{KnownTestType, TestType}; +use bottlerocket_types::agent_config::TufRepoConfig; +use bottlerocket_variant::Variant; +use log::{debug, warn}; +use maplit::btreemap; +use model::test_manager::{SelectionParams, TestManager}; +use model::Crd; +use pubsys_config::RepoConfig; +use snafu::OptionExt; +use std::collections::BTreeMap; +use testsys_config::{rendered_cluster_name, GenericVariantConfig, TestsysImages}; + +/// A type that is used for the creation of all CRDs. +pub struct CrdInput<'a> { + pub client: &'a TestManager, + pub arch: String, + pub variant: Variant, + pub config: GenericVariantConfig, + pub repo_config: RepoConfig, + pub starting_version: Option, + pub migrate_to_version: Option, + /// `CrdCreator::starting_image_id` function should be used instead of using this field, so + /// it is not externally visible. + pub(crate) starting_image_id: Option, + pub images: TestsysImages, +} + +impl<'a> CrdInput<'a> { + /// Retrieve the TUF metadata from `Infra.toml` + pub fn tuf_metadata(&self) -> Option { + if let (Some(metadata_base_url), Some(targets_url)) = ( + &self.repo_config.metadata_base_url, + &self.repo_config.targets_url, + ) { + debug!( + "Using TUF metadata from Infra.toml, metadata: '{}', targets: '{}'", + metadata_base_url, targets_url + ); + Some(TufRepoConfig { + metadata_url: format!("{}{}/{}", metadata_base_url, &self.variant, &self.arch), + targets_url: targets_url.to_string(), + }) + } else { + warn!("No TUF metadata was found in Infra.toml using the default TUF repos"); + None + } + } + + /// Create a set of labels for the CRD by adding `additional_labels` to the standard labels. + pub fn labels(&self, additional_labels: BTreeMap) -> BTreeMap { + let mut labels = btreemap! { + "testsys/arch".to_string() => self.arch.to_string(), + "testsys/variant".to_string() => self.variant.to_string(), + }; + let mut add_labels = additional_labels; + labels.append(&mut add_labels); + labels + } + + /// Determine all CRDs that have the same value for each `id_labels` as `labels`. + pub async fn existing_crds( + &self, + labels: &BTreeMap, + id_labels: &[&str], + ) -> Result> { + // Create a single string containing all `label=value` pairs. 
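+        // The resulting label selector looks something like
+        // `testsys/cluster=my-cluster,testsys/type=instances,testsys/region=us-west-2`
+        // (values here are illustrative).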
+ let checks = id_labels + .iter() + .map(|label| { + labels + .get(&label.to_string()) + .map(|value| format!("{}={}", label, value)) + .context(error::InvalidSnafu { + what: format!("The label '{}' was missing", label), + }) + }) + .collect::>>()? + .join(","); + + // Create a list of all CRD names that match all of the specified labels. + Ok(self + .client + .list(&SelectionParams::Label(checks)) + .await? + .iter() + .filter_map(Crd::name) + .collect()) + } + + /// Fill in the templated cluster name with `arch` and `variant`. + fn rendered_cluster_name(&self, raw_cluster_name: String) -> Result { + Ok(rendered_cluster_name( + raw_cluster_name, + self.kube_arch(), + self.kube_variant(), + )?) + } + + /// Get the k8s safe architecture name + fn kube_arch(&self) -> String { + self.arch.replace('_', "-") + } + + /// Get the k8s safe variant name + fn kube_variant(&self) -> String { + self.variant.to_string().replace('.', "") + } + + /// Bottlerocket cluster naming convention. + fn default_cluster_name(&self) -> String { + format!("{}-{}", self.kube_arch(), self.kube_variant()) + } + + /// Get a list of cluster_names for this variant. If there are no cluster names, the default + /// cluster name will be used. + fn cluster_names(&self) -> Result> { + Ok(if self.config.cluster_names.is_empty() { + vec![self.default_cluster_name()] + } else { + self.config + .cluster_names + .iter() + .map(String::to_string) + // Fill the template fields in the clusters name before using it. + .map(|cluster_name| self.rendered_cluster_name(cluster_name)) + .collect::>>()? + }) + } +} + +/// The `CrdCreator` trait is used to create CRDs. Each variant family should have a `CrdCreator` +/// that is responsible for creating the CRDs needed for testing. +#[async_trait::async_trait] +pub(crate) trait CrdCreator: Sync { + /// Return the image id that should be used for normal testing. + fn image_id(&self, crd_input: &CrdInput) -> Result; + + /// Return the image id that should be used as the starting point for migration testing. + async fn starting_image_id(&self, crd_input: &CrdInput) -> Result; + + /// Create a CRD for the cluster needed to launch Bottlerocket. If no cluster CRD is + /// needed, `CreateCrdOutput::None` can be returned. + async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result; + + /// Create a CRD to launch Bottlerocket. `CreateCrdOutput::None` can be returned if this CRD is + /// not needed. + async fn bottlerocket_crd<'a>( + &self, + bottlerocket_input: BottlerocketInput<'a>, + ) -> Result; + + /// Create a CRD that migrates Bottlerocket from one version to another. + async fn migration_crd<'a>( + &self, + migration_input: MigrationInput<'a>, + ) -> Result; + + /// Create a testing CRD for this variant of Bottlerocket. + async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result; + + /// Creates a set of CRDs for the specified variant and test type that can be added to a TestSys + /// cluster. + async fn create_crds(&self, test_type: TestType, crd_input: &CrdInput) -> Result> { + let mut crds = Vec::new(); + for cluster_name in &crd_input.cluster_names()? 
{ + match &test_type { + TestType::Known(test_type) => { + let cluster_output = self + .cluster_crd(ClusterInput { + cluster_name, + crd_input, + }) + .await?; + let cluster_crd_name = cluster_output.crd_name(); + if let Some(crd) = cluster_output.crd() { + debug!("Cluster crd was created for '{}'", cluster_name); + crds.push(crd) + } + match &test_type { + KnownTestType::Conformance | KnownTestType::Quick => { + let bottlerocket_output = self + .bottlerocket_crd(BottlerocketInput { + cluster_crd_name: &cluster_crd_name, + image_id: self.image_id(crd_input)?, + test_type, + crd_input, + }) + .await?; + let bottlerocket_crd_name = bottlerocket_output.crd_name(); + if let Some(crd) = bottlerocket_output.crd() { + debug!("Bottlerocket crd was created for '{}'", cluster_name); + crds.push(crd) + } + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: Default::default(), + name_suffix: None, + }) + .await?; + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + } + KnownTestType::Migration => { + let image_id = if let Some(image_id) = &crd_input.starting_image_id { + debug!("Using the provided starting image id for migration testing '{}'", image_id); + image_id.to_string() + } else { + let image_id = self.starting_image_id(crd_input).await?; + debug!("A starting image id was not provided, '{}' will be used instead.", image_id); + image_id + }; + let bottlerocket_output = self + .bottlerocket_crd(BottlerocketInput { + cluster_crd_name: &cluster_crd_name, + image_id, + test_type, + crd_input, + }) + .await?; + let bottlerocket_crd_name = bottlerocket_output.crd_name(); + if let Some(crd) = bottlerocket_output.crd() { + debug!("Bottlerocket crd was created for '{}'", cluster_name); + crds.push(crd) + } + let mut tests = Vec::new(); + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-1-initial".into(), + }) + .await?; + if let Some(name) = test_output.crd_name() { + tests.push(name) + } + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + let migration_output = self + .migration_crd(MigrationInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-2-migrate".into(), + migration_direction: MigrationDirection::Upgrade, + }) + .await?; + if let Some(name) = migration_output.crd_name() { + tests.push(name) + } + if let Some(crd) = migration_output.crd() { + crds.push(crd) + } + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-3-migrated".into(), + }) + .await?; + if let Some(name) = test_output.crd_name() { + tests.push(name) + } + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + let migration_output = self + .migration_crd(MigrationInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-4-migrate".into(), + migration_direction: MigrationDirection::Downgrade, + }) + .await?; + if let Some(name) = migration_output.crd_name() { + tests.push(name) + } + if let Some(crd) = migration_output.crd() { + crds.push(crd) + } + let test_output = self + 
.test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests, + name_suffix: "-5-final".into(), + }) + .await?; + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + } + } + } + TestType::Unknown(_) => { + return Err(error::Error::Unsupported { + what: "Custom test types".to_string(), + }) + } + } + } + Ok(crds) + } +} + +/// The input used for cluster crd creation +pub struct ClusterInput<'a> { + pub cluster_name: &'a String, + pub crd_input: &'a CrdInput<'a>, +} + +/// The input used for bottlerocket crd creation +pub struct BottlerocketInput<'a> { + pub cluster_crd_name: &'a Option, + /// The image id that should be used by this CRD + pub image_id: String, + pub test_type: &'a KnownTestType, + pub crd_input: &'a CrdInput<'a>, +} + +/// The input used for test crd creation +pub struct TestInput<'a> { + pub cluster_crd_name: &'a Option, + pub bottlerocket_crd_name: &'a Option, + pub test_type: &'a KnownTestType, + pub crd_input: &'a CrdInput<'a>, + /// The set of tests that have already been created that are related to this test + pub prev_tests: Vec, + /// The suffix that should be appended to the end of the test name to prevent naming conflicts + pub name_suffix: Option<&'a str>, +} + +/// The input used for migration crd creation +pub struct MigrationInput<'a> { + pub cluster_crd_name: &'a Option, + pub bottlerocket_crd_name: &'a Option, + pub crd_input: &'a CrdInput<'a>, + /// The set of tests that have already been created that are related to this test + pub prev_tests: Vec, + /// The suffix that should be appended to the end of the test name to prevent naming conflicts + pub name_suffix: Option<&'a str>, + pub migration_direction: MigrationDirection, +} + +pub enum MigrationDirection { + Upgrade, + Downgrade, +} + +pub enum CreateCrdOutput { + /// A new CRD was created and needs to be applied to the cluster. + NewCrd(Box), + /// An existing CRD is already representing this object. + ExistingCrd(String), + /// There is no CRD to create for this step of this family. + None, +} + +impl Default for CreateCrdOutput { + fn default() -> Self { + Self::None + } +} + +impl CreateCrdOutput { + /// Get the name of the CRD that was created or already existed + pub(crate) fn crd_name(&self) -> Option { + match self { + CreateCrdOutput::NewCrd(crd) => { + Some(crd.name().expect("A CRD is missing the name field.")) + } + CreateCrdOutput::ExistingCrd(name) => Some(name.to_string()), + CreateCrdOutput::None => None, + } + } + + /// Get the CRD if it was created + pub(crate) fn crd(self) -> Option { + match self { + CreateCrdOutput::NewCrd(crd) => Some(*crd), + CreateCrdOutput::ExistingCrd(_) => None, + CreateCrdOutput::None => None, + } + } +} diff --git a/tools/testsys/src/delete.rs b/tools/testsys/src/delete.rs index 09d87921..5b52c97d 100644 --- a/tools/testsys/src/delete.rs +++ b/tools/testsys/src/delete.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Result}; +use crate::error::Result; use clap::Parser; use futures::TryStreamExt; use log::info; @@ -10,13 +10,9 @@ pub(crate) struct Delete {} impl Delete { pub(crate) async fn run(self, client: TestManager) -> Result<()> { - let mut stream = client.delete_all().await.context("Unable to delete all")?; + let mut stream = client.delete_all().await?; - while let Some(delete) = stream - .try_next() - .await - .context("A deletion error occured")? - { + while let Some(delete) = stream.try_next().await? 
{ match delete { DeleteEvent::Starting(crd) => println!("Starting delete for {}", crd.name()), DeleteEvent::Deleted(crd) => println!("Delete finished for {}", crd.name()), diff --git a/tools/testsys/src/error.rs b/tools/testsys/src/error.rs new file mode 100644 index 00000000..f2439036 --- /dev/null +++ b/tools/testsys/src/error.rs @@ -0,0 +1,70 @@ +use aws_sdk_ec2::error::DescribeImagesError; +use aws_sdk_ec2::types::SdkError; +use snafu::Snafu; + +pub type Result = std::result::Result; + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(super)))] +pub enum Error { + // `error` must be used instead of `source` because the build function returns + // `std::error::Error` but not `std::error::Error + Sync + Send`. + #[snafu(display("Unable to build '{}': {}", what, error))] + Build { what: String, error: String }, + + #[snafu(context(false), display("{}", source))] + DescribeImages { + source: SdkError, + }, + + #[snafu(display("Unable to create map from {}: {}", what, source))] + IntoMap { what: String, source: model::Error }, + + #[snafu(display("{}", what))] + Invalid { what: String }, + + #[snafu(display("{}: {}", what, source))] + IO { + what: String, + source: std::io::Error, + }, + + #[snafu(display("Unable to parse K8s version '{}'", version))] + K8sVersion { version: String }, + + #[snafu(display("{}", source))] + KubeClient { source: kube_client::error::Error }, + + #[snafu(display("{} was missing from {}", item, what))] + Missing { item: String, what: String }, + + #[snafu(context(false), display("{}", source))] + PubsysConfig { source: pubsys_config::Error }, + + #[snafu(display("Unable to create secret name for '{}': {}", secret_name, source))] + SecretName { + secret_name: String, + source: model::Error, + }, + + #[snafu(display("{}: {}", what, source))] + SerdeJson { + what: String, + source: serde_json::Error, + }, + + #[snafu(context(false), display("{}", source))] + TestManager { source: model::test_manager::Error }, + + #[snafu(context(false), display("{}", source))] + TestsysConfig { source: testsys_config::Error }, + + #[snafu(display("{} is not supported.", what))] + Unsupported { what: String }, + + #[snafu(display("Unable to create `Variant` from `{}`: {}", variant, source))] + Variant { + variant: String, + source: bottlerocket_variant::error::Error, + }, +} diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs index ce54f611..0a01ca77 100644 --- a/tools/testsys/src/install.rs +++ b/tools/testsys/src/install.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Result}; +use crate::error::Result; use clap::Parser; use log::{info, trace}; use model::test_manager::{ImageConfig, TestManager}; @@ -36,9 +36,7 @@ impl Install { (Some(secret), image) => ImageConfig::WithCreds { secret, image }, (None, image) => ImageConfig::Image(image), }; - client.install(controller_image).await.context( - "Unable to install testsys to the cluster. (Some artifacts may be left behind)", - )?; + client.install(controller_image).await?; info!("testsys components were successfully installed."); diff --git a/tools/testsys/src/logs.rs b/tools/testsys/src/logs.rs index 07e5b2cb..89b442cd 100644 --- a/tools/testsys/src/logs.rs +++ b/tools/testsys/src/logs.rs @@ -1,7 +1,8 @@ -use anyhow::{Context, Error, Result}; +use crate::error::{self, Result}; use clap::Parser; use futures::TryStreamExt; use model::test_manager::{ResourceState, TestManager}; +use snafu::{OptionExt, ResultExt}; use unescape::unescape; /// Stream the logs of an object from a testsys cluster. 
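/// Exactly one of `--test` or `--resource` must be given; resource logs additionally require the
/// resource state.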
@@ -28,18 +29,18 @@ impl Logs { pub(crate) async fn run(self, client: TestManager) -> Result<()> { match (self.test, self.resource, self.resource_state) { (Some(test), None, None) => { - let mut logs = client.test_logs(test, self.follow).await.context("Unable to get logs.")?; - while let Some(line) = logs.try_next().await? { - println!("{}", unescape(&String::from_utf8_lossy(&line)).context("Unable to unescape log string")?); + let mut logs = client.test_logs(test, self.follow).await?; + while let Some(line) = logs.try_next().await.context(error::KubeClientSnafu)? { + println!("{}", unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu{what: "Unable to unescape log string"})?); } } (None, Some(resource), Some(state)) => { - let mut logs = client.resource_logs(resource, state, self.follow).await.context("Unable to get logs.")?; - while let Some(line) = logs.try_next().await? { - println!("{}", unescape(&String::from_utf8_lossy(&line)).context("Unable to unescape log string")?); + let mut logs = client.resource_logs(resource, state, self.follow).await?; + while let Some(line) = logs.try_next().await.context(error::KubeClientSnafu)? { + println!("{}", unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu{what: "Unable to unescape log string"})?); } } - _ => return Err(Error::msg("Invalid arguments were provided. Exactly one of `--test` or `--resource` must be given.")), + _ => return Err(error::Error::Invalid{what: "Invalid arguments were provided. Exactly one of `--test` or `--resource` must be given.".to_string()}), }; Ok(()) } diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs index bc86a991..73e3e1a9 100644 --- a/tools/testsys/src/main.rs +++ b/tools/testsys/src/main.rs @@ -1,7 +1,7 @@ -use anyhow::{Context, Result}; use clap::{Parser, Subcommand}; use delete::Delete; use env_logger::Builder; +use error::Result; use install::Install; use log::{debug, error, LevelFilter}; use logs::Logs; @@ -13,13 +13,18 @@ use status::Status; use std::path::PathBuf; use uninstall::Uninstall; +mod aws_ecs; +mod aws_k8s; mod aws_resources; +mod crds; mod delete; +mod error; mod install; mod logs; mod restart_test; mod run; mod secret; +mod sonobuoy; mod status; mod uninstall; @@ -44,15 +49,8 @@ struct TestsysArgs { impl TestsysArgs { async fn run(self) -> Result<()> { let client = match self.kubeconfig { - Some(path) => TestManager::new_from_kubeconfig_path(&path) - .await - .context(format!( - "Unable to create testsys client using kubeconfig '{}'", - path.display() - ))?, - None => TestManager::new().await.context( - "Unable to create testsys client using KUBECONFIG variable or default kubeconfig", - )?, + Some(path) => TestManager::new_from_kubeconfig_path(&path).await?, + None => TestManager::new().await?, }; match self.command { Command::Run(run) => run.run(client).await?, diff --git a/tools/testsys/src/restart_test.rs b/tools/testsys/src/restart_test.rs index cbd4264c..863802ca 100644 --- a/tools/testsys/src/restart_test.rs +++ b/tools/testsys/src/restart_test.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Result}; +use crate::error::Result; use clap::Parser; use model::test_manager::TestManager; @@ -13,9 +13,6 @@ pub(crate) struct RestartTest { impl RestartTest { pub(crate) async fn run(self, client: TestManager) -> Result<()> { - client - .restart_test(&self.test_name) - .await - .context(format!("Unable to restart test '{}'", self.test_name)) + Ok(client.restart_test(&self.test_name).await?) 
} } diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index f6eb56cf..6507bb86 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -1,6 +1,8 @@ -use crate::aws_resources::{AwsEcs, AwsK8s}; -use anyhow::{anyhow, ensure, Context, Result}; -use bottlerocket_types::agent_config::TufRepoConfig; +use crate::aws_ecs::AwsEcsCreator; +use crate::aws_k8s::AwsK8sCreator; +use crate::crds::{CrdCreator, CrdInput}; +use crate::error; +use crate::error::Result; use bottlerocket_variant::Variant; use clap::Parser; use log::{debug, info}; @@ -9,10 +11,9 @@ use model::SecretName; use pubsys_config::InfraConfig; use serde::{Deserialize, Serialize}; use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; -use std::collections::HashMap; -use std::fs::File; +use snafu::{OptionExt, ResultExt}; use std::path::PathBuf; -use testsys_config::{AwsEcsVariantConfig, AwsK8sVariantConfig, GenericVariantConfig, TestConfig}; +use testsys_config::{GenericVariantConfig, TestConfig}; /// Run a set of tests for a given arch and variant #[derive(Debug, Parser)] @@ -128,59 +129,33 @@ impl From for GenericVariantConfig { impl Run { pub(crate) async fn run(self, client: TestManager) -> Result<()> { - let variant = - Variant::new(&self.variant).context("The provided variant cannot be interpreted.")?; + // agent config (eventually with configuration) + let variant = Variant::new(&self.variant).context(error::VariantSnafu { + variant: self.variant, + })?; debug!("Using variant '{}'", variant); // Use Test.toml or default - let test_config = TestConfig::from_path_or_default(&self.test_config_path) - .context("Unable to read test config")?; + let test_config = TestConfig::from_path_or_default(&self.test_config_path)?; - let test_opts = test_config.test.as_ref().cloned().unwrap_or_default(); + let test_opts = test_config.test.to_owned().unwrap_or_default(); + + let variant_config = + test_config.reduced_config(&variant, &self.arch, Some(self.config.into())); // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true) - .context("Unable to read infra config")?; - - let aws = infra_config.aws.unwrap_or_default(); - - // If the user gave an override region, use that, otherwise use the first region from the - // config. - let region = if let Some(region) = self.target_region { - debug!("Using provided region for testing"); - region - } else { - debug!("No region was provided, determining region from `Infra.toml`"); - aws.regions - .clone() - .pop_front() - .context("No region was provided and no regions found in infra config")? - }; + let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true)?; let repo_config = infra_config .repo .unwrap_or_default() - .get( + .remove( &self .repo .or(test_opts.repo) .unwrap_or_else(|| "default".to_string()), ) - .and_then(|repo| { - if let (Some(metadata_base_url), Some(targets_url)) = - (&repo.metadata_base_url, &repo.targets_url) - { - Some(TufRepoConfig { - metadata_url: format!( - "{}{}/{}", - metadata_base_url, &self.variant, &self.arch - ), - targets_url: targets_url.to_string(), - }) - } else { - None - } - }); + .unwrap_or_default(); let images = vec![ Some(self.agent_images.into()), @@ -194,70 +169,62 @@ impl Run { .flatten() .fold(Default::default(), testsys_config::TestsysImages::merge); - let crds = match variant.family() { + // The `CrdCreator` is responsible for creating crds for the given architecture and variant. 
+ let crd_creator: Box = match variant.family() { "aws-k8s" => { - debug!("Variant is in 'aws-k8s' family"); - let bottlerocket_ami = ami(&self.ami_input, ®ion)?; - debug!("Using ami '{}'", bottlerocket_ami); - let config: AwsK8sVariantConfig = test_config - .reduced_config(&variant, &self.arch, Some(self.config.into())) - .into(); - let aws_k8s = AwsK8s { - arch: self.arch, - variant: self.variant, + debug!("Using family 'aws-k8s'"); + let aws_config = infra_config.aws.unwrap_or_default(); + let region = aws_config + .regions + .front() + .map(String::to_string) + .unwrap_or_else(|| "us-west-2".to_string()); + Box::new(AwsK8sCreator { region, - config, - ami: bottlerocket_ami.to_string(), - tuf_repo: repo_config, - starting_version: self.migration_starting_version, - starting_image_id: self.starting_image_id, - migrate_to_version: self.migration_target_version, - capabilities: None, + ami_input: self.ami_input, migrate_starting_commit: self.migration_starting_commit, - }; - debug!("Creating crds for aws-k8s testing"); - - aws_k8s - .create_crds(&client, self.test_flavor, &images) - .await? + }) } "aws-ecs" => { - debug!("Variant is in 'aws-ecs' family"); - let bottlerocket_ami = ami(&self.ami_input, ®ion)?; - debug!("Using ami '{}'", bottlerocket_ami); - let config: AwsEcsVariantConfig = test_config - .reduced_config(&variant, &self.arch, Some(self.config.into())) - .into(); - let aws_ecs = AwsEcs { - arch: self.arch, - variant: self.variant, + debug!("Using family 'aws-ecs'"); + let aws_config = infra_config.aws.unwrap_or_default(); + let region = aws_config + .regions + .front() + .map(String::to_string) + .unwrap_or_else(|| "us-west-2".to_string()); + Box::new(AwsEcsCreator { region, - config, - ami: bottlerocket_ami.to_string(), - tuf_repo: repo_config, - starting_version: self.migration_starting_version, - starting_image_id: self.starting_image_id, + ami_input: self.ami_input, migrate_starting_commit: self.migration_starting_commit, - migrate_to_version: self.migration_target_version, - capabilities: None, - }; - debug!("Creating crds for aws-ecs testing"); - aws_ecs.create_crds(self.test_flavor, &images).await? + }) } - other => { - return Err(anyhow!( - "testsys has not yet added support for the '{}' variant family", - other - )) + unsupported => { + return Err(error::Error::Unsupported { + what: unsupported.to_string(), + }) } }; + let crd_input = CrdInput { + client: &client, + arch: self.arch, + variant, + config: variant_config, + repo_config, + starting_version: self.migration_starting_version, + migrate_to_version: self.migration_target_version, + starting_image_id: self.starting_image_id, + images, + }; + + let crds = crd_creator + .create_crds(self.test_flavor, &crd_input) + .await?; + debug!("Adding crds to testsys cluster"); for crd in crds { - let crd = client - .create_object(crd) - .await - .context("Unable to create object")?; + let crd = client.create_object(crd).await?; info!("Successfully added '{}'", crd.name().unwrap()); } @@ -265,28 +232,23 @@ impl Run { } } -fn ami(ami_input: &str, region: &str) -> Result { - let file = File::open(ami_input).context("Unable to open amis.json")?; - let ami_input: HashMap = - serde_json::from_reader(file).context(format!("Unable to deserialize '{}'", ami_input))?; - ensure!(!ami_input.is_empty(), "amis.json is empty"); - Ok(ami_input - .get(region) - .context(format!("ami not found for region '{}'", region))? 
- .id - .clone()) -} - fn parse_key_val(s: &str) -> Result<(String, SecretName)> { let mut iter = s.splitn(2, '='); - let key = iter.next().context("Key is missing")?; - let value = iter.next().context("Value is missing")?; - Ok((key.to_string(), SecretName::new(value)?)) + let key = iter.next().context(error::InvalidSnafu { + what: "Key is missing", + })?; + let value = iter.next().context(error::InvalidSnafu { + what: "Value is missing", + })?; + Ok(( + key.to_string(), + SecretName::new(value).context(error::SecretNameSnafu { secret_name: value })?, + )) } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] -pub(crate) enum TestType { +pub enum KnownTestType { /// Conformance testing is a full integration test that asserts that Bottlerocket is working for /// customer workloads. For k8s variants, for example, this will run the full suite of sonobuoy /// conformance tests. @@ -301,13 +263,18 @@ pub(crate) enum TestType { Migration, } +/// If a test type is one that is supported by TestSys it will be created as `Known(KnownTestType)`. +/// All other test types will be stored as `Unknown()`. +#[derive(Debug, Serialize, Deserialize)] +#[serde(untagged)] +pub(crate) enum TestType { + Known(KnownTestType), + Unknown(String), +} + derive_fromstr_from_deserialize!(TestType); derive_display_from_serialize!(TestType); - -#[derive(Clone, Debug, Deserialize)] -pub(crate) struct Image { - pub(crate) id: String, -} +derive_display_from_serialize!(KnownTestType); /// This is a CLI parsable version of `testsys_config::TestsysImages` #[derive(Debug, Parser)] diff --git a/tools/testsys/src/secret.rs b/tools/testsys/src/secret.rs index a9d2faa3..75fe6b24 100644 --- a/tools/testsys/src/secret.rs +++ b/tools/testsys/src/secret.rs @@ -1,7 +1,8 @@ -use anyhow::{Context, Result}; +use crate::error::{self, Result}; use clap::Parser; use model::test_manager::TestManager; use model::SecretName; +use snafu::OptionExt; /// Add a testsys object to the testsys cluster. #[derive(Debug, Parser)] @@ -62,10 +63,7 @@ pub(crate) struct AddSecretMap { impl AddSecretMap { pub(crate) async fn run(self, client: TestManager) -> Result<()> { - client - .create_secret(&self.name, self.args) - .await - .context("Unable to create secret")?; + client.create_secret(&self.name, self.args).await?; println!("Successfully added '{}' to secrets.", self.name); Ok(()) } @@ -73,8 +71,12 @@ impl AddSecretMap { fn parse_key_val(s: &str) -> Result<(String, String)> { let mut iter = s.splitn(2, '='); - let key = iter.next().context("Key is missing")?; - let value = iter.next().context("Value is missing")?; + let key = iter.next().context(error::InvalidSnafu { + what: "Key is missing", + })?; + let value = iter.next().context(error::InvalidSnafu { + what: "Value is missing", + })?; Ok((key.to_string(), value.to_string())) } @@ -107,8 +109,7 @@ impl AddSecretImage { &self.pull_password, &self.image_uri, ) - .await - .context("Unable to create pull secret")?; + .await?; println!("The secret was added."); diff --git a/tools/testsys/src/sonobuoy.rs b/tools/testsys/src/sonobuoy.rs new file mode 100644 index 00000000..24501780 --- /dev/null +++ b/tools/testsys/src/sonobuoy.rs @@ -0,0 +1,91 @@ +use crate::crds::TestInput; +use crate::error::{self, Result}; +use crate::run::KnownTestType; +use bottlerocket_types::agent_config::{SonobuoyConfig, SonobuoyMode}; +use maplit::btreemap; +use model::Test; +use std::fmt::Display; + +/// Create a Sonobuoy CRD for K8s conformance and quick testing. 
+pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result { + let cluster_resource_name = test_input + .cluster_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + let bottlerocket_resource_name = test_input + .bottlerocket_crd_name + .as_ref() + .expect("A cluster name is required for migrations"); + let sonobuoy_mode = match test_input.test_type { + KnownTestType::Conformance => SonobuoyMode::CertifiedConformance, + KnownTestType::Quick | KnownTestType::Migration => SonobuoyMode::Quick, + }; + + let labels = test_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => test_input.test_type.to_string(), + "testsys/cluster".to_string() => cluster_resource_name.to_string(), + }); + + SonobuoyConfig::builder() + .resources(bottlerocket_resource_name) + .resources(cluster_resource_name) + .set_depends_on(Some(test_input.prev_tests)) + .set_retries(Some(5)) + .image( + test_input + .crd_input + .images + .sonobuoy_test_agent_image + .to_owned() + .expect("The default Sonobuoy testing image is missing"), + ) + .set_image_pull_secret( + test_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .keep_running(true) + .kubeconfig_base64_template(cluster_resource_name, "encodedKubeconfig") + .plugin("e2e") + .mode(sonobuoy_mode) + .e2e_repo_config_base64( + test_input + .crd_input + .config + .conformance_registry + .to_owned() + .map(e2e_repo_config_base64), + ) + .kube_conformance_image(test_input.crd_input.config.conformance_image.to_owned()) + .assume_role(test_input.crd_input.config.agent_role.to_owned()) + .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) + .set_labels(Some(labels)) + .build(format!( + "{}{}", + cluster_resource_name, + test_input.name_suffix.unwrap_or("-test") + )) + .map_err(|e| error::Error::Build { + what: "sonobuoy CRD".to_string(), + error: e.to_string(), + }) +} + +fn e2e_repo_config_base64(e2e_registry: S) -> String +where + S: Display, +{ + base64::encode(format!( + r#"buildImageRegistry: {e2e_registry} +dockerGluster: {e2e_registry} +dockerLibraryRegistry: {e2e_registry} +e2eRegistry: {e2e_registry} +e2eVolumeRegistry: {e2e_registry} +gcRegistry: {e2e_registry} +gcEtcdRegistry: {e2e_registry} +promoterE2eRegistry: {e2e_registry} +sigStorageRegistry: {e2e_registry}"# + )) +} diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs index fe5f3b94..3222b82c 100644 --- a/tools/testsys/src/status.rs +++ b/tools/testsys/src/status.rs @@ -1,7 +1,8 @@ -use anyhow::{Context, Result}; +use crate::error::{self, Result}; use clap::Parser; use log::{debug, info}; use model::test_manager::{SelectionParams, TestManager}; +use snafu::ResultExt; /// Check the status of testsys objects. #[derive(Debug, Parser)] @@ -34,14 +35,14 @@ impl Status { }; let status = client .status(&SelectionParams::Label(labels.join(",")), self.controller) - .await - .context("Unable to get status")?; + .await?; if self.json { info!( "{}", - serde_json::to_string_pretty(&status) - .context("Could not create string from status.")? + serde_json::to_string_pretty(&status).context(error::SerdeJsonSnafu { + what: "Could not create string from status." + })? 
); } else { let (width, _) = term_size::dimensions().unwrap_or((80, 0)); diff --git a/tools/testsys/src/uninstall.rs b/tools/testsys/src/uninstall.rs index aa4b8961..037acfa3 100644 --- a/tools/testsys/src/uninstall.rs +++ b/tools/testsys/src/uninstall.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Result}; +use crate::error::Result; use clap::Parser; use log::{info, trace}; use model::test_manager::TestManager; @@ -12,9 +12,7 @@ impl Uninstall { pub(crate) async fn run(self, client: TestManager) -> Result<()> { trace!("Uninstalling testsys"); - client.uninstall().await.context( - "Unable to uninstall testsys from the cluster. (Some artifacts may be left behind)", - )?; + client.uninstall().await?; info!("testsys components were successfully uninstalled."); From 6bb0cfffb123998232e6968e1cd43f967ac56edc Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 18 Nov 2022 12:58:36 -0800 Subject: [PATCH 0808/1356] docs, build: Update default variant to aws-k8s-1.24 --- BUILDING.md | 2 +- README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index 55dccfe7..b773439d 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -72,7 +72,7 @@ To build an image, run: cargo make ``` -This will build an image for the default variant, `aws-k8s-1.21`. +This will build an image for the default variant, `aws-k8s-1.24`. All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. diff --git a/README.md b/README.md index 87d68075..c631d060 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ We’re excited to get early feedback and to continue working on more use cases! Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s-1.21` variant will produce an image named `bottlerocket-aws-k8s-1.21-x86_64--.img`. +For example, an `x86_64` build of the `aws-k8s-1.24` variant will produce an image named `bottlerocket-aws-k8s-1.24-x86_64--.img`. The following variants support EKS, as described above: @@ -1098,7 +1098,7 @@ We currently package the following major third-party components: * systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) * wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) * containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) -* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.21/)) +* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.24/)) * aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) * Amazon ECS agent ([background](https://github.com/aws/amazon-ecs-agent), [packaging](packages/ecs-agent/)) From 4c57541eac6b0252adac0363069f037ee005554f Mon Sep 17 00:00:00 2001 From: Stefan Sundin Date: Sat, 12 Nov 2022 12:59:56 -0800 Subject: [PATCH 0809/1356] Fix various minor formatting issues, mostly in the documentation. 
--- BUILDING.md | 34 ++++++----- CONTRIBUTING.md | 2 +- README.md | 56 +++++++++---------- .../kms_key_setup.yml | 2 +- .../cloudformation-templates/s3_setup.yml | 8 +-- .../test_tomls/toml_yaml_conversion.toml | 10 ++-- tools/partyplanner | 10 ++-- tools/pubsys/policies/ssm/README.md | 4 +- tools/rpm2img | 2 +- tools/testsys/Test.toml.example | 4 +- 10 files changed, 69 insertions(+), 63 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index b773439d..44af8ca9 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -25,13 +25,13 @@ Ensure the following OS packages are installed: ##### Ubuntu -``` +```shell apt install build-essential openssl libssl-dev pkg-config liblz4-tool ``` ##### Fedora -``` +```shell yum install make automake gcc openssl openssl-devel pkg-config lz4 perl-FindBin perl-lib ``` @@ -45,7 +45,7 @@ Rust 1.51.0 or higher is required. To organize build tasks, we use [cargo-make](https://sagiegurari.github.io/cargo-make/). To get it, run: -``` +```shell cargo install cargo-make ``` @@ -68,11 +68,11 @@ Docker's [post-installation steps for Linux](https://docs.docker.com/install/lin To build an image, run: -``` +```shell cargo make ``` -This will build an image for the default variant, `aws-k8s-1.24`. +This will build an image for the default variant (a recent `aws-k8s-*`, see the `BUILDSYS_VARIANT` variable in [Makefile.toml](Makefile.toml) to find the current default variant). All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. @@ -80,16 +80,22 @@ If you're planning on [publishing your build](PUBLISHING.md), you may want to ch To build an image for a different variant, run: -``` +```shell cargo make -e BUILDSYS_VARIANT=my-variant-here ``` To build an image for a different architecture, run: -``` +```shell cargo make -e BUILDSYS_ARCH=my-arch-here ``` +If you want to limit the build concurrency, set `BUILDSYS_JOBS` (the default is `8`): + +```shell +cargo make -e BUILDSYS_JOBS=4 +``` + (You can use variant and arch arguments together, too.) #### Package licenses @@ -160,7 +166,7 @@ If you're using an EC2 instance, the [EC2 instance's IAM role](https://docs.aws. For a simple start, pick an [EC2 region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions), then run: -``` +```shell cargo make -e PUBLISH_REGIONS=your-region-here ami ``` @@ -170,7 +176,7 @@ Your new AMI ID will be printed after it's registered. If you built your image for a different architecture or variant, just use the same arguments here: -``` +```shell cargo make -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-here ami ``` @@ -200,19 +206,19 @@ kmod kits are included in the official Bottlerocket repos starting with Bottlero Let's say you want to download the kit for building x86_64 modules for v1.7.0 and variant aws-k8s-1.21. First, you need tuftool: -```bash +```shell cargo install tuftool ``` Next, you need the Bottlerocket root role, which is used by tuftool to verify the kmod kit. 
This will download and verify the root role itself: -```bash +```shell curl -O "https://cache.bottlerocket.aws/root.json" sha512sum -c <<<"b81af4d8eb86743539fbc4709d33ada7b118d9f929f0c2f6c04e1d41f46241ed80423666d169079d736ab79965b4dd25a5a6db5f01578b397496d49ce11a3aa2 root.json" ``` Next, set your desired parameters, and download the kmod kit: -```bash +```shell ARCH=x86_64 VERSION=v1.7.0 VARIANT=aws-k8s-1.21 @@ -227,11 +233,11 @@ tuftool download "${OUTDIR}" --target-name ${VARIANT}-${ARCH}-kmod-kit-${VERSION ### Using the kmod kit To use the kmod kit, extract it, and update your PATH to use its toolchain: -```bash +```shell tar xf "${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz" export CROSS_COMPILE="${ARCH}-bottlerocket-linux-musl-" -export KERNELDIR="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/kernel-devel +export KERNELDIR="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/kernel-devel" export PATH="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/toolchain/usr/bin:${PATH}" ``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eae718f9..724daab6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -39,7 +39,7 @@ To send us a pull request, please: GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). -## Repo branch and tag structure +## Repo branch and tag structure Active development occurs under the `develop` branch. diff --git a/README.md b/README.md index c631d060..a1d3941d 100644 --- a/README.md +++ b/README.md @@ -164,7 +164,7 @@ Once the instance is started, you can start a session: If you prefer a command-line tool, you can start a session with a recent [AWS CLI](https://aws.amazon.com/cli/) and the [session-manager-plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). Then you'd be able to start a session using only your instance ID, like this: -```txt +```shell aws ssm start-session --target INSTANCE_ID ``` @@ -172,7 +172,7 @@ With the [default control container](https://github.com/bottlerocket-os/bottlero To do even more, read the next section about the [admin container](#admin-container). 
You can access the admin container from the control container like this: -```sh +```shell enter-admin-container ``` @@ -192,25 +192,25 @@ enabled = true If Bottlerocket is already running, you can enable the admin container from the default [control container](#control-container) like this: -```sh +```shell enable-admin-container ``` Or you can start an interactive session immediately like this: -```sh +```shell enter-admin-container ``` If you're using a custom control container, or want to make the API calls directly, you can enable the admin container like this instead: -```txt +```shell apiclient set host-containers.admin.enabled=true ``` Once you've enabled the admin container, you can either access it through SSH or execute commands from the control container like this: -```txt +```shell apiclient exec admin bash ``` @@ -245,7 +245,7 @@ apiclient knows how to handle those update APIs for you, and you can run it from To see what updates are available: -```txt +```shell apiclient update check ``` @@ -254,20 +254,20 @@ The `available_updates` field will show the full list of available versions, inc To apply the latest update: -```txt +```shell apiclient update apply ``` The next time you reboot, you'll start up in the new version, and system configuration will be automatically [migrated](sources/api/migration/). To reboot right away: -```txt +```shell apiclient reboot ``` If you're confident about updating, the `apiclient update apply` command has `--check` and `--reboot` flags to combine the above actions, so you can accomplish all of the above steps like this: -```txt +```shell apiclient update apply --check --reboot ``` @@ -278,7 +278,7 @@ See the [apiclient documentation](sources/api/apiclient/) for more details. The system will automatically roll back if it's unable to boot. If the update is not functional for a given container workload, you can do a manual rollback: -```txt +```shell signpost rollback-to-inactive reboot ``` @@ -297,7 +297,7 @@ Here we'll describe the settings you can configure on your Bottlerocket instance You can see the current settings with an API request: -```txt +```shell apiclient get settings ``` @@ -310,7 +310,7 @@ For example, here's an abbreviated response: You can change settings like this: -```txt +```shell apiclient set motd="hi there" kubernetes.node-labels.environment=test ``` @@ -619,7 +619,7 @@ It is recommended to programmatically set these settings via `apiclient` through An example `apiclient` call to set registry credentials for `gcr.io` and `docker.io` looks like this: - ```bash + ```shell apiclient set --json '{ "container-registry": { "credentials": [ @@ -868,7 +868,7 @@ trusted=false Here's the same example but using API calls: -```txt +```shell apiclient set \ pki.my-trusted-bundle.data="W3N..." 
\ pki.my-trusted-bundle.trusted=true \ @@ -900,11 +900,11 @@ Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies Here's an example of adding a custom host container with API calls: -```txt +```shell apiclient set \ - host-containers.custom.source=MY-CONTAINER-URI \ - host-containers.custom.enabled=true \ - host-containers.custom.superpowered=false + host-containers.custom.source=MY-CONTAINER-URI \ + host-containers.custom.enabled=true \ + host-containers.custom.superpowered=false ``` Here's the same example, but with the settings you'd add to user data: @@ -922,7 +922,7 @@ All host containers will have the `apiclient` binary available at `/usr/local/bi You can also use `apiclient` to run programs in other host containers. For example, to access the admin container: -```txt +```shell apiclient exec admin bash ``` @@ -970,11 +970,11 @@ Bootstrap containers have three different modes: Here's an example of adding a bootstrap container with API calls: -```txt +```shell apiclient set \ - bootstrap-containers.bootstrap.source=MY-CONTAINER-URI \ - bootstrap-containers.bootstrap.mode=once \ - bootstrap-containers.bootstrap.essential=true + bootstrap-containers.bootstrap.source=MY-CONTAINER-URI \ + bootstrap-containers.bootstrap.mode=once \ + bootstrap-containers.bootstrap.essential=true ``` Here's the same example, but with the settings you'd add to user data: @@ -1024,7 +1024,7 @@ They can be overridden for testing purposes in [the same way as other settings]( You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. SSH to the Bottlerocket host or `apiclient exec admin bash` to access the admin container, then run: -```bash +```shell sudo sheltie logdog ``` @@ -1034,10 +1034,10 @@ This archive is accessible from host containers at `/.bottlerocket/support`. You can use SSH to retrieve the file. Once you have exited from the Bottlerocket host, run a command like: -```bash +```shell ssh -i YOUR_KEY_FILE \ - ec2-user@YOUR_HOST \ - "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz + ec2-user@YOUR_HOST \ + "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz ``` (If your instance isn't accessible through SSH, you can use [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html).) 
diff --git a/tools/infrasys/cloudformation-templates/kms_key_setup.yml b/tools/infrasys/cloudformation-templates/kms_key_setup.yml index 09e3e113..38517452 100644 --- a/tools/infrasys/cloudformation-templates/kms_key_setup.yml +++ b/tools/infrasys/cloudformation-templates/kms_key_setup.yml @@ -25,6 +25,6 @@ Resources: AliasName: !Sub "alias/${Alias}" TargetKeyId: !Ref KMSKey -Outputs: +Outputs: KeyId: Value: !GetAtt KMSKey.Arn diff --git a/tools/infrasys/cloudformation-templates/s3_setup.yml b/tools/infrasys/cloudformation-templates/s3_setup.yml index 61cf5c66..31b4e9fe 100644 --- a/tools/infrasys/cloudformation-templates/s3_setup.yml +++ b/tools/infrasys/cloudformation-templates/s3_setup.yml @@ -1,11 +1,11 @@ Resources: TUFRepoBucket: Type: AWS::S3::Bucket - DeletionPolicy: Retain + DeletionPolicy: Retain Properties: VersioningConfiguration: Status: Enabled - AccessControl: LogDeliveryWrite + AccessControl: LogDeliveryWrite MetricsConfigurations: - Id: BucketMetrics BucketEncryption: @@ -17,8 +17,8 @@ Resources: BlockPublicPolicy: True IgnorePublicAcls: True RestrictPublicBuckets: True - -Outputs: + +Outputs: BucketName: Value: !Ref TUFRepoBucket RDN: diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.toml b/tools/infrasys/test_tomls/toml_yaml_conversion.toml index 57508f58..f2e58013 100644 --- a/tools/infrasys/test_tomls/toml_yaml_conversion.toml +++ b/tools/infrasys/test_tomls/toml_yaml_conversion.toml @@ -1,12 +1,12 @@ -[repo.default] +[repo.default] file_hosting_config_name = "TUF-Repo-S3-Buck" signing_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } root_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } root_key_threshold = 1 pub_key_threshold = 1 - -[aws] + +[aws] [aws.s3.TUF-Repo-S3-Buck] region = "us-west-2" - vpc_endpoint_id = "vpc-12345" - s3_prefix = "/my-bottlerocket-remix" + vpc_endpoint_id = "vpc-12345" + s3_prefix = "/my-bottlerocket-remix" diff --git a/tools/partyplanner b/tools/partyplanner index 259ee4bf..b3bd1a18 100755 --- a/tools/partyplanner +++ b/tools/partyplanner @@ -58,7 +58,7 @@ BIOS_MIB="4" # one per disk OVERHEAD_MIB="$((GPT_MIB * 2 + BIOS_MIB))" # The 'recommended' size for the EFI partition is 100MB but our EFI images are -# under 1MB, so this will suffice for now. It would be possible to increase the +# under 2MB, so this will suffice for now. It would be possible to increase the # EFI partition size by taking space from the "reserved" area below. EFI_MIB="5" # one per bank @@ -71,7 +71,7 @@ EFI_MIB="5" # one per bank # # !!! WARNING !!! # -# Increasing any of these constants is very likely break systems on update, +# Increasing any of these constants is very likely to break systems on update, # since the corresponding partitions are adjacent on disk and have no room to # grow. 
BOOT_SCALE_FACTOR="20" @@ -223,9 +223,9 @@ set_partition_types() { local typecode for part in BOOT ROOT HASH RESERVED ; do for bank in A B ; do - typecode="BOTTLEROCKET_${part}_TYPECODE" - typecode="${!typecode}" - pp_type["${part}-${bank}"]="${typecode}" + typecode="BOTTLEROCKET_${part}_TYPECODE" + typecode="${!typecode}" + pp_type["${part}-${bank}"]="${typecode}" done done } diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md index d4751b03..9760125f 100644 --- a/tools/pubsys/policies/ssm/README.md +++ b/tools/pubsys/policies/ssm/README.md @@ -5,7 +5,7 @@ You can pass a different directory to `pubsys` to use a different set of paramet The directory is expected to contain a file named `defaults.toml` with a table entry per parameter, like this: -``` +```toml [[parameter]] name = "{variant}/{arch}/{image_version}/image_id" value = "{image_id}" @@ -30,7 +30,7 @@ The parameter will only be populated if the current `variant` or `arch` matches (If both `variant` and `arch` are listed, the build must match an entry from both lists.) For example, to add an extra parameter that's only set for "aarch64" builds of the "aws-ecs-1" variant: -``` +```toml [[parameter]] arch = ["aarch64"] variant = ["aws-ecs-1"] diff --git a/tools/rpm2img b/tools/rpm2img index ebc33db2..0e34fe2f 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -243,7 +243,7 @@ if [[ "${ARCH}" == "x86_64" ]]; then fi # We also need an EFI partition, formatted FAT32 with the -# EFI binary at the correct path, eg /efi/boot. The grub +# EFI binary at the correct path, e.g. /efi/boot. The grub # package has placed the image in /boot/efi/EFI/BOOT. mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" diff --git a/tools/testsys/Test.toml.example b/tools/testsys/Test.toml.example index 82190f87..c3b41db4 100644 --- a/tools/testsys/Test.toml.example +++ b/tools/testsys/Test.toml.example @@ -97,7 +97,7 @@ conformance-image = "" # Configurable values: # -# cluster-names: +# cluster-names: # All clusters the variant should be tested over. Cluster naming supports templated strings, and # both `arch` and `variant` are provided as variables (`{{arch}}-{{variant}}`). # @@ -116,4 +116,4 @@ conformance-image = "" # # conformance-registry: (K8s only) # Specify a custom registry for conformance testing images. -# For `aws-k8s` variants this will be used as the Sonobuoy e2e registry. \ No newline at end of file +# For `aws-k8s` variants this will be used as the Sonobuoy e2e registry. From 31e3572c63d318d5214a383393e4972a0183524e Mon Sep 17 00:00:00 2001 From: ecpullen Date: Mon, 14 Nov 2022 23:43:18 +0000 Subject: [PATCH 0810/1356] testsys: Support external tests Add support for the `Unknown` variant of the `TestType` enum. Templated yaml files can be tested using `-f`. 
--- tools/Cargo.lock | 2 + tools/testsys-config/src/lib.rs | 16 ++ tools/testsys/Cargo.toml | 2 + tools/testsys/src/aws_ecs.rs | 5 + tools/testsys/src/aws_k8s.rs | 5 + tools/testsys/src/crds.rs | 368 +++++++++++++++++++------------- tools/testsys/src/error.rs | 22 ++ tools/testsys/src/run.rs | 35 ++- 8 files changed, 304 insertions(+), 151 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 2edda2dd..62518f16 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2947,6 +2947,7 @@ dependencies = [ "clap 3.2.22", "env_logger", "futures", + "handlebars", "k8s-openapi", "kube-client", "log", @@ -2956,6 +2957,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", + "serde_yaml", "snafu", "term_size", "testsys-config", diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index e37088c6..ab28a725 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -61,6 +61,7 @@ impl TestConfig { variant: &Variant, arch: S, starting_config: Option, + test_type: &str, ) -> GenericVariantConfig where S: Into, @@ -74,6 +75,8 @@ impl TestConfig { // Convert the iterator of keys to and iterator of Configs. If the key does not have a // configuration in the config file, remove it from the iterator. .filter_map(|key| self.configs.get(&key).cloned()) + // Expand the `test_type` configuration + .flat_map(|config| vec![config.test(test_type), config]) // Take the iterator of configurations and extract the arch specific config and the // non-arch specific config for each config. Then, convert them into a single iterator. .flat_map(|config| vec![config.for_arch(&arch), config.config]) @@ -134,6 +137,8 @@ pub struct GenericConfig { x86_64: GenericVariantConfig, #[serde(default, flatten)] config: GenericVariantConfig, + #[serde(default)] + configuration: HashMap, } impl GenericConfig { @@ -148,6 +153,17 @@ impl GenericConfig { _ => Default::default(), } } + + /// Get the configuration for a specific test type. + pub fn test(&self, test_type: S) -> GenericConfig + where + S: AsRef, + { + self.configuration + .get(test_type.as_ref()) + .cloned() + .unwrap_or_default() + } } /// The configuration for a specific config level (-). This may or may not be arch diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index dd488629..5905ef7c 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -16,6 +16,7 @@ bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-var clap = { version = "3", features = ["derive", "env"] } env_logger = "0.9" futures = "0.3.8" +handlebars = "4.3" k8s-openapi = { version = "0.16", features = ["v1_20", "api"], default-features = false } kube-client = { version = "0.75"} log = "0.4" @@ -25,6 +26,7 @@ pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_plain = "1" +serde_yaml = "0.8" snafu = "0.7" term_size = "0.3" testsys-config = { path = "../testsys-config/", version = "0.1.0" } diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs index 49c658da..c8d6c06f 100644 --- a/tools/testsys/src/aws_ecs.rs +++ b/tools/testsys/src/aws_ecs.rs @@ -9,6 +9,7 @@ use log::debug; use maplit::btreemap; use model::{Crd, DestructionPolicy}; use snafu::OptionExt; +use std::collections::BTreeMap; /// A `CrdCreator` responsible for creating crd related to `aws-ecs` variants. 
pub(crate) struct AwsEcsCreator { @@ -172,4 +173,8 @@ impl CrdCreator for AwsEcsCreator { Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(test_crd)))) } + + fn additional_fields(&self, _test_type: &str) -> BTreeMap { + btreemap! {"region".to_string() => self.region.clone()} + } } diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs index dc171317..99bc90a1 100644 --- a/tools/testsys/src/aws_k8s.rs +++ b/tools/testsys/src/aws_k8s.rs @@ -13,6 +13,7 @@ use maplit::btreemap; use model::constants::NAMESPACE; use model::{Agent, Configuration, Crd, DestructionPolicy, Resource, ResourceSpec}; use snafu::{OptionExt, ResultExt}; +use std::collections::BTreeMap; use std::str::FromStr; /// A `CrdCreator` responsible for creating crd related to `aws-k8s` variants. @@ -154,4 +155,8 @@ impl CrdCreator for AwsK8sCreator { test_input, )?)))) } + + fn additional_fields(&self, _test_type: &str) -> BTreeMap { + btreemap! {"region".to_string() => self.region.clone()} + } } diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs index 67f493e0..510bf8ba 100644 --- a/tools/testsys/src/crds.rs +++ b/tools/testsys/src/crds.rs @@ -1,13 +1,16 @@ use crate::error::{self, Result}; -use crate::run::{KnownTestType, TestType}; +use crate::run::KnownTestType; use bottlerocket_types::agent_config::TufRepoConfig; use bottlerocket_variant::Variant; +use handlebars::Handlebars; use log::{debug, warn}; use maplit::btreemap; +use model::constants::{API_VERSION, NAMESPACE}; use model::test_manager::{SelectionParams, TestManager}; use model::Crd; use pubsys_config::RepoConfig; -use snafu::OptionExt; +use serde::Deserialize; +use snafu::{OptionExt, ResultExt}; use std::collections::BTreeMap; use testsys_config::{rendered_cluster_name, GenericVariantConfig, TestsysImages}; @@ -127,6 +130,26 @@ impl<'a> CrdInput<'a> { .collect::>>()? }) } + + /// Creates a `BTreeMap` of all configurable fields from this input + fn config_fields(&self, cluster_name: &str) -> BTreeMap { + btreemap! { + "arch".to_string() => self.arch.clone(), + "variant".to_string() => self.variant.to_string(), + "kube-arch".to_string() => self.kube_arch(), + "kube-variant".to_string() => self.kube_variant(), + "cluster-name".to_string() => cluster_name.to_string(), + "instance-type".to_string() => some_or_null(&self.config.instance_type), + "agent-role".to_string() => some_or_null(&self.config.agent_role), + "conformance-image".to_string() => some_or_null(&self.config.conformance_image), + "conformance-registry".to_string() => some_or_null(&self.config.conformance_registry), + } + } +} + +/// Take the value of the `Option` or `"null"` if the `Option` was `None` +fn some_or_null(field: &Option) -> String { + field.to_owned().unwrap_or_else(|| "null".to_string()) } /// The `CrdCreator` trait is used to create CRDs. Each variant family should have a `CrdCreator` @@ -159,161 +182,218 @@ pub(crate) trait CrdCreator: Sync { /// Create a testing CRD for this variant of Bottlerocket. async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result; + /// Create a set of additional fields that may be used by an externally defined agent on top of + /// the ones in `CrdInput` + fn additional_fields(&self, _test_type: &str) -> BTreeMap { + Default::default() + } + /// Creates a set of CRDs for the specified variant and test type that can be added to a TestSys /// cluster. 
- async fn create_crds(&self, test_type: TestType, crd_input: &CrdInput) -> Result> { + async fn create_crds( + &self, + test_type: &KnownTestType, + crd_input: &CrdInput, + ) -> Result> { let mut crds = Vec::new(); for cluster_name in &crd_input.cluster_names()? { + let cluster_output = self + .cluster_crd(ClusterInput { + cluster_name, + crd_input, + }) + .await?; + let cluster_crd_name = cluster_output.crd_name(); + if let Some(crd) = cluster_output.crd() { + debug!("Cluster crd was created for '{}'", cluster_name); + crds.push(crd) + } match &test_type { - TestType::Known(test_type) => { - let cluster_output = self - .cluster_crd(ClusterInput { - cluster_name, + KnownTestType::Conformance | KnownTestType::Quick => { + let bottlerocket_output = self + .bottlerocket_crd(BottlerocketInput { + cluster_crd_name: &cluster_crd_name, + image_id: self.image_id(crd_input)?, + test_type, crd_input, }) .await?; - let cluster_crd_name = cluster_output.crd_name(); - if let Some(crd) = cluster_output.crd() { - debug!("Cluster crd was created for '{}'", cluster_name); + let bottlerocket_crd_name = bottlerocket_output.crd_name(); + if let Some(crd) = bottlerocket_output.crd() { + debug!("Bottlerocket crd was created for '{}'", cluster_name); crds.push(crd) } - match &test_type { - KnownTestType::Conformance | KnownTestType::Quick => { - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id: self.image_id(crd_input)?, - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); - if let Some(crd) = bottlerocket_output.crd() { - debug!("Bottlerocket crd was created for '{}'", cluster_name); - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: Default::default(), - name_suffix: None, - }) - .await?; - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - } - KnownTestType::Migration => { - let image_id = if let Some(image_id) = &crd_input.starting_image_id { - debug!("Using the provided starting image id for migration testing '{}'", image_id); - image_id.to_string() - } else { - let image_id = self.starting_image_id(crd_input).await?; - debug!("A starting image id was not provided, '{}' will be used instead.", image_id); - image_id - }; - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id, - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); - if let Some(crd) = bottlerocket_output.crd() { - debug!("Bottlerocket crd was created for '{}'", cluster_name); - crds.push(crd) - } - let mut tests = Vec::new(); - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests.clone(), - name_suffix: "-1-initial".into(), - }) - .await?; - if let Some(name) = test_output.crd_name() { - tests.push(name) - } - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - let migration_output = self - .migration_crd(MigrationInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - crd_input, - prev_tests: tests.clone(), - name_suffix: "-2-migrate".into(), - migration_direction: MigrationDirection::Upgrade, - }) - .await?; - if let Some(name) = migration_output.crd_name() { - 
tests.push(name) - } - if let Some(crd) = migration_output.crd() { - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests.clone(), - name_suffix: "-3-migrated".into(), - }) - .await?; - if let Some(name) = test_output.crd_name() { - tests.push(name) - } - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - let migration_output = self - .migration_crd(MigrationInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - crd_input, - prev_tests: tests.clone(), - name_suffix: "-4-migrate".into(), - migration_direction: MigrationDirection::Downgrade, - }) - .await?; - if let Some(name) = migration_output.crd_name() { - tests.push(name) - } - if let Some(crd) = migration_output.crd() { - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests, - name_suffix: "-5-final".into(), - }) - .await?; - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - } + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: Default::default(), + name_suffix: None, + }) + .await?; + if let Some(crd) = test_output.crd() { + crds.push(crd) } } - TestType::Unknown(_) => { - return Err(error::Error::Unsupported { - what: "Custom test types".to_string(), - }) + KnownTestType::Migration => { + let image_id = if let Some(image_id) = &crd_input.starting_image_id { + debug!( + "Using the provided starting image id for migration testing '{}'", + image_id + ); + image_id.to_string() + } else { + let image_id = self.starting_image_id(crd_input).await?; + debug!( + "A starting image id was not provided, '{}' will be used instead.", + image_id + ); + image_id + }; + let bottlerocket_output = self + .bottlerocket_crd(BottlerocketInput { + cluster_crd_name: &cluster_crd_name, + image_id, + test_type, + crd_input, + }) + .await?; + let bottlerocket_crd_name = bottlerocket_output.crd_name(); + if let Some(crd) = bottlerocket_output.crd() { + debug!("Bottlerocket crd was created for '{}'", cluster_name); + crds.push(crd) + } + let mut tests = Vec::new(); + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-1-initial".into(), + }) + .await?; + if let Some(name) = test_output.crd_name() { + tests.push(name) + } + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + let migration_output = self + .migration_crd(MigrationInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-2-migrate".into(), + migration_direction: MigrationDirection::Upgrade, + }) + .await?; + if let Some(name) = migration_output.crd_name() { + tests.push(name) + } + if let Some(crd) = migration_output.crd() { + crds.push(crd) + } + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-3-migrated".into(), + }) + .await?; + if let Some(name) = test_output.crd_name() { + tests.push(name) + } + if let 
Some(crd) = test_output.crd() { + crds.push(crd) + } + let migration_output = self + .migration_crd(MigrationInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + crd_input, + prev_tests: tests.clone(), + name_suffix: "-4-migrate".into(), + migration_direction: MigrationDirection::Downgrade, + }) + .await?; + if let Some(name) = migration_output.crd_name() { + tests.push(name) + } + if let Some(crd) = migration_output.crd() { + crds.push(crd) + } + let test_output = self + .test_crd(TestInput { + cluster_crd_name: &cluster_crd_name, + bottlerocket_crd_name: &bottlerocket_crd_name, + test_type, + crd_input, + prev_tests: tests, + name_suffix: "-5-final".into(), + }) + .await?; + if let Some(crd) = test_output.crd() { + crds.push(crd) + } + } + } + } + + Ok(crds) + } + + /// Creates a set of CRDs for the specified variant and test type that can be added to a TestSys + /// cluster. + async fn create_custom_crds( + &self, + test_type: &str, + crd_input: &CrdInput, + crd_template_file_path: &str, + ) -> Result> { + let mut crds = Vec::new(); + for cluster_name in &crd_input.cluster_names()? { + let mut fields = crd_input.config_fields(cluster_name); + fields.insert("api-version".to_string(), API_VERSION.to_string()); + fields.insert("namespace".to_string(), NAMESPACE.to_string()); + fields.insert("image-id".to_string(), self.image_id(crd_input)?); + fields.append(&mut self.additional_fields(test_type)); + + let mut handlebars = Handlebars::new(); + handlebars.set_strict_mode(true); + let rendered_manifest = handlebars.render_template( + &std::fs::read_to_string(crd_template_file_path).context(error::FileSnafu { + path: crd_template_file_path, + })?, + &fields, + )?; + + for crd_doc in serde_yaml::Deserializer::from_str(&rendered_manifest) { + let value = + serde_yaml::Value::deserialize(crd_doc).context(error::SerdeYamlSnafu { + what: "Unable to deserialize rendered manifest", + })?; + let mut crd: Crd = + serde_yaml::from_value(value).context(error::SerdeYamlSnafu { + what: "The manifest did not match a `CRD`", + })?; + // Add in the secrets from the config manually. 
+ match &mut crd { + Crd::Test(test) => { + test.spec.agent.secrets = Some(crd_input.config.secrets.clone()) + } + Crd::Resource(resource) => { + resource.spec.agent.secrets = Some(crd_input.config.secrets.clone()) + } } + crds.push(crd); } } Ok(crds) diff --git a/tools/testsys/src/error.rs b/tools/testsys/src/error.rs index f2439036..bef2f8ea 100644 --- a/tools/testsys/src/error.rs +++ b/tools/testsys/src/error.rs @@ -1,6 +1,7 @@ use aws_sdk_ec2::error::DescribeImagesError; use aws_sdk_ec2::types::SdkError; use snafu::Snafu; +use std::path::PathBuf; pub type Result = std::result::Result; @@ -17,6 +18,21 @@ pub enum Error { source: SdkError, }, + #[snafu(display("Unable to read file '{}': {}", path.display(), source))] + File { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(context(false), display("Unable render templated yaml: {}", source))] + HandlebarsRender { source: handlebars::RenderError }, + + #[snafu( + context(false), + display("Unable create template from yaml: {}", source) + )] + HandlebarsTemplate { source: handlebars::TemplateError }, + #[snafu(display("Unable to create map from {}: {}", what, source))] IntoMap { what: String, source: model::Error }, @@ -53,6 +69,12 @@ pub enum Error { source: serde_json::Error, }, + #[snafu(display("{}: {}", what, source))] + SerdeYaml { + what: String, + source: serde_yaml::Error, + }, + #[snafu(context(false), display("{}", source))] TestManager { source: model::test_manager::Error }, diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 6507bb86..1a5bb24a 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -78,6 +78,10 @@ pub(crate) struct Run { /// version that will be migrated to. #[clap(long, env = "BUILDSYS_VERSION_IMAGE")] migration_target_version: Option, + + /// The template file that should be used for custom testing. + #[clap(long = "template-file", short = 'f')] + custom_crd_template: Option, } /// This is a CLI parsable version of `testsys_config::GenericVariantConfig`. @@ -140,8 +144,12 @@ impl Run { let test_opts = test_config.test.to_owned().unwrap_or_default(); - let variant_config = - test_config.reduced_config(&variant, &self.arch, Some(self.config.into())); + let variant_config = test_config.reduced_config( + &variant, + &self.arch, + Some(self.config.into()), + &self.test_flavor.to_string(), + ); // If a lock file exists, use that, otherwise use Infra.toml or default let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true)?; @@ -218,9 +226,22 @@ impl Run { images, }; - let crds = crd_creator - .create_crds(self.test_flavor, &crd_input) - .await?; + let crds = match &self.test_flavor { + TestType::Known(test_type) => crd_creator.create_crds(test_type, &crd_input).await?, + TestType::Custom(test_type) => { + crd_creator + .create_custom_crds( + test_type, + &crd_input, + self.custom_crd_template + .as_ref() + .context(error::InvalidSnafu { + what: "A crd template file is required for custom test types.", + })?, + ) + .await? + } + }; debug!("Adding crds to testsys cluster"); for crd in crds { @@ -264,12 +285,12 @@ pub enum KnownTestType { } /// If a test type is one that is supported by TestSys it will be created as `Known(KnownTestType)`. -/// All other test types will be stored as `Unknown()`. +/// All other test types will be stored as `Custom()`. 
#[derive(Debug, Serialize, Deserialize)] #[serde(untagged)] pub(crate) enum TestType { Known(KnownTestType), - Unknown(String), + Custom(String), } derive_fromstr_from_deserialize!(TestType); From 7a86436bd8d534778e632d53fe2791cc822f6383 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 30 Nov 2022 14:54:42 -0600 Subject: [PATCH 0811/1356] Add default labels to issue templates This adds the `status/needs-triage` label to all issues and `type/bug` or `type/enhancement` to their respective issue types. This allows us to clearly mark new issues as needing triage and helps us start to categorize the types of issues being filed. Signed-off-by: Sean McGinnis --- .github/ISSUE_TEMPLATE/build.md | 1 + .github/ISSUE_TEMPLATE/feature.md | 1 + .github/ISSUE_TEMPLATE/image.md | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/build.md b/.github/ISSUE_TEMPLATE/build.md index 6e4c091d..78d51d32 100644 --- a/.github/ISSUE_TEMPLATE/build.md +++ b/.github/ISSUE_TEMPLATE/build.md @@ -1,6 +1,7 @@ --- name: Bug report - build process about: Let us know about a problem with the build process +labels: status/needs-triage, type/bug --- + +**What I'd like:** + +**Device type (e.g. network interface, disk controller):** + +**Device vendor:** + +**Device model:** + +**Driver used on other Linux distribition:** + +**Any alternatives you've considered:** + From c8168b154f63dcc7d3bf0ce1f3990f68ab10fb43 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 29 Mar 2023 08:27:42 +0000 Subject: [PATCH 0914/1356] kernel-5.10: update to 5.10.173 Rebase to Amazon Linux upstream version based on 5.10.173. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 1e06cebd..8f6374ca 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/f4682a5336292f734ea4abbbad28a4d460ebad6578e4fbfd26de479f9ff8f84d/kernel-5.10.165-143.735.amzn2.src.rpm" -sha512 = "f5c0f3c2082f54fa052ecf4c8ed7752698f29765014456214e78d9af181c36926a26b2adb3a8644987cf99afe6eddcb335c758e06fce801953da2d7aa90c1133" +url = "https://cdn.amazonlinux.com/blobstore/bfdedd54405ee75070fa9b53342399680e3145e362f41deb1276de2082625061/kernel-5.10.173-154.642.amzn2.src.rpm" +sha512 = "b98f97a00dfbec2ba6681faa326782bbe02c8a57758890076f71bb07a149d6dee3dba1237c07fb195c6a65956bee572f0d8757898375f437244eec7e69938e0b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 07885207..654821a6 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.165 +Version: 5.10.173 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/f4682a5336292f734ea4abbbad28a4d460ebad6578e4fbfd26de479f9ff8f84d/kernel-5.10.165-143.735.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/bfdedd54405ee75070fa9b53342399680e3145e362f41deb1276de2082625061/kernel-5.10.173-154.642.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From b27b24a08d639ef8ea27d872f606487a122051c3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 29 Mar 2023 08:27:58 +0000 Subject: [PATCH 0915/1356] kernel-5.15: update to 5.15.102 Rebase to Amazon Linux upstream version based on 5.15.102. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index d10da1a1..a80c4155 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/c6618e1460b8ace4707e17615e7bcb6c9654796d739014a1d7f6d6e2a70d8bfe/kernel-5.15.90-54.138.amzn2.src.rpm" -sha512 = "225dea26f9e740a36c9df7d333688a9759761a080ca1586b2e515eec0f084919d1898ac5bfca4dfa067e56c49a86ca86ba7233d7117a13f85a6cf49df464d7f0" +url = "https://cdn.amazonlinux.com/blobstore/567d93a3639fa16d002a80a970223b8dc134fc4d1214125b379750ee689a76ea/kernel-5.15.102-61.139.amzn2.src.rpm" +sha512 = "6df4d568ef60cd631a7764d33f771cae6be576cbbf0400e86eafdad0a86ddeb65c96dc2ad40698573277fa8afe1076cdc9e45c9776f6f7f782a273f0e416fc88" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 13a04bfa..b4f8b59d 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.90 +Version: 5.15.102 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/c6618e1460b8ace4707e17615e7bcb6c9654796d739014a1d7f6d6e2a70d8bfe/kernel-5.15.90-54.138.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/567d93a3639fa16d002a80a970223b8dc134fc4d1214125b379750ee689a76ea/kernel-5.15.102-61.139.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 59ea7e2767103d83eb357c369e4a967d2fcfdd73 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 29 Mar 2023 15:33:26 +0000 Subject: [PATCH 0916/1356] kernel-5.15: Disable MLX5 offloading options In the past we have taken care of only shipping the base configuration of NIC drivers. No specialized offloading options. Keep this status quo until there is a use case for these specialized driver functions. 
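As a quick sanity check (not part of this change), the resulting kernel configuration can be
grepped to confirm the offloading options remain disabled; the path to the generated `.config`
below is an assumption and depends on where your build places it:

```
# Each option should either be absent or show up as "# CONFIG_... is not set".
for opt in MLX5_FPGA MLX5_IPSEC MLX5_CORE_IPOIB MLX5_SF; do
  grep "CONFIG_${opt}[ =]" path/to/generated/.config || echo "CONFIG_${opt}: not present"
done
```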
Signed-off-by: Leonard Foerster --- packages/kernel-5.15/config-bottlerocket-aws | 4 ++++ packages/kernel-5.15/config-bottlerocket-metal | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/packages/kernel-5.15/config-bottlerocket-aws b/packages/kernel-5.15/config-bottlerocket-aws index 6b4ed404..1bfa27e1 100644 --- a/packages/kernel-5.15/config-bottlerocket-aws +++ b/packages/kernel-5.15/config-bottlerocket-aws @@ -11,3 +11,7 @@ CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_INFINIBAND=m CONFIG_MLXFW=m +# CONFIG_MLX5_FPGA is not set +# CONFIG_MLX5_IPSEC is not set +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_SF is not set diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 5aaae1db..a162ccb5 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -76,6 +76,10 @@ CONFIG_MLX5_INFINIBAND=m CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX5_CORE_EN=y CONFIG_NET_SWITCHDEV=y +# CONFIG_MLX5_FPGA is not set +# CONFIG_MLX5_IPSEC is not set +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_SF is not set # Myricom network support CONFIG_NET_VENDOR_MYRI=y From 3d80b2a267a9335aeeb2492d858f10c8b822157a Mon Sep 17 00:00:00 2001 From: John McBride Date: Wed, 29 Mar 2023 23:02:28 +0000 Subject: [PATCH 0917/1356] GitHub actions: Use GOPROXY=direct during builds Signed-off-by: John McBride --- .github/workflows/build.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b7fafef2..d2389e0c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,6 +22,14 @@ concurrency: group: ${{ github.ref }} cancel-in-progress: true +env: + # When Go packages are built, buildsys will vendor in dependent Go code for + # that package and bundle it up in a tarball. This env variable is consumed + # and used to configure Go to directly download code from its upstream source. + # This is a useful early signal during GitHub actions to see if there are + # upstream Go code problems. + GOPROXY: direct + jobs: build: runs-on: From 8aa41a10d322a8cbcdcec96c4bad3a0391993f54 Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Mon, 20 Mar 2023 17:54:55 +0000 Subject: [PATCH 0918/1356] buildsys: Add user agent header in http request Buildsys does not set the user agent in HTTP request when fetching upstream sources. But some servers may not like the requests without a user agent header and netfilter.org is one of them. All these requests succeed when a user agent is set, so include the header in requests that buildsys sends out. --- tools/buildsys/src/cache.rs | 28 ++++++++++++++++++++++++++-- tools/buildsys/src/cache/error.rs | 6 ++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index d8bb53a3..1cb2084e 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -14,8 +14,10 @@ pub(crate) mod error; use error::Result; use buildsys::manifest; +use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT}; use sha2::{Digest, Sha512}; use snafu::{ensure, OptionExt, ResultExt}; +use std::env; use std::fs::{self, File}; use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; @@ -77,8 +79,26 @@ impl LookasideCache { /// then verifies the contents against the SHA-512 hash provided. 
fn fetch_file>(url: &str, path: P, hash: &str) -> Result<()> { let path = path.as_ref(); - let mut resp = - reqwest::blocking::get(url).context(error::ExternalFileRequestSnafu { url })?; + + let version = Self::getenv("BUILDSYS_VERSION_FULL")?; + + let mut headers = HeaderMap::new(); + headers.insert( + USER_AGENT, + HeaderValue::from_str(&format!( + "Bottlerocket buildsys {version} (https://github.com/bottlerocket-os/bottlerocket)" + )) + .unwrap_or(HeaderValue::from_static( + "Bottlerocket buildsys (https://github.com/bottlerocket-os/bottlerocket)", + )), + ); + + let client = reqwest::blocking::Client::new(); + let mut resp = client + .get(url) + .headers(headers) + .send() + .context(error::ExternalFileRequestSnafu { url })?; let status = resp.status(); ensure!( status.is_success(), @@ -100,6 +120,10 @@ impl LookasideCache { } } + fn getenv(var: &str) -> Result { + env::var(var).context(error::EnvironmentSnafu { var: (var) }) + } + fn extract_file_name(url: &str) -> Result { let parsed = reqwest::Url::parse(url).context(error::ExternalFileUrlSnafu { url })?; let name = parsed diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs index ec8e1ccb..7665ba68 100644 --- a/tools/buildsys/src/cache/error.rs +++ b/tools/buildsys/src/cache/error.rs @@ -6,6 +6,12 @@ use std::path::PathBuf; #[snafu(visibility(pub(super)))] #[allow(clippy::enum_variant_names)] pub(crate) enum Error { + #[snafu(display("Missing environment variable '{}'", var))] + Environment { + var: String, + source: std::env::VarError, + }, + #[snafu(display("Bad file name '{}'", path.display()))] ExternalFileName { path: PathBuf }, From 1be154edd65db049636a99dd522e906819b66a6d Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Wed, 5 Apr 2023 00:22:18 +0000 Subject: [PATCH 0919/1356] buildsys: ignore code snippets in doctests --- tools/buildsys/src/manifest.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 5f007098..7125725d 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -12,7 +12,7 @@ to store configuration for other tools. We recognize the following keys. `source-groups` is a list of directories in the top-level `sources` directory, each of which contains a set of related Rust projects. Changes to files in these groups should trigger a rebuild. -``` +```ignore [package.metadata.build-package] source-groups = ["api"] ``` @@ -21,7 +21,7 @@ source-groups = ["api"] as additional dependencies for the build. If the path for the external file name is not provided, it will be taken from the last path component of the URL. -``` +```ignore [[package.metadata.build-package.external-files]] path = "foo" url = "https://foo" @@ -61,7 +61,7 @@ of some upstream archive is "my-package.tar.gz", the output archive would be named `bundled-my-package.tar.gz`. This output path may then be referenced within an RPM spec or when creating a package in order to access the vendored upstream dependencies during build time. -``` +```ignore [[package.metadata.build-package.external-files]] path = "foo" url = "https://foo" @@ -75,7 +75,7 @@ bundle-output-path = "path/to/output.tar.gz" if you have a package with "." in its name, for example, which Cargo doesn't allow. This means the directory name and spec file name can use your preferred naming. 
-``` +```ignore [package.metadata.build-package] package-name = "better.name" ``` @@ -84,7 +84,7 @@ package-name = "better.name" building a new variant, and defaults to false; set it to true if a package is using the variant to affect its build process. -``` +```ignore [package.metadata.build-package] variant-sensitive = true ``` @@ -93,7 +93,7 @@ Some packages might only be sensitive to certain components of the variant tuple, such as the platform, runtime, or family. The `variant-sensitive` field can also take a string to indicate the source of the sensitivity. -``` +```ignore [package.metadata.build-package] # sensitive to platform, like "metal" or "aws" variant-sensitive = "platform" @@ -110,7 +110,7 @@ useful when the way the package is built changes based on whether a particular image feature is enabled for the current variant, rather than when the variant tuple changes. -``` +```ignore [package.metadata.build-package] package-features = [ "grub-set-private-var", @@ -119,7 +119,7 @@ package-features = [ `releases-url` is ignored by buildsys, but can be used by packager maintainers to indicate a good URL for checking whether the software has had a new release. -``` +```ignore [package.metadata.build-package] releases-url = "https://www.example.com/releases" ``` @@ -127,14 +127,14 @@ releases-url = "https://www.example.com/releases" ## Metadata for variants `included-packages` is a list of packages that should be included in a variant. -``` +```ignore [package.metadata.build-variant] included-packages = ["release"] ``` `image-format` is the desired format for the built images. This can be `raw` (the default), `vmdk`, or `qcow2`. -``` +```ignore [package.metadata.build-variant] image-format = "vmdk" ``` @@ -163,7 +163,7 @@ single "os" image volume. The hint will be ignored if the combined size of the `partition-plan` is the desired strategy for image partitioning. This can be `split` (the default) for "os" and "data" images backed by separate volumes, or `unified` to have "os" and "data" share the same volume. -``` +```ignore [package.metadata.build-variant.image-layout] os-image-size-gib = 2 data-image-size-gib = 1 @@ -174,14 +174,14 @@ partition-plan = "split" `supported-arches` is the list of architectures the variant is able to run on. The values can be `x86_64` and `aarch64`. If not specified, the variant can run on any of those architectures. -``` +```ignore [package.metadata.build-variant] supported-arches = ["x86_64"] ``` `kernel-parameters` is a list of extra parameters to be added to the kernel command line. The given parameters are inserted at the start of the command line. -``` +```ignore [package.metadata.build-variant] kernel-parameters = [ "console=ttyS42", @@ -193,14 +193,14 @@ to conditionally use or exclude certain firmware-level features in variants. `grub-set-private-var` means that the grub image for the current variant includes the command to find the BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub config file to consume. This feature flag is a prerequisite for Boot Config support. -``` +```ignore [package.metadata.build-variant.image-features] grub-set-private-var = true ``` `systemd-networkd` uses the `systemd-networkd` network backend in place of `wicked`. This feature flag is meant primarily for development, and will be removed when development has completed. -``` +```ignore [package.metadata.build-variant.image-features] systemd-networkd = true ``` @@ -210,7 +210,7 @@ boot, i.e. the host will use cgroup v2 by default. 
This feature flag allows old variants to continue booting with cgroup v1 and new variants to move to cgroup v2, while users will still be able to override the default via command line arguments set in the boot configuration. -``` +```ignore [package.metadata.build-variant.image-features] unified-cgroup-hierarchy = true ``` From 2ba65f8b8113a5b991b0b0a3304754f1e7fe3e1d Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Thu, 30 Mar 2023 18:16:07 +0000 Subject: [PATCH 0920/1356] pubsys: added SSM parameter validation Added a validate-ssm command to validate SSM parameters, given a JSON config file with regions and paths to files containing the expected parameters. --- tools/Cargo.lock | 1 + tools/pubsys/Cargo.toml | 1 + tools/pubsys/src/aws/mod.rs | 1 + tools/pubsys/src/aws/ssm/mod.rs | 2 +- tools/pubsys/src/aws/ssm/ssm.rs | 109 ++- tools/pubsys/src/aws/validate_ssm/mod.rs | 757 +++++++++++++++++++ tools/pubsys/src/aws/validate_ssm/results.rs | 686 +++++++++++++++++ tools/pubsys/src/main.rs | 17 +- 8 files changed, 1566 insertions(+), 8 deletions(-) create mode 100644 tools/pubsys/src/aws/validate_ssm/mod.rs create mode 100644 tools/pubsys/src/aws/validate_ssm/results.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 6ceb5202..fc5d7c67 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2488,6 +2488,7 @@ dependencies = [ "simplelog", "snafu", "structopt", + "tabled", "tempfile", "tinytemplate", "tokio", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 7db58a26..b5773934 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -41,6 +41,7 @@ serde_json = "1" simplelog = "0.12" snafu = "0.7" structopt = { version = "0.3", default-features = false } +tabled = "0.10" tempfile = "3" tinytemplate = "1" tokio = { version = "1", features = ["full"] } # LTS diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index defc6a01..7cd95a33 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -8,6 +8,7 @@ pub(crate) mod ami; pub(crate) mod promote_ssm; pub(crate) mod publish_ami; pub(crate) mod ssm; +pub(crate) mod validate_ssm; /// Builds a Region from the given region name. 
fn region_from_string(name: &str) -> Region { diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 6f3d5e2a..c06a8b32 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -297,7 +297,7 @@ pub(crate) struct BuildContext<'a> { } /// A map of SsmKey to its value -type SsmParameters = HashMap; +pub(crate) type SsmParameters = HashMap; /// Parse the AMI input file fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs) -> Result> { diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index e74eab88..fcfeb296 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -7,8 +7,8 @@ use aws_sdk_ssm::output::{GetParametersOutput, PutParameterOutput}; use aws_sdk_ssm::types::SdkError; use aws_sdk_ssm::{Client as SsmClient, Region}; use futures::future::{join, ready}; -use futures::stream::{self, StreamExt}; -use log::{debug, error, trace, warn}; +use futures::stream::{self, FuturesUnordered, StreamExt}; +use log::{debug, error, info, trace, warn}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::time::Duration; @@ -135,6 +135,88 @@ where Ok(parameters) } +/// Fetches all SSM parameters under a given prefix using the given clients +pub(crate) async fn get_parameters_by_prefix<'a>( + clients: &'a HashMap, + ssm_prefix: &str, +) -> HashMap<&'a Region, Result> { + // Build requests for parameters; we have to request with a regional client so we split them by + // region + let mut requests = Vec::with_capacity(clients.len()); + for region in clients.keys() { + trace!("Requesting parameters in {}", region); + let ssm_client: &SsmClient = &clients[region]; + let get_future = get_parameters_by_prefix_in_region(region, ssm_client, ssm_prefix); + + requests.push(join(ready(region), get_future)); + } + + // Send requests in parallel and wait for responses, collecting results into a list. + requests + .into_iter() + .collect::>() + .collect() + .await +} + +/// Fetches all SSM parameters under a given prefix in a single region +pub(crate) async fn get_parameters_by_prefix_in_region( + region: &Region, + client: &SsmClient, + ssm_prefix: &str, +) -> Result { + info!("Retrieving SSM parameters in {}", region.to_string()); + let mut parameters = HashMap::new(); + + // Send the request + let mut get_future = client + .get_parameters_by_path() + .path(ssm_prefix) + .recursive(true) + .into_paginator() + .send(); + + // Iterate over the retrieved parameters + while let Some(page) = get_future.next().await { + let retrieved_parameters = page + .context(error::GetParametersByPathSnafu { + path: ssm_prefix, + region: region.to_string(), + })? + .parameters() + .unwrap_or_default() + .to_owned(); + for parameter in retrieved_parameters { + // Insert a new key-value pair into the map, with the key containing region and parameter name + // and the value containing the parameter value + parameters.insert( + SsmKey::new( + region.to_owned(), + parameter + .name() + .ok_or(error::Error::MissingField { + region: region.to_string(), + field: "name".to_string(), + })? + .to_owned(), + ), + parameter + .value() + .ok_or(error::Error::MissingField { + region: region.to_string(), + field: "value".to_string(), + })? 
+ .to_owned(), + ); + } + } + info!( + "SSM parameters in {} have been retrieved", + region.to_string() + ); + Ok(parameters) +} + /// Sets the values of the given SSM keys using the given clients pub(crate) async fn set_parameters( parameters_to_set: &SsmParameters, @@ -324,8 +406,8 @@ pub(crate) async fn validate_parameters( Ok(()) } -mod error { - use aws_sdk_ssm::error::GetParametersError; +pub(crate) mod error { + use aws_sdk_ssm::error::{GetParametersByPathError, GetParametersError}; use aws_sdk_ssm::types::SdkError; use snafu::Snafu; use std::error::Error as _; @@ -334,13 +416,28 @@ mod error { #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] #[allow(clippy::large_enum_variant)] - pub(crate) enum Error { + pub enum Error { #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source.source().map(|x| x.to_string()).unwrap_or("unknown".to_string())))] GetParameters { region: String, source: SdkError, }, + #[snafu(display( + "Failed to fetch SSM parameters by path {} in {}: {}", + path, + region, + source + ))] + GetParametersByPath { + path: String, + region: String, + source: SdkError, + }, + + #[snafu(display("Missing field in parameter in {}: {}", region, field))] + MissingField { region: String, field: String }, + #[snafu(display("Response to {} was missing {}", request_type, missing))] MissingInResponse { region: String, @@ -369,4 +466,4 @@ mod error { } } pub(crate) use error::Error; -type Result = std::result::Result; +pub(crate) type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs new file mode 100644 index 00000000..a6a1394b --- /dev/null +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -0,0 +1,757 @@ +//! The validate_ssm module owns the 'validate-ssm' subcommand and controls the process of +//! 
validating SSM parameters and AMIs + +pub mod results; + +use self::results::{SsmValidationResult, SsmValidationResultStatus, SsmValidationResults}; +use super::ssm::ssm::get_parameters_by_prefix; +use super::ssm::{SsmKey, SsmParameters}; +use crate::aws::client::build_client_config; +use crate::Args; +use aws_sdk_ssm::{Client as SsmClient, Region}; +use log::{info, trace}; +use pubsys_config::InfraConfig; +use serde::Deserialize; +use snafu::ResultExt; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::path::PathBuf; +use structopt::{clap, StructOpt}; + +/// Validates SSM parameters and AMIs +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub struct ValidateSsmArgs { + /// File holding the validation configuration + #[structopt(long, parse(from_os_str))] + validation_config_path: PathBuf, + + /// Optional path where the validation results should be written + #[structopt(long, parse(from_os_str))] + write_results_path: Option, + + #[structopt(long, requires = "write-results-path")] + /// Optional filter to only write validation results with these statuses to the above path + /// Available statuses are: `Correct`, `Incorrect`, `Missing`, `Unexpected` + write_results_filter: Option>, + + /// If this flag is added, print the results summary table as JSON instead of a + /// plaintext table + #[structopt(long)] + json: bool, +} + +/// Structure of the validation configuration file +#[derive(Debug, Deserialize)] +pub(crate) struct ValidationConfig { + /// Vec of paths to JSON files containing expected metadata (image ids and SSM parameters) + expected_metadata_lists: Vec, + + /// Vec of regions where the parameters should be validated + validation_regions: Vec, +} + +/// A structure that allows us to store a parameter value along with the AMI ID it refers to. In +/// some cases, then AMI ID *is* the parameter value and both fields will hold the AMI ID. In other +/// cases the parameter value is not the AMI ID, but we need to remember which AMI ID it refers to. 
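+/// For example (illustrative values only): for an image-id style parameter both `value` and
+/// `ami_id` might hold "ami-0123456789abcdef0", while for an image-version style parameter
+/// `value` might be "1.13.0" and `ami_id` would still record the AMI that version refers to.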
+#[derive(Debug, PartialEq, Eq)] +pub(crate) struct SsmValue { + /// The value of the SSM parameter + pub(crate) value: String, + + /// The ID of the AMI the parameter is associated with, used for validation result reporting + pub(crate) ami_id: String, +} + +/// Performs SSM parameter validation and returns the `SsmValidationResults` object +pub async fn validate( + args: &Args, + validate_ssm_args: &ValidateSsmArgs, +) -> Result { + info!("Parsing Infra.toml file"); + + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(error::ConfigSnafu)?; + + let aws = infra_config.aws.clone().unwrap_or_default(); + + trace!("Parsed infra config: {:#?}", infra_config); + + // Read the validation config file and parse it into the `ValidationConfig` struct + let validation_config_file = File::open(&validate_ssm_args.validation_config_path).context( + error::ReadValidationConfigSnafu { + path: validate_ssm_args.validation_config_path.clone(), + }, + )?; + let validation_config: ValidationConfig = serde_json::from_reader(validation_config_file) + .context(error::ParseValidationConfigSnafu)?; + + let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); + + // Parse the parameter lists found in the validation config + info!("Parsing expected parameter lists"); + let expected_parameters = parse_parameter_lists( + validation_config.expected_metadata_lists, + &validation_config.validation_regions, + ) + .await?; + + info!("Parsed expected parameter lists"); + + // Create a Vec of Regions based on the region names in the validation config + let validation_regions: Vec = validation_config + .validation_regions + .iter() + .map(|s| Region::new(s.clone())) + .collect(); + + // Create a HashMap of SsmClients, one for each region where validation should happen + let base_region = &validation_regions[0]; + let mut ssm_clients = HashMap::with_capacity(validation_regions.len()); + + for region in &validation_regions { + let client_config = build_client_config(region, base_region, &aws).await; + let ssm_client = SsmClient::new(&client_config); + ssm_clients.insert(region.clone(), ssm_client); + } + + // Retrieve the SSM parameters using the SsmClients + info!("Retrieving SSM parameters"); + let parameters = get_parameters_by_prefix(&ssm_clients, ssm_prefix).await; + + // Validate the retrieved SSM parameters per region + info!("Validating SSM parameters"); + let results: HashMap>> = + parameters + .into_iter() + .map(|(region, region_result)| { + ( + region.clone(), + region_result.map(|result| { + validate_parameters_in_region( + expected_parameters.get(region).unwrap_or(&HashMap::new()), + &result, + ) + }), + ) + }) + .collect::>>>( + ); + + let validation_results = SsmValidationResults::new(results); + + // If a path was given to write the results to, write the results + if let Some(write_results_path) = &validate_ssm_args.write_results_path { + // Filter the results by given status, and if no statuses were given, get all results + info!("Writing results to file"); + let filtered_results = validation_results.get_results_for_status( + validate_ssm_args + .write_results_filter + .as_ref() + .unwrap_or(&vec![ + SsmValidationResultStatus::Correct, + SsmValidationResultStatus::Incorrect, + SsmValidationResultStatus::Missing, + SsmValidationResultStatus::Unexpected, + ]), + ); + + // Write the results as JSON + serde_json::to_writer_pretty( + &File::create(write_results_path).context(error::WriteValidationResultsSnafu 
{ + path: write_results_path, + })?, + &filtered_results, + ) + .context(error::SerializeValidationResultsSnafu)?; + } + + Ok(validation_results) +} + +/// Validates SSM parameters in a single region, based on a HashMap (SsmKey, SsmValue) of expected +/// parameters and a HashMap (SsmKey, String) of actual retrieved parameters. Returns a HashSet of +/// SsmValidationResult objects. +pub(crate) fn validate_parameters_in_region( + expected_parameters: &HashMap, + actual_parameters: &SsmParameters, +) -> HashSet { + // Clone the HashMap of actual parameters so items can be removed + let mut actual_parameters = actual_parameters.clone(); + let mut results = HashSet::new(); + + // Validate all expected parameters, creating an SsmValidationResult object and + // removing the corresponding parameter from `actual_parameters` if found + for (ssm_key, ssm_value) in expected_parameters { + results.insert(SsmValidationResult::new( + ssm_key.name.to_owned(), + Some(ssm_value.value.clone()), + actual_parameters.get(ssm_key).map(|v| v.to_owned()), + ssm_key.region.clone(), + Some(ssm_value.ami_id.clone()), + )); + actual_parameters.remove(ssm_key); + } + + // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` + // and therefore get the `Unexpected` status + for (ssm_key, ssm_value) in actual_parameters { + results.insert(SsmValidationResult::new( + ssm_key.name.to_owned(), + None, + Some(ssm_value), + ssm_key.region.clone(), + None, + )); + } + results +} + +type RegionName = String; +type AmiId = String; +type ParameterName = String; +type ParameterValue = String; + +/// Parse the lists of parameters whose paths are in `parameter_lists`. Only parse the parameters +/// in the regions present in `validation_regions`. Return a HashMap of Region mapped to a HashMap +/// of the parameters in that region, with each parameter being a mapping of `SsmKey` to `SsmValue`. +pub(crate) async fn parse_parameter_lists( + parameter_lists: Vec, + validation_regions: &[String], +) -> Result>> { + let mut parameter_map: HashMap> = HashMap::new(); + for parameter_list_path in parameter_lists { + // Parse the JSON list as a HashMap of region_name, mapped to a HashMap of ami_id, mapped to + // a HashMap of parameter_name and parameter_value + let parameter_list: HashMap< + RegionName, + HashMap>, + > = serde_json::from_reader(&File::open(parameter_list_path.clone()).context( + error::ReadExpectedParameterListSnafu { + path: parameter_list_path, + }, + )?) 
+ .context(error::ParseExpectedParameterListSnafu)?; + + // Iterate over the parsed HashMap, converting the nested HashMap into a HashMap of Region + // mapped to a HashMap of SsmKey, SsmValue + parameter_list + .iter() + .filter(|(region, _)| validation_regions.contains(region)) + .flat_map(|(region, ami_ids)| { + ami_ids + .iter() + .map(move |(ami_id, param_names)| (region, ami_id, param_names)) + }) + .flat_map(|(region, ami_id, params)| { + params.iter().map(move |(parameter_name, parameter_value)| { + ( + region.clone(), + ami_id.clone(), + parameter_name.clone(), + parameter_value.clone(), + ) + }) + }) + .for_each(|(region, ami_id, parameter_name, parameter_value)| { + parameter_map + .entry(Region::new(region.clone())) + .or_insert(HashMap::new()) + .insert( + SsmKey::new(Region::new(region), parameter_name), + SsmValue { + value: parameter_value, + ami_id, + }, + ); + }); + } + Ok(parameter_map) +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, validate_ssm_args: &ValidateSsmArgs) -> Result<()> { + let results = validate(args, validate_ssm_args).await?; + + if validate_ssm_args.json { + println!( + "{}", + serde_json::to_string_pretty(&results.get_json_summary()) + .context(error::SerializeResultsSummarySnafu)? + ) + } else { + println!("{}", results) + } + Ok(()) +} + +mod error { + use crate::aws::ssm::ssm; + use snafu::Snafu; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + pub enum Error { + #[snafu(display("Error reading config: {}", source))] + Config { source: pubsys_config::Error }, + + #[snafu(display("Error reading validation config at path {}: {}", path.display(), source))] + ReadValidationConfig { + source: std::io::Error, + path: PathBuf, + }, + + #[snafu(display("Error parsing validation config: {}", source))] + ParseValidationConfig { source: serde_json::Error }, + + #[snafu(display("Missing field in validation config: {}", missing))] + MissingField { missing: String }, + + #[snafu(display("Missing region in expected parameters: {}", missing))] + MissingExpectedRegion { missing: String }, + + #[snafu(display("Missing region in actual parameters: {}", missing))] + MissingActualRegion { missing: String }, + + #[snafu(display("Found no parameters in source version {}", version))] + EmptySource { version: String }, + + #[snafu(display("Failed to fetch parameters from SSM: {}", source))] + FetchSsm { source: ssm::error::Error }, + + #[snafu(display("Infra.toml is missing {}", missing))] + MissingConfig { missing: String }, + + #[snafu(display("Failed to validate SSM parameters: {}", missing))] + ValidateSsm { missing: String }, + + #[snafu(display("Failed to validate SSM parameters in region: {}", region))] + ValidateSsmRegion { region: String }, + + #[snafu(display("Failed to parse AMI list: {}", source))] + ParseExpectedParameterList { source: serde_json::Error }, + + #[snafu(display("Failed to read AMI list: {}", path.display()))] + ReadExpectedParameterList { + source: std::io::Error, + path: PathBuf, + }, + + #[snafu(display("Invalid validation status filter: {}", filter))] + InvalidStatusFilter { filter: String }, + + #[snafu(display("Failed to serialize validation results to json: {}", source))] + SerializeValidationResults { source: serde_json::Error }, + + #[snafu(display("Failed to write validation results to {}: {}", path.display(), source))] + WriteValidationResults { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to serialize results summary into JSON: {}", 
source))] + SerializeResultsSummary { source: serde_json::Error }, + } +} + +pub(crate) use error::Error; +type Result = std::result::Result; + +#[cfg(test)] +mod test { + use crate::aws::{ + ssm::{SsmKey, SsmParameters}, + validate_ssm::{results::SsmValidationResult, validate_parameters_in_region, SsmValue}, + }; + use aws_sdk_ssm::Region; + use std::collections::{HashMap, HashSet}; + + // These tests assert that the parameters can be validated correctly. + + // Tests validation of parameters where the expected value is equal to the actual value + #[test] + fn validate_parameters_all_correct() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + SsmValue { + value: "test1-parameter-value".to_string(), + ami_id: "test1-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + SsmValue { + value: "test2-parameter-value".to_string(), + ami_id: "test2-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + SsmValue { + value: "test3-parameter-value".to_string(), + ami_id: "test3-image-id".to_string(), + }, + ), + ]); + let actual_parameters: SsmParameters = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + "test3-parameter-value".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + Some("test3-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where the expected value is different from the actual value + #[test] + fn validate_parameters_all_incorrect() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + SsmValue { + value: "test1-parameter-value".to_string(), + ami_id: "test1-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + SsmValue { + value: "test2-parameter-value".to_string(), + ami_id: "test2-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + SsmValue { + value: "test3-parameter-value".to_string(), + ami_id: "test3-image-id".to_string(), + }, + ), + ]); + let actual_parameters: SsmParameters = HashMap::from([ + 
( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value-wrong".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value-wrong".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + "test3-parameter-value-wrong".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + Some("test3-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where the actual value is missing + #[test] + fn validate_parameters_all_missing() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + SsmValue { + value: "test1-parameter-value".to_string(), + ami_id: "test1-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + SsmValue { + value: "test2-parameter-value".to_string(), + ami_id: "test2-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + SsmValue { + value: "test3-parameter-value".to_string(), + ami_id: "test3-image-id".to_string(), + }, + ), + ]); + let actual_parameters: SsmParameters = HashMap::new(); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where the expected value is missing + #[test] + fn validate_parameters_all_unexpected() { + let expected_parameters: HashMap = HashMap::new(); + let actual_parameters: SsmParameters = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + 
name: "test3-parameter-name".to_string(), + }, + "test3-parameter-value".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + None, + Some("test3-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + None, + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + None, + Some("test2-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where each status (Correct, Incorrect, Missing, Unexpected) + // happens once + #[test] + fn validate_parameters_mixed() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + SsmValue { + value: "test1-parameter-value".to_string(), + ami_id: "test1-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + SsmValue { + value: "test2-parameter-value".to_string(), + ami_id: "test2-image-id".to_string(), + }, + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + SsmValue { + value: "test3-parameter-value".to_string(), + ami_id: "test3-image-id".to_string(), + }, + ), + ]); + let actual_parameters: SsmParameters = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value-wrong".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test4-parameter-name".to_string(), + }, + "test4-parameter-value".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + + assert_eq!(results, expected_results); + } +} diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs new file mode 100644 index 00000000..f11c64d4 --- /dev/null +++ b/tools/pubsys/src/aws/validate_ssm/results.rs @@ -0,0 +1,686 @@ +//! The results module owns the reporting of SSM validation results. 
+ +use crate::aws::ssm::ssm::Result; +use aws_sdk_ssm::Region; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::fmt::{self, Display}; +use std::str::FromStr; +use tabled::{Table, Tabled}; + +/// Represent the possible status of an SSM validation +#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] +pub enum SsmValidationResultStatus { + /// The expected value was equal to the actual value + Correct, + + /// The expected value was different from the actual value + Incorrect, + + /// The parameter was expected but not included in the actual parameters + Missing, + + /// The parameter was present in the actual parameters but not expected + Unexpected, +} + +impl Display for SsmValidationResultStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Correct => write!(f, "Correct"), + Self::Incorrect => write!(f, "Incorrect"), + Self::Missing => write!(f, "Missing"), + Self::Unexpected => write!(f, "Unexpected"), + } + } +} + +impl FromStr for SsmValidationResultStatus { + type Err = super::Error; + + fn from_str(s: &str) -> std::result::Result { + match s { + "Correct" => Ok(Self::Correct), + "Incorrect" => Ok(Self::Incorrect), + "Missing" => Ok(Self::Missing), + "Unexpected" => Ok(Self::Unexpected), + filter => Err(Self::Err::InvalidStatusFilter { + filter: filter.to_string(), + }), + } + } +} + +/// Represents a single SSM validation result +#[derive(Debug, Eq, Hash, PartialEq, Tabled, Serialize)] +pub struct SsmValidationResult { + /// The name of the parameter + pub(crate) name: String, + + /// The expected value of the parameter + #[tabled(display_with = "display_option")] + pub(crate) expected_value: Option, + + /// The actual retrieved value of the parameter + #[tabled(display_with = "display_option")] + pub(crate) actual_value: Option, + + /// The region the parameter resides in + #[serde(serialize_with = "serialize_region")] + pub(crate) region: Region, + + /// The ID of the AMI the parameter is associated with + #[tabled(display_with = "display_option")] + pub(crate) ami_id: Option, + + /// The validation status of the parameter + pub(crate) status: SsmValidationResultStatus, +} + +fn display_option(option: &Option) -> &str { + match option { + Some(option) => option, + None => "N/A", + } +} + +fn serialize_region(region: &Region, serializer: S) -> std::result::Result +where + S: serde::Serializer, +{ + serializer.serialize_str(region.to_string().as_str()) +} + +impl SsmValidationResult { + pub(crate) fn new( + name: String, + expected_value: Option, + actual_value: Option, + region: Region, + ami_id: Option, + ) -> SsmValidationResult { + // Determine the validation status based on equality, presence, and absence of expected and + // actual parameter values + let status = match (&expected_value, &actual_value) { + (Some(expected_value), Some(actual_value)) if actual_value.eq(expected_value) => { + SsmValidationResultStatus::Correct + } + (Some(_), Some(_)) => SsmValidationResultStatus::Incorrect, + (_, None) => SsmValidationResultStatus::Missing, + (None, _) => SsmValidationResultStatus::Unexpected, + }; + SsmValidationResult { + name, + expected_value, + actual_value, + region, + ami_id, + status, + } + } +} + +#[derive(Tabled, Serialize)] +struct SsmValidationRegionSummary { + correct: i32, + incorrect: i32, + missing: i32, + unexpected: i32, + accessible: bool, +} + +impl From<&HashSet> for SsmValidationRegionSummary { + fn from(results: &HashSet) -> Self { + let mut region_validation = 
SsmValidationRegionSummary { + correct: 0, + incorrect: 0, + missing: 0, + unexpected: 0, + accessible: true, + }; + for validation_result in results { + match validation_result.status { + SsmValidationResultStatus::Correct => region_validation.correct += 1, + SsmValidationResultStatus::Incorrect => region_validation.incorrect += 1, + SsmValidationResultStatus::Missing => region_validation.missing += 1, + SsmValidationResultStatus::Unexpected => region_validation.unexpected += 1, + } + } + region_validation + } +} + +impl SsmValidationRegionSummary { + fn no_valid_results() -> Self { + // When the parameters in a region couldn't be retrieved, use `-1` to indicate this in the + // output table and set `accessible` to `false` + SsmValidationRegionSummary { + correct: -1, + incorrect: -1, + missing: -1, + unexpected: -1, + accessible: false, + } + } +} + +/// Represents all SSM validation results +#[derive(Debug)] +pub struct SsmValidationResults { + pub(crate) results: HashMap>>, +} + +impl Default for SsmValidationResults { + fn default() -> Self { + Self::new(HashMap::new()) + } +} + +impl Display for SsmValidationResults { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Create a summary for each region, counting the number of parameters per status + let region_validations: HashMap = + self.get_results_summary(); + + // Represent the HashMap of summaries as a `Table` + let table = Table::new( + region_validations + .iter() + .map(|(region, results)| (region.to_string(), results)) + .collect::>(), + ) + .to_string(); + write!(f, "{}", table) + } +} + +impl SsmValidationResults { + pub fn new(results: HashMap>>) -> Self { + SsmValidationResults { results } + } + + /// Returns a HashSet containing all validation results whose status is present in + /// `requested_status` + pub fn get_results_for_status( + &self, + requested_status: &[SsmValidationResultStatus], + ) -> HashSet<&SsmValidationResult> { + let mut results = HashSet::new(); + for region_results in self.results.values().flatten() { + results.extend( + region_results + .iter() + .filter(|result| requested_status.contains(&result.status)) + .collect::>(), + ) + } + results + } + + fn get_results_summary(&self) -> HashMap { + self.results + .iter() + .map(|(region, region_result)| { + region_result + .as_ref() + .map(|region_validation| { + ( + region.clone(), + SsmValidationRegionSummary::from(region_validation), + ) + }) + .unwrap_or(( + region.clone(), + SsmValidationRegionSummary::no_valid_results(), + )) + }) + .collect() + } + + pub(crate) fn get_json_summary(&self) -> serde_json::Value { + serde_json::json!(self + .get_results_summary() + .into_iter() + .map(|(region, results)| (region.to_string(), results)) + .collect::>()) + } +} + +#[cfg(test)] +mod test { + use std::collections::{HashMap, HashSet}; + + use crate::aws::validate_ssm::results::{ + SsmValidationResult, SsmValidationResultStatus, SsmValidationResults, + }; + use aws_sdk_ssm::Region; + + // These tests assert that the `get_results_for_status` function returns the correct values. 
+ + // Tests empty SsmValidationResults + #[test] + fn get_results_for_status_empty() { + let results = SsmValidationResults::new(HashMap::from([ + (Region::new("us-west-2"), Ok(HashSet::from([]))), + (Region::new("us-east-1"), Ok(HashSet::from([]))), + ])); + let results_filtered = results.get_results_for_status(&vec![ + SsmValidationResultStatus::Correct, + SsmValidationResultStatus::Incorrect, + SsmValidationResultStatus::Missing, + SsmValidationResultStatus::Unexpected, + ]); + + assert_eq!(results_filtered, HashSet::new()); + } + + // Tests the `Correct` status + #[test] + fn get_results_for_status_correct() { + let results = SsmValidationResults::new(HashMap::from([ + ( + Region::new("us-west-2"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + ])), + ), + ( + Region::new("us-east-1"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + ])), + ), + ])); + let results_filtered = + results.get_results_for_status(&vec![SsmValidationResultStatus::Correct]); + + assert_eq!( + results_filtered, + HashSet::from([ + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ) + ]) + ); + } + + // Tests a filter containing the `Correct` and `Incorrect` statuses + #[test] + fn get_results_for_status_correct_incorrect() { + let results = SsmValidationResults::new(HashMap::from([ + ( + Region::new("us-west-2"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + 
"test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + ])), + ), + ( + Region::new("us-east-1"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + ])), + ), + ])); + let results_filtered = results.get_results_for_status(&vec![ + SsmValidationResultStatus::Correct, + SsmValidationResultStatus::Incorrect, + ]); + + assert_eq!( + results_filtered, + HashSet::from([ + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + &SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + &SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ) + ]) + ); + } + + // Tests a filter containing all statuses + #[test] + fn get_results_for_status_all() { + let results = SsmValidationResults::new(HashMap::from([ + ( + Region::new("us-west-2"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + 
Some("test4-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + ])), + ), + ( + Region::new("us-east-1"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + ])), + ), + ])); + let results_filtered = results.get_results_for_status(&vec![ + SsmValidationResultStatus::Correct, + SsmValidationResultStatus::Incorrect, + SsmValidationResultStatus::Missing, + SsmValidationResultStatus::Unexpected, + ]); + + assert_eq!( + results_filtered, + HashSet::from([ + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + &SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + &SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + &SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ), + &SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-west-2"), + Some("test3-image-id".to_string()), + ), + &SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + &SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + None, + Region::new("us-east-1"), + Some("test3-image-id".to_string()), + ), + &SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ) + ]) + ); + } + + // Tests the `Missing` filter when none of the SsmValidationResults have this status + #[test] + fn get_results_for_status_missing_none() { + let results = SsmValidationResults::new(HashMap::from([ + ( + Region::new("us-west-2"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + 
Region::new("us-west-2"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-west-2"), + None, + ), + ])), + ), + ( + Region::new("us-east-1"), + Ok(HashSet::from([ + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-east-1"), + Some("test1-image-id".to_string()), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-east-1"), + Some("test2-image-id".to_string()), + ), + SsmValidationResult::new( + "test4-parameter-name".to_string(), + None, + Some("test4-parameter-value".to_string()), + Region::new("us-east-1"), + None, + ), + ])), + ), + ])); + let results_filtered = + results.get_results_for_status(&vec![SsmValidationResultStatus::Missing]); + + assert_eq!(results_filtered, HashSet::new()); + } +} diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index e0daee46..adf99931 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -10,6 +10,7 @@ Currently implemented: * Marking EC2 AMIs public (or private again) * setting SSM parameters based on built AMIs * promoting SSM parameters from versioned entries to named (e.g. 'latest') +* validating SSM parameters by comparing the returned parameters in a region to a given list of parameters To be implemented: * high-level document describing pubsys usage with examples @@ -114,6 +115,14 @@ fn run() -> Result<()> { .context(error::PromoteSsmSnafu) }) } + SubCommand::ValidateSsm(ref validate_ssm_args) => { + let rt = Runtime::new().context(error::RuntimeSnafu)?; + rt.block_on(async { + aws::validate_ssm::run(&args, validate_ssm_args) + .await + .context(error::ValidateSsmSnafu) + }) + } SubCommand::UploadOva(ref upload_args) => { vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) } @@ -130,7 +139,7 @@ fn main() { /// Automates publishing of Bottlerocket updates #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] -struct Args { +pub struct Args { #[structopt(global = true, long, default_value = "INFO")] /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE log_level: LevelFilter, @@ -155,6 +164,7 @@ enum SubCommand { Ssm(aws::ssm::SsmArgs), PromoteSsm(aws::promote_ssm::PromoteArgs), + ValidateSsm(aws::validate_ssm::ValidateSsmArgs), UploadOva(vmware::upload_ova::UploadArgs), } @@ -224,6 +234,11 @@ mod error { UploadOva { source: crate::vmware::upload_ova::Error, }, + + #[snafu(display("Failed to validate SSM parameters: {}", source))] + ValidateSsm { + source: crate::aws::validate_ssm::Error, + }, } fn publish_ami_message(error: &crate::aws::publish_ami::Error) -> String { From ea5bd01e7bbc7a277173cbe0176c42c777a07f03 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 23 Mar 2023 18:04:31 +0000 Subject: [PATCH 0921/1356] kubelet: Enable env variables for cred providers This adds the capability to provide environment variables that can be passed through to the Kubernetes credential provider configuration. 
Signed-off-by: Sean McGinnis --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index bb7977ec..1258348e 100644 --- a/README.md +++ b/README.md @@ -434,6 +434,11 @@ The following settings are optional and allow you to further configure your clus "*.dkr.ecr.us-east-2.amazonaws.com", "*.dkr.ecr.us-west-2.amazonaws.com" ] + + [settings.kubernetes.credential-providers.ecr-credential-provider.environment] + # The following are not used with ecr-credential-provider, but are provided for illustration + "KEY" = "abc123xyz" + "GOMAXPROCS" = "2" ``` **Note:** `ecr-credential-provider` is currently the only supported provider. From 5aa76603c214cec7e1162fb0c1113d3bf0daebb6 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 4 Apr 2023 15:05:34 +0000 Subject: [PATCH 0922/1356] tools/diff-kernel-config: Adjust script to work on variants Specify the variants we want to work on instead of choosing hard coded variants based on kernel versions. This has multiple advantages: * we can automatically handle major kernel updates * we are not limited to hardcoded variants (we could not cover vmware variants before) * we can automatically decide which architectures to cover Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 140 ++++++++++++++++++--------------------- 1 file changed, 65 insertions(+), 75 deletions(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index cb38fc26..ec4902bf 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -32,8 +32,9 @@ Compare kernel configurations before and after a series of commits. -a, --after new Git revision to compare from -b, --before baseline Git revision to compare against - -k, --kernel kernel versions to compare configs for, may be given - multiple times (optional, defaults to all kernels) + -v, --variant variant to pick kernel from to compare configs for, may + be given multiple times (optional, defaults to this list: + 'aws-k8s-1.23', 'metal-k8s-1.23', 'aws-dev', 'metal-dev') -o, --output-dir path to the output directory; must not exist yet -h, --help show this help text @@ -66,6 +67,7 @@ usage_error() { # kernel_versions=() +variants=() while [[ $# -gt 0 ]]; do case $1 in @@ -73,8 +75,8 @@ while [[ $# -gt 0 ]]; do shift; gitrev_after_arg=$1 ;; -b|--before) shift; gitrev_before_arg=$1 ;; - -k|--kernel) - shift; kernel_versions+=( "$1" ) ;; + -v|--variant) + shift; variants+=( "$1" ) ;; -o|--output-dir) shift; output_dir=$1 ;; -h|--help) @@ -85,18 +87,14 @@ while [[ $# -gt 0 ]]; do shift done -if [[ ${#kernel_versions[@]} -eq 0 ]]; then - kernel_versions=( 5.10 5.15 ) -else - for kver in "${kernel_versions[@]}"; do - case ${kver} in - 5.10) continue ;; - 5.15) continue ;; - *) bail "Unknown kernel version '${kver}'" ;; - esac - done +if [[ ${#variants[@]} -eq 0 ]]; then + variants=( aws-k8s-1.23 metal-k8s-1.23 aws-dev metal-dev ) fi -readonly kernel_versions + +for var in "${variants[@]}"; do + [[ -d variants/${var} ]] || bail "Unknown variant '${var}'" +done +readonly variants [[ -n ${output_dir} ]] || usage_error 'require -o|--output-dir' [[ -e ${output_dir} ]] && bail "Output directory '${output_dir}' exists already, not touching it" @@ -147,80 +145,72 @@ for state in before after; do gitrev_var=gitrev_${state} git checkout --quiet "${!gitrev_var}" || bail "Cannot check out '${!gitrev_var}'." 
- for arch in aarch64 x86_64; do - - for kver in "${kernel_versions[@]}"; do - - variants=() - case ${kver} in - 5.10) - variants+=( 'aws-k8s-1.23' ) - if [[ ${arch} = x86_64 ]]; then - variants+=( 'metal-k8s-1.23' ) - fi - ;; - 5.15) - variants+=( 'aws-dev' 'metal-dev' ) - ;; - *) - bail "No known variants build kernel ${kver}." - ;; - esac + for variant in "${variants[@]}"; do - for variant in "${variants[@]}"; do + arches=() + IFS=" " read -r -a arches <<< "$(grep "supported-arches" "variants/${variant}/Cargo.toml" | cut -d ' ' -f 3 | tr -d '"[]')" + if [[ ${#arches[@]} -eq 0 ]]; then + arches=( aarch64 x86_64 ) + fi - debug_id="state=${state} arch=${arch} variant=${variant}" + kver=$(grep "packages/kernel" "variants/${variant}/Cargo.toml" | cut -d ' ' -f 1 | cut -d '-' -f 2 | tr '_' '.') - IFS=- read -ra variant_parts <<<"${variant}" - variant_platform="${variant_parts[0]}" - variant_runtime="${variant_parts[1]}" - variant_family="${variant_platform}-${variant_runtime}" + kernel_versions+=( "${kver}" ) - # - # Run build - # + for arch in "${arches[@]}"; do - cargo make \ - -e BUILDSYS_ARCH="${arch}" \ - -e BUILDSYS_VARIANT="${variant}" \ - -e BUILDSYS_VARIANT_PLATFORM="${variant_platform}" \ - -e BUILDSYS_VARIANT_RUNTIME="${variant_runtime}" \ - -e BUILDSYS_VARIANT_FAMILY="${variant_family}" \ - -e PACKAGE="kernel-${kver/./_}" \ - build-package \ - || bail "Build failed for ${debug_id}" + debug_id="state=${state} arch=${arch} variant=${variant} kernel=${kver}" - # - # Find kernel RPM - # + IFS=- read -ra variant_parts <<<"${variant}" + variant_platform="${variant_parts[0]}" + variant_runtime="${variant_parts[1]}" + variant_family="${variant_platform}-${variant_runtime}" - shopt -s nullglob - kernel_rpms=( - ./build/rpms/bottlerocket-"${arch}"-*kernel-"${kver}"-"${kver}".*.rpm - ) - shopt -u nullglob + # + # Run build + # - case ${#kernel_rpms[@]} in - 0) bail "No kernel RPM found for ${debug_id}" ;; - 1) kernel_rpm=${kernel_rpms[0]} ;; - *) bail "More than one kernel RPM found for ${debug_id}" ;; - esac + cargo make \ + -e BUILDSYS_ARCH="${arch}" \ + -e BUILDSYS_VARIANT="${variant}" \ + -e BUILDSYS_VARIANT_PLATFORM="${variant_platform}" \ + -e BUILDSYS_VARIANT_RUNTIME="${variant_runtime}" \ + -e BUILDSYS_VARIANT_FAMILY="${variant_family}" \ + -e PACKAGE="kernel-${kver/./_}" \ + build-package \ + || bail "Build failed for ${debug_id}" + # + # Find kernel RPM + # + + shopt -s nullglob + kernel_rpms=( + ./build/rpms/bottlerocket-"${arch}"-*kernel-"${kver}"-"${kver}".*.rpm + ) + shopt -u nullglob + + case ${#kernel_rpms[@]} in + 0) bail "No kernel RPM found for ${debug_id}" ;; + 1) kernel_rpm=${kernel_rpms[0]} ;; + *) bail "More than one kernel RPM found for ${debug_id}" ;; + esac - # - # Extract kernel config - # + kver_full=$(echo "${kernel_rpm}" | cut -d '-' -f 5) + # + # Extract kernel config + # - config_path=${output_dir}/config-${arch}-${kver}-${variant}-${state} - rpm2cpio "${kernel_rpm}" \ - | cpio --quiet --extract --to-stdout ./boot/config >"${config_path}" - [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" + config_path=${output_dir}/config-${arch}-${variant}-${state} + rpm2cpio "${kernel_rpm}" \ + | cpio --quiet --extract --to-stdout ./boot/config >"${config_path}" + [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" - done # variant - done # kver + echo "config-${arch}-${variant}-${state} -> ${kver_full}" >> "${output_dir}"/kver_mapping + done #arch - done # arch + done #variant done # state From 
3b00ffa6851d72d12fbe553d86943867b4f2bde8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 5 Apr 2023 14:13:01 +0000 Subject: [PATCH 0923/1356] tools/diff-kernel-config: generate tabular report When doing major kernel updates we usually get large sets of config changes. To ensure the review can be focused the common set of changes it makes sense to represent the config changes more structured than separate lists for each variant. Hence, create a csv report with one column for each inspected variant and one row for each config change, mapping which change impacts what versions. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index ec4902bf..2dbdb02d 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -256,3 +256,24 @@ echo # Generate combined report of changes head -v -n 999999 "${output_dir}"/*-diff >"${output_dir}"/diff-report echo "A full report has been placed in '${output_dir}/diff-report'" + +# Generate combined report in tabular form (csv) +echo "config change" > "${output_dir}"/diff-table +cat "${output_dir}"/*-diff | sort | uniq >> "${output_dir}"/diff-table + +for config_diff in "${output_dir}"/config-*-diff; do + variant_name=$(echo "${config_diff}" | sed -e "s%^${output_dir}/config-%%" -e "s%-diff$%%") + kver_before=$(grep "${variant_name}-before" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) + kver_after=$(grep "${variant_name}-after" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) + col_name="${variant_name} (${kver_before} -> ${kver_after})" + + sed -i "s/$/,/" "${output_dir}"/diff-table + sed -i "/^config change/ s/$/${col_name}/" "${output_dir}"/diff-table + + mapfile -t diff_lines < "${config_diff}" + + for line in "${diff_lines[@]}"; do + sed -i "/^${line}/ s/$/x/" "${output_dir}"/diff-table + done +done +echo "A tabular report in csv-format has been placed in '${output_dir}/diff-table'" From 4b67d087b899721c43607f8115ba38225c2e30dc Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 11 Apr 2023 13:07:26 +0000 Subject: [PATCH 0924/1356] kernel-5.10,5.15: Backport of upstream commit `bpf: Adjust insufficient default bpf_jit_limit` Backport upstream commit to adjust the memory size available to the bpf jit. While we will pick this patch up in due curse once we update kernel to 5.10.177 and 5.15.105, pick them up for now to ensure our next release has this one fixed. 
Signed-off-by: Leonard Foerster --- ...t-insufficient-default-bpf_jit_limit.patch | 76 +++++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 2 + ...t-insufficient-default-bpf_jit_limit.patch | 76 +++++++++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 2 + 4 files changed, 156 insertions(+) create mode 100644 packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch create mode 100644 packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch diff --git a/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch b/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch new file mode 100644 index 00000000..d47e3014 --- /dev/null +++ b/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch @@ -0,0 +1,76 @@ +From a4bbab27c4bf69486f5846d44134eb31c37e9b22 Mon Sep 17 00:00:00 2001 +From: Daniel Borkmann +Date: Mon, 20 Mar 2023 15:37:25 +0100 +Subject: [PATCH] bpf: Adjust insufficient default bpf_jit_limit + +[ Upstream commit 10ec8ca8ec1a2f04c4ed90897225231c58c124a7 ] + +We've seen recent AWS EKS (Kubernetes) user reports like the following: + + After upgrading EKS nodes from v20230203 to v20230217 on our 1.24 EKS + clusters after a few days a number of the nodes have containers stuck + in ContainerCreating state or liveness/readiness probes reporting the + following error: + + Readiness probe errored: rpc error: code = Unknown desc = failed to + exec in container: failed to start exec "4a11039f730203ffc003b7[...]": + OCI runtime exec failed: exec failed: unable to start container process: + unable to init seccomp: error loading seccomp filter into kernel: + error loading seccomp filter: errno 524: unknown + + However, we had not been seeing this issue on previous AMIs and it only + started to occur on v20230217 (following the upgrade from kernel 5.4 to + 5.10) with no other changes to the underlying cluster or workloads. + + We tried the suggestions from that issue (sysctl net.core.bpf_jit_limit=452534528) + which helped to immediately allow containers to be created and probes to + execute but after approximately a day the issue returned and the value + returned by cat /proc/vmallocinfo | grep bpf_jit | awk '{s+=$2} END {print s}' + was steadily increasing. + +I tested bpf tree to observe bpf_jit_charge_modmem, bpf_jit_uncharge_modmem +their sizes passed in as well as bpf_jit_current under tcpdump BPF filter, +seccomp BPF and native (e)BPF programs, and the behavior all looks sane +and expected, that is nothing "leaking" from an upstream perspective. + +The bpf_jit_limit knob was originally added in order to avoid a situation +where unprivileged applications loading BPF programs (e.g. seccomp BPF +policies) consuming all the module memory space via BPF JIT such that loading +of kernel modules would be prevented. The default limit was defined back in +2018 and while good enough back then, we are generally seeing far more BPF +consumers today. + +Adjust the limit for the BPF JIT pool from originally 1/4 to now 1/2 of the +module memory space to better reflect today's needs and avoid more users +running into potentially hard to debug issues. 
+ +Fixes: fdadd04931c2 ("bpf: fix bpf_jit_limit knob for PAGE_SIZE >= 64K") +Reported-by: Stephen Haynes +Reported-by: Lefteris Alexakis +Signed-off-by: Daniel Borkmann +Link: https://github.com/awslabs/amazon-eks-ami/issues/1179 +Link: https://github.com/awslabs/amazon-eks-ami/issues/1219 +Reviewed-by: Kuniyuki Iwashima +Link: https://lore.kernel.org/r/20230320143725.8394-1-daniel@iogearbox.net +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + kernel/bpf/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 73d4b1e32fbd..d3f6a070875c 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -826,7 +826,7 @@ static int __init bpf_jit_charge_init(void) + { + /* Only used as heuristic here to derive limit. */ + bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); +- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, ++ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, + PAGE_SIZE), LONG_MAX); + return 0; + } +-- +2.39.2 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 654821a6..ad5a5635 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -17,6 +17,8 @@ Source103: config-bottlerocket-vmware Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1002: 1002-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Backport of bpf jit limit adjustments, see https://github.com/awslabs/amazon-eks-ami/issues/1179 +Patch1003: 1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch # Add zstd support for compressed kernel modules Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch diff --git a/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch b/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch new file mode 100644 index 00000000..0bbb8036 --- /dev/null +++ b/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch @@ -0,0 +1,76 @@ +From 54869daa6a437887614274f65298ba44a3fac63a Mon Sep 17 00:00:00 2001 +From: Daniel Borkmann +Date: Mon, 20 Mar 2023 15:37:25 +0100 +Subject: [PATCH] bpf: Adjust insufficient default bpf_jit_limit + +[ Upstream commit 10ec8ca8ec1a2f04c4ed90897225231c58c124a7 ] + +We've seen recent AWS EKS (Kubernetes) user reports like the following: + + After upgrading EKS nodes from v20230203 to v20230217 on our 1.24 EKS + clusters after a few days a number of the nodes have containers stuck + in ContainerCreating state or liveness/readiness probes reporting the + following error: + + Readiness probe errored: rpc error: code = Unknown desc = failed to + exec in container: failed to start exec "4a11039f730203ffc003b7[...]": + OCI runtime exec failed: exec failed: unable to start container process: + unable to init seccomp: error loading seccomp filter into kernel: + error loading seccomp filter: errno 524: unknown + + However, we had not been seeing this issue on previous AMIs and it only + started to occur on v20230217 (following the upgrade from kernel 5.4 to + 5.10) with no other changes to the underlying cluster or workloads. 
+ + We tried the suggestions from that issue (sysctl net.core.bpf_jit_limit=452534528) + which helped to immediately allow containers to be created and probes to + execute but after approximately a day the issue returned and the value + returned by cat /proc/vmallocinfo | grep bpf_jit | awk '{s+=$2} END {print s}' + was steadily increasing. + +I tested bpf tree to observe bpf_jit_charge_modmem, bpf_jit_uncharge_modmem +their sizes passed in as well as bpf_jit_current under tcpdump BPF filter, +seccomp BPF and native (e)BPF programs, and the behavior all looks sane +and expected, that is nothing "leaking" from an upstream perspective. + +The bpf_jit_limit knob was originally added in order to avoid a situation +where unprivileged applications loading BPF programs (e.g. seccomp BPF +policies) consuming all the module memory space via BPF JIT such that loading +of kernel modules would be prevented. The default limit was defined back in +2018 and while good enough back then, we are generally seeing far more BPF +consumers today. + +Adjust the limit for the BPF JIT pool from originally 1/4 to now 1/2 of the +module memory space to better reflect today's needs and avoid more users +running into potentially hard to debug issues. + +Fixes: fdadd04931c2 ("bpf: fix bpf_jit_limit knob for PAGE_SIZE >= 64K") +Reported-by: Stephen Haynes +Reported-by: Lefteris Alexakis +Signed-off-by: Daniel Borkmann +Link: https://github.com/awslabs/amazon-eks-ami/issues/1179 +Link: https://github.com/awslabs/amazon-eks-ami/issues/1219 +Reviewed-by: Kuniyuki Iwashima +Link: https://lore.kernel.org/r/20230320143725.8394-1-daniel@iogearbox.net +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + kernel/bpf/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index cea0d1296599..f7c27c1cc593 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -829,7 +829,7 @@ static int __init bpf_jit_charge_init(void) + { + /* Only used as heuristic here to derive limit. */ + bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); +- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, ++ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, + PAGE_SIZE), LONG_MAX); + return 0; + } +-- +2.39.2 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index b4f8b59d..06a0fa44 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -19,6 +19,8 @@ Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Backport of bpf jit limit adjustments, see https://github.com/awslabs/amazon-eks-ami/issues/1179 +Patch1004: 1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch BuildRequires: bc BuildRequires: elfutils-devel From 02ff12a0a04273fb3ef43e2c80407be1dd85054d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Mar 2023 01:39:07 +0000 Subject: [PATCH 0925/1356] build(deps): bump openssl from 0.10.45 to 0.10.48 in /tools Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.45 to 0.10.48. 
- [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.45...openssl-v0.10.48) --- updated-dependencies: - dependency-name: openssl dependency-type: indirect ... Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index fc5d7c67..2eaa69ed 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2153,9 +2153,9 @@ checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" dependencies = [ "bitflags", "cfg-if", @@ -2185,9 +2185,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" dependencies = [ "autocfg", "cc", From b0c6ab2777b2a3c731ee7ae0a114e96ab144cda0 Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Mon, 10 Apr 2023 18:00:42 +0000 Subject: [PATCH 0926/1356] pubsys: changed validate-ssm input to single file --- tools/pubsys/src/aws/validate_ssm/mod.rs | 370 +++++++++---------- tools/pubsys/src/aws/validate_ssm/results.rs | 60 +-- 2 files changed, 175 insertions(+), 255 deletions(-) diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs index a6a1394b..48a97a88 100644 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -11,7 +11,6 @@ use crate::Args; use aws_sdk_ssm::{Client as SsmClient, Region}; use log::{info, trace}; use pubsys_config::InfraConfig; -use serde::Deserialize; use snafu::ResultExt; use std::collections::{HashMap, HashSet}; use std::fs::File; @@ -22,17 +21,22 @@ use structopt::{clap, StructOpt}; #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] pub struct ValidateSsmArgs { - /// File holding the validation configuration + /// File holding the expected parameters #[structopt(long, parse(from_os_str))] - validation_config_path: PathBuf, + expected_parameters_path: PathBuf, + + /// If this flag is set, check for unexpected parameters in the validation regions. If not, + /// only the parameters present in the expected parameters file will be validated. 
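+    ///
+    /// Illustrative invocation (the flag names assume structopt's default kebab-case naming
+    /// rather than being quoted from elsewhere in this change):
+    /// `pubsys validate-ssm --expected-parameters-path expected-parameters.json --check-unexpected`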
+ #[structopt(long)] + check_unexpected: bool, /// Optional path where the validation results should be written #[structopt(long, parse(from_os_str))] write_results_path: Option, - #[structopt(long, requires = "write-results-path")] /// Optional filter to only write validation results with these statuses to the above path /// Available statuses are: `Correct`, `Incorrect`, `Missing`, `Unexpected` + #[structopt(long, requires = "write-results-path")] write_results_filter: Option>, /// If this flag is added, print the results summary table as JSON instead of a @@ -41,28 +45,6 @@ pub struct ValidateSsmArgs { json: bool, } -/// Structure of the validation configuration file -#[derive(Debug, Deserialize)] -pub(crate) struct ValidationConfig { - /// Vec of paths to JSON files containing expected metadata (image ids and SSM parameters) - expected_metadata_lists: Vec, - - /// Vec of regions where the parameters should be validated - validation_regions: Vec, -} - -/// A structure that allows us to store a parameter value along with the AMI ID it refers to. In -/// some cases, then AMI ID *is* the parameter value and both fields will hold the AMI ID. In other -/// cases the parameter value is not the AMI ID, but we need to remember which AMI ID it refers to. -#[derive(Debug, PartialEq, Eq)] -pub(crate) struct SsmValue { - /// The value of the SSM parameter - pub(crate) value: String, - - /// The ID of the AMI the parameter is associated with, used for validation result reporting - pub(crate) ami_id: String, -} - /// Performs SSM parameter validation and returns the `SsmValidationResults` object pub async fn validate( args: &Args, @@ -78,40 +60,21 @@ pub async fn validate( trace!("Parsed infra config: {:#?}", infra_config); - // Read the validation config file and parse it into the `ValidationConfig` struct - let validation_config_file = File::open(&validate_ssm_args.validation_config_path).context( - error::ReadValidationConfigSnafu { - path: validate_ssm_args.validation_config_path.clone(), - }, - )?; - let validation_config: ValidationConfig = serde_json::from_reader(validation_config_file) - .context(error::ParseValidationConfigSnafu)?; - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); - // Parse the parameter lists found in the validation config - info!("Parsing expected parameter lists"); - let expected_parameters = parse_parameter_lists( - validation_config.expected_metadata_lists, - &validation_config.validation_regions, - ) - .await?; - - info!("Parsed expected parameter lists"); - - // Create a Vec of Regions based on the region names in the validation config - let validation_regions: Vec = validation_config - .validation_regions - .iter() - .map(|s| Region::new(s.clone())) - .collect(); + // Parse the file holding expected parameters + info!("Parsing expected parameters file"); + let expected_parameters = + parse_expected_parameters(&validate_ssm_args.expected_parameters_path).await?; + + info!("Parsed expected parameters file"); // Create a HashMap of SsmClients, one for each region where validation should happen - let base_region = &validation_regions[0]; - let mut ssm_clients = HashMap::with_capacity(validation_regions.len()); + let base_region = Region::new(aws.regions[0].clone()); + let mut ssm_clients = HashMap::with_capacity(expected_parameters.len()); - for region in &validation_regions { - let client_config = build_client_config(region, base_region, &aws).await; + for region in expected_parameters.keys() { + let client_config = build_client_config(region, &base_region, 
&aws).await; let ssm_client = SsmClient::new(&client_config); ssm_clients.insert(region.clone(), ssm_client); } @@ -132,6 +95,7 @@ pub async fn validate( validate_parameters_in_region( expected_parameters.get(region).unwrap_or(&HashMap::new()), &result, + validate_ssm_args.check_unexpected, ) }), ) @@ -170,12 +134,13 @@ pub async fn validate( Ok(validation_results) } -/// Validates SSM parameters in a single region, based on a HashMap (SsmKey, SsmValue) of expected +/// Validates SSM parameters in a single region, based on a HashMap (SsmKey, String) of expected /// parameters and a HashMap (SsmKey, String) of actual retrieved parameters. Returns a HashSet of /// SsmValidationResult objects. pub(crate) fn validate_parameters_in_region( - expected_parameters: &HashMap, + expected_parameters: &HashMap, actual_parameters: &SsmParameters, + check_unexpected: bool, ) -> HashSet { // Clone the HashMap of actual parameters so items can be removed let mut actual_parameters = actual_parameters.clone(); @@ -186,87 +151,68 @@ pub(crate) fn validate_parameters_in_region( for (ssm_key, ssm_value) in expected_parameters { results.insert(SsmValidationResult::new( ssm_key.name.to_owned(), - Some(ssm_value.value.clone()), + Some(ssm_value.clone()), actual_parameters.get(ssm_key).map(|v| v.to_owned()), ssm_key.region.clone(), - Some(ssm_value.ami_id.clone()), )); actual_parameters.remove(ssm_key); } - // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` - // and therefore get the `Unexpected` status - for (ssm_key, ssm_value) in actual_parameters { - results.insert(SsmValidationResult::new( - ssm_key.name.to_owned(), - None, - Some(ssm_value), - ssm_key.region.clone(), - None, - )); + if check_unexpected { + // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` + // and therefore get the `Unexpected` status + for (ssm_key, ssm_value) in actual_parameters { + results.insert(SsmValidationResult::new( + ssm_key.name.to_owned(), + None, + Some(ssm_value), + ssm_key.region.clone(), + )); + } } results } type RegionName = String; -type AmiId = String; type ParameterName = String; type ParameterValue = String; -/// Parse the lists of parameters whose paths are in `parameter_lists`. Only parse the parameters -/// in the regions present in `validation_regions`. Return a HashMap of Region mapped to a HashMap -/// of the parameters in that region, with each parameter being a mapping of `SsmKey` to `SsmValue`. -pub(crate) async fn parse_parameter_lists( - parameter_lists: Vec, - validation_regions: &[String], -) -> Result>> { - let mut parameter_map: HashMap> = HashMap::new(); - for parameter_list_path in parameter_lists { - // Parse the JSON list as a HashMap of region_name, mapped to a HashMap of ami_id, mapped to - // a HashMap of parameter_name and parameter_value - let parameter_list: HashMap< - RegionName, - HashMap>, - > = serde_json::from_reader(&File::open(parameter_list_path.clone()).context( - error::ReadExpectedParameterListSnafu { - path: parameter_list_path, +/// Parse the file holding expected parameters. Return a HashMap of Region mapped to a HashMap +/// of the parameters in that region, with each parameter being a mapping of `SsmKey` to its +/// value as `String`. 
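+///
+/// A sketch of the expected JSON layout (top-level keys are region names; the parameter names
+/// and values shown are illustrative only):
+///
+/// ```json
+/// {
+///   "us-west-2": {
+///     "/example/parameter-name": "example-parameter-value"
+///   },
+///   "us-east-1": {
+///     "/example/parameter-name": "example-parameter-value"
+///   }
+/// }
+/// ```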
+pub(crate) async fn parse_expected_parameters( + expected_parameters_file: &PathBuf, +) -> Result>> { + // Parse the JSON file as a HashMap of region_name, mapped to a HashMap of parameter_name and + // parameter_value + let expected_parameters: HashMap> = + serde_json::from_reader(&File::open(expected_parameters_file.clone()).context( + error::ReadExpectedParameterFileSnafu { + path: expected_parameters_file, }, )?) - .context(error::ParseExpectedParameterListSnafu)?; - - // Iterate over the parsed HashMap, converting the nested HashMap into a HashMap of Region - // mapped to a HashMap of SsmKey, SsmValue - parameter_list - .iter() - .filter(|(region, _)| validation_regions.contains(region)) - .flat_map(|(region, ami_ids)| { - ami_ids - .iter() - .map(move |(ami_id, param_names)| (region, ami_id, param_names)) - }) - .flat_map(|(region, ami_id, params)| { - params.iter().map(move |(parameter_name, parameter_value)| { - ( - region.clone(), - ami_id.clone(), - parameter_name.clone(), - parameter_value.clone(), - ) - }) - }) - .for_each(|(region, ami_id, parameter_name, parameter_value)| { - parameter_map - .entry(Region::new(region.clone())) - .or_insert(HashMap::new()) - .insert( - SsmKey::new(Region::new(region), parameter_name), - SsmValue { - value: parameter_value, - ami_id, - }, - ); - }); - } + .context(error::ParseExpectedParameterFileSnafu)?; + + // Iterate over the parsed HashMap, converting the nested HashMap into a HashMap of Region + // mapped to a HashMap of SsmKey, String + let parameter_map = expected_parameters + .into_iter() + .map(|(region, parameters)| { + ( + Region::new(region.clone()), + parameters + .into_iter() + .map(|(parameter_name, parameter_value)| { + ( + SsmKey::new(Region::new(region.clone()), parameter_name), + parameter_value, + ) + }) + .collect::>(), + ) + }) + .collect(); + Ok(parameter_map) } @@ -330,11 +276,11 @@ mod error { #[snafu(display("Failed to validate SSM parameters in region: {}", region))] ValidateSsmRegion { region: String }, - #[snafu(display("Failed to parse AMI list: {}", source))] - ParseExpectedParameterList { source: serde_json::Error }, + #[snafu(display("Failed to parse expected parameters file: {}", source))] + ParseExpectedParameterFile { source: serde_json::Error }, - #[snafu(display("Failed to read AMI list: {}", path.display()))] - ReadExpectedParameterList { + #[snafu(display("Failed to read expected parameters file: {}", path.display()))] + ReadExpectedParameterFile { source: std::io::Error, path: PathBuf, }, @@ -363,7 +309,7 @@ type Result = std::result::Result; mod test { use crate::aws::{ ssm::{SsmKey, SsmParameters}, - validate_ssm::{results::SsmValidationResult, validate_parameters_in_region, SsmValue}, + validate_ssm::{results::SsmValidationResult, validate_parameters_in_region}, }; use aws_sdk_ssm::Region; use std::collections::{HashMap, HashSet}; @@ -373,36 +319,27 @@ mod test { // Tests validation of parameters where the expected value is equal to the actual value #[test] fn validate_parameters_all_correct() { - let expected_parameters: HashMap = HashMap::from([ + let expected_parameters: HashMap = HashMap::from([ ( SsmKey { region: Region::new("us-west-2"), name: "test1-parameter-name".to_string(), }, - SsmValue { - value: "test1-parameter-value".to_string(), - ami_id: "test1-image-id".to_string(), - }, + "test1-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-west-2"), name: "test2-parameter-name".to_string(), }, - SsmValue { - value: "test2-parameter-value".to_string(), - ami_id: 
"test2-image-id".to_string(), - }, + "test2-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-east-1"), name: "test3-parameter-name".to_string(), }, - SsmValue { - value: "test3-parameter-value".to_string(), - ami_id: "test3-image-id".to_string(), - }, + "test3-parameter-value".to_string(), ), ]); let actual_parameters: SsmParameters = HashMap::from([ @@ -434,24 +371,21 @@ mod test { Some("test3-parameter-value".to_string()), Some("test3-parameter-value".to_string()), Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); assert_eq!(results, expected_results); } @@ -459,36 +393,27 @@ mod test { // Tests validation of parameters where the expected value is different from the actual value #[test] fn validate_parameters_all_incorrect() { - let expected_parameters: HashMap = HashMap::from([ + let expected_parameters: HashMap = HashMap::from([ ( SsmKey { region: Region::new("us-west-2"), name: "test1-parameter-name".to_string(), }, - SsmValue { - value: "test1-parameter-value".to_string(), - ami_id: "test1-image-id".to_string(), - }, + "test1-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-west-2"), name: "test2-parameter-name".to_string(), }, - SsmValue { - value: "test2-parameter-value".to_string(), - ami_id: "test2-image-id".to_string(), - }, + "test2-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-east-1"), name: "test3-parameter-name".to_string(), }, - SsmValue { - value: "test3-parameter-value".to_string(), - ami_id: "test3-image-id".to_string(), - }, + "test3-parameter-value".to_string(), ), ]); let actual_parameters: SsmParameters = HashMap::from([ @@ -520,24 +445,21 @@ mod test { Some("test3-parameter-value".to_string()), Some("test3-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); assert_eq!(results, expected_results); } @@ -545,36 +467,27 @@ mod test { // Tests validation of parameters where the actual value is missing #[test] fn validate_parameters_all_missing() { - let expected_parameters: HashMap = HashMap::from([ + let expected_parameters: HashMap = HashMap::from([ ( SsmKey { region: Region::new("us-west-2"), name: "test1-parameter-name".to_string(), }, - SsmValue { - value: "test1-parameter-value".to_string(), - ami_id: 
"test1-image-id".to_string(), - }, + "test1-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-west-2"), name: "test2-parameter-name".to_string(), }, - SsmValue { - value: "test2-parameter-value".to_string(), - ami_id: "test2-image-id".to_string(), - }, + "test2-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-east-1"), name: "test3-parameter-name".to_string(), }, - SsmValue { - value: "test3-parameter-value".to_string(), - ami_id: "test3-image-id".to_string(), - }, + "test3-parameter-value".to_string(), ), ]); let actual_parameters: SsmParameters = HashMap::new(); @@ -584,24 +497,21 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); assert_eq!(results, expected_results); } @@ -609,7 +519,7 @@ mod test { // Tests validation of parameters where the expected value is missing #[test] fn validate_parameters_all_unexpected() { - let expected_parameters: HashMap = HashMap::new(); + let expected_parameters: HashMap = HashMap::new(); let actual_parameters: SsmParameters = HashMap::from([ ( SsmKey { @@ -639,24 +549,21 @@ mod test { None, Some("test3-parameter-value".to_string()), Region::new("us-east-1"), - None, ), SsmValidationResult::new( "test1-parameter-name".to_string(), None, Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - None, ), SsmValidationResult::new( "test2-parameter-name".to_string(), None, Some("test2-parameter-value".to_string()), Region::new("us-west-2"), - None, ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); assert_eq!(results, expected_results); } @@ -665,36 +572,27 @@ mod test { // happens once #[test] fn validate_parameters_mixed() { - let expected_parameters: HashMap = HashMap::from([ + let expected_parameters: HashMap = HashMap::from([ ( SsmKey { region: Region::new("us-west-2"), name: "test1-parameter-name".to_string(), }, - SsmValue { - value: "test1-parameter-value".to_string(), - ami_id: "test1-image-id".to_string(), - }, + "test1-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-west-2"), name: "test2-parameter-name".to_string(), }, - SsmValue { - value: "test2-parameter-value".to_string(), - ami_id: "test2-image-id".to_string(), - }, + "test2-parameter-value".to_string(), ), ( SsmKey { region: Region::new("us-east-1"), name: "test3-parameter-name".to_string(), }, - SsmValue { - value: "test3-parameter-value".to_string(), - ami_id: "test3-image-id".to_string(), - }, + "test3-parameter-value".to_string(), ), ]); let actual_parameters: SsmParameters = HashMap::from([ @@ -726,31 +624,103 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), 
Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), + ), + ]); + let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where each status (Correct, Incorrect, Missing, Unexpected) + // happens once and `--check-unexpected` is false + #[test] + fn validate_parameters_mixed_unexpected_false() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + "test3-parameter-value".to_string(), + ), + ]); + let actual_parameters: SsmParameters = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value-wrong".to_string(), + ), + ( + SsmKey { + region: Region::new("us-east-1"), + name: "test4-parameter-name".to_string(), + }, + "test4-parameter-value".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), None, + Region::new("us-east-1"), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Some("test1-parameter-value".to_string()), + Region::new("us-west-2"), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Some("test2-parameter-value-wrong".to_string()), + Region::new("us-west-2"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters); + let results = + validate_parameters_in_region(&expected_parameters, &actual_parameters, false); assert_eq!(results, expected_results); } diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs index f11c64d4..51e3de2a 100644 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ b/tools/pubsys/src/aws/validate_ssm/results.rs @@ -69,10 +69,6 @@ pub struct SsmValidationResult { #[serde(serialize_with = "serialize_region")] pub(crate) region: Region, - /// The ID of the AMI the parameter is associated with - #[tabled(display_with = "display_option")] - pub(crate) ami_id: Option, - /// The validation status of the parameter pub(crate) status: SsmValidationResultStatus, } @@ -97,7 +93,6 @@ impl SsmValidationResult { expected_value: Option, actual_value: Option, region: Region, - ami_id: Option, ) -> SsmValidationResult { // Determine the validation status based on equality, presence, and absence of expected and // actual parameter values @@ -114,7 +109,6 @@ impl SsmValidationResult { 
expected_value, actual_value, region, - ami_id, status, } } @@ -264,7 +258,7 @@ mod test { (Region::new("us-west-2"), Ok(HashSet::from([]))), (Region::new("us-east-1"), Ok(HashSet::from([]))), ])); - let results_filtered = results.get_results_for_status(&vec![ + let results_filtered = results.get_results_for_status(&[ SsmValidationResultStatus::Correct, SsmValidationResultStatus::Incorrect, SsmValidationResultStatus::Missing, @@ -286,28 +280,24 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-west-2"), - None, ), ])), ), @@ -319,34 +309,30 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), - None, ), ])), ), ])); let results_filtered = - results.get_results_for_status(&vec![SsmValidationResultStatus::Correct]); + results.get_results_for_status(&[SsmValidationResultStatus::Correct]); assert_eq!( results_filtered, @@ -356,14 +342,12 @@ mod test { Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ) ]) ); @@ -381,28 +365,24 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-west-2"), - None, ), ])), ), @@ -414,33 +394,29 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), 
Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), - None, ), ])), ), ])); - let results_filtered = results.get_results_for_status(&vec![ + let results_filtered = results.get_results_for_status(&[ SsmValidationResultStatus::Correct, SsmValidationResultStatus::Incorrect, ]); @@ -453,28 +429,24 @@ mod test { Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ) ]) ); @@ -492,28 +464,24 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-west-2"), - None, ), ])), ), @@ -525,33 +493,29 @@ mod test { Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), - None, ), ])), ), ])); - let results_filtered = results.get_results_for_status(&vec![ + let results_filtered = results.get_results_for_status(&[ SsmValidationResultStatus::Correct, SsmValidationResultStatus::Incorrect, SsmValidationResultStatus::Missing, @@ -566,56 +530,48 @@ mod test { Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), 
Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ), &SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), None, Region::new("us-west-2"), - Some("test3-image-id".to_string()), ), &SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-west-2"), - None, ), &SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), None, Region::new("us-east-1"), - Some("test3-image-id".to_string()), ), &SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), - None, ) ]) ); @@ -633,21 +589,18 @@ mod test { Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-west-2"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-west-2"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-west-2"), - None, ), ])), ), @@ -659,27 +612,24 @@ mod test { Some("test1-parameter-value".to_string()), Some("test1-parameter-value".to_string()), Region::new("us-east-1"), - Some("test1-image-id".to_string()), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), Some("test2-parameter-value-wrong".to_string()), Region::new("us-east-1"), - Some("test2-image-id".to_string()), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, Some("test4-parameter-value".to_string()), Region::new("us-east-1"), - None, ), ])), ), ])); let results_filtered = - results.get_results_for_status(&vec![SsmValidationResultStatus::Missing]); + results.get_results_for_status(&[SsmValidationResultStatus::Missing]); assert_eq!(results_filtered, HashSet::new()); } From a43e999e25e51e1fddbef9f562cf58655bc3aa41 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 7 Apr 2023 20:10:30 +0000 Subject: [PATCH 0927/1356] aws.profile: Add note to avoid default profile Recent versions of `aws-iam-authenticator`, and perhaps other AWS components, not pick up the default credential settings when `settings.aws.profile` is set to `default`. To help avoid this situation, this adds a note to the settings documentation to avoid using `default` for AWS credentials. Signed-off-by: Sean McGinnis --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 1258348e..e9576c52 100644 --- a/README.md +++ b/README.md @@ -1123,6 +1123,8 @@ They can be overridden for testing purposes in [the same way as other settings]( ``` **Note**: If `settings.aws.profile` is not set, the setting will fallback to the "default" profile. 
+ In general it is recommended not to include a `[profile default]` section in the `aws.config` contents though. + This may have unintended side effects for other AWS services running on the node (e.g. `aws-iam-authenticator`). **Note:** The `config`, `credentials`, and `profile` are optional and do not need to be set when using an Instance Profile when running on an AWS instance. From 1dd3fd68cde170dbb781ecf2739d48b8a02a1955 Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Thu, 6 Apr 2023 17:37:06 +0000 Subject: [PATCH 0928/1356] pubsys: write AMI publicity info to file --- .../pubsys/src/aws/ami/launch_permissions.rs | 98 +++++++++++++++ tools/pubsys/src/aws/ami/mod.rs | 117 +++++++++++++++--- tools/pubsys/src/aws/publish_ami/mod.rs | 84 +++++++++++-- 3 files changed, 275 insertions(+), 24 deletions(-) create mode 100644 tools/pubsys/src/aws/ami/launch_permissions.rs diff --git a/tools/pubsys/src/aws/ami/launch_permissions.rs b/tools/pubsys/src/aws/ami/launch_permissions.rs new file mode 100644 index 00000000..467a0838 --- /dev/null +++ b/tools/pubsys/src/aws/ami/launch_permissions.rs @@ -0,0 +1,98 @@ +use aws_sdk_ec2::{model::LaunchPermission, Client as Ec2Client}; +use serde::{Deserialize, Serialize}; +use snafu::ResultExt; + +/// Returns the launch permissions for the given AMI +pub(crate) async fn get_launch_permissions( + ec2_client: &Ec2Client, + region: &str, + ami_id: &str, +) -> Result> { + let ec2_response = ec2_client + .describe_image_attribute() + .image_id(ami_id) + .attribute(aws_sdk_ec2::model::ImageAttributeName::LaunchPermission) + .send() + .await + .context(error::DescribeImageAttributeSnafu { + ami_id, + region: region.to_string(), + })?; + + let mut launch_permissions = vec![]; + + let responses: Vec = + ec2_response.launch_permissions().unwrap_or(&[]).to_vec(); + for permission in responses { + launch_permissions.push(LaunchPermissionDef::try_from(permission)?) + } + Ok(launch_permissions) +} + +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)] +#[serde(rename_all = "lowercase")] +pub(crate) enum LaunchPermissionDef { + /// The name of the group + Group(String), + + /// The Amazon Web Services account ID + UserId(String), + + /// The ARN of an organization + OrganizationArn(String), + + /// The ARN of an organizational unit + OrganizationalUnitArn(String), +} + +impl TryFrom for LaunchPermissionDef { + type Error = crate::aws::ami::launch_permissions::Error; + + fn try_from(launch_permission: LaunchPermission) -> std::result::Result { + let LaunchPermission { + group, + user_id, + organization_arn, + organizational_unit_arn, + .. 
+ } = launch_permission.clone(); + match (group, user_id, organization_arn, organizational_unit_arn) { + (Some(group), None, None, None) => { + Ok(LaunchPermissionDef::Group(group.as_str().to_string())) + } + (None, Some(user_id), None, None) => Ok(LaunchPermissionDef::UserId(user_id)), + (None, None, Some(organization_arn), None) => { + Ok(LaunchPermissionDef::OrganizationArn(organization_arn)) + } + (None, None, None, Some(organizational_unit_arn)) => Ok( + LaunchPermissionDef::OrganizationalUnitArn(organizational_unit_arn), + ), + _ => Err(Error::InvalidLaunchPermission { launch_permission }), + } + } +} + +mod error { + use aws_sdk_ec2::error::DescribeImageAttributeError; + use aws_sdk_ec2::model::LaunchPermission; + use aws_sdk_ec2::types::SdkError; + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + pub(crate) enum Error { + #[snafu(display("Error describing AMI {} in {}: {}", ami_id, region, source))] + DescribeImageAttribute { + ami_id: String, + region: String, + #[snafu(source(from(SdkError, Box::new)))] + source: Box>, + }, + + #[snafu(display("Invalid launch permission: {:?}", launch_permission))] + InvalidLaunchPermission { launch_permission: LaunchPermission }, + } +} +pub(crate) use error::Error; + +type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index e19a2edd..8f47c693 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -1,11 +1,14 @@ //! The ami module owns the 'ami' subcommand and controls the process of registering and copying //! EC2 AMIs. +pub(crate) mod launch_permissions; pub(crate) mod public; mod register; mod snapshot; pub(crate) mod wait; +use crate::aws::ami::launch_permissions::get_launch_permissions; +use crate::aws::ami::public::ami_is_public; use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots, ModifyOptions}; use crate::aws::{client::build_client_config, parse_arch, region_from_string}; use crate::Args; @@ -26,7 +29,6 @@ use register::{get_ami_id, register_image, RegisteredIds}; use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; -use std::fs::File; use std::path::PathBuf; use structopt::{clap, StructOpt}; use wait::wait_for_ami; @@ -78,10 +80,7 @@ pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { Ok(amis) => { // Write the AMI IDs to file if requested if let Some(ref path) = ami_args.ami_output { - let file = File::create(path).context(error::FileCreateSnafu { path })?; - serde_json::to_writer_pretty(file, &amis) - .context(error::SerializeSnafu { path })?; - info!("Wrote AMI data to {}", path.display()); + write_amis(path, &amis).context(error::WriteAmisSnafu { path })?; } Ok(()) } @@ -141,6 +140,10 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> region: base_region.as_ref(), })?; + // If the AMI does not exist yet, `public` should be false and `launch_permissions` empty + let mut public = false; + let mut launch_permissions = vec![]; + let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { warn!( "Found '{}' already registered in {}: {}", @@ -153,9 +156,25 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> region: base_region.as_ref(), })?; let found_ids = RegisteredIds { - image_id: found_id, + image_id: found_id.clone(), snapshot_ids, }; + + public = ami_is_public(&base_ec2_client, base_region.as_ref(), &found_id) + .await + .context(error::IsAmiPublicSnafu { + 
image_id: found_id.clone(), + region: base_region.to_string(), + })?; + + launch_permissions = + get_launch_permissions(&base_ec2_client, base_region.as_ref(), &found_id) + .await + .context(error::DescribeImageAttributeSnafu { + image_id: found_id, + region: base_region.to_string(), + })?; + (found_ids, true) } else { let new_ids = register_image(ami_args, &base_region, base_ebs_client, &base_ec2_client) @@ -174,7 +193,12 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> amis.insert( base_region.as_ref().to_string(), - Image::new(&ids_of_image.image_id, &ami_args.name), + Image::new( + &ids_of_image.image_id, + &ami_args.name, + Some(public), + Some(launch_permissions), + ), ); // If we don't need to copy AMIs, we're done. @@ -294,7 +318,25 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> "Found '{}' already registered in {}: {}", ami_args.name, region, id ); - amis.insert(region.as_ref().to_string(), Image::new(&id, &ami_args.name)); + let public = ami_is_public(&ec2_clients[®ion], region.as_ref(), &id) + .await + .context(error::IsAmiPublicSnafu { + image_id: id.clone(), + region: base_region.to_string(), + })?; + + let launch_permissions = + get_launch_permissions(&ec2_clients[®ion], region.as_ref(), &id) + .await + .context(error::DescribeImageAttributeSnafu { + region: region.as_ref(), + image_id: id.clone(), + })?; + + amis.insert( + region.as_ref().to_string(), + Image::new(&id, &ami_args.name, Some(public), Some(launch_permissions)), + ); continue; } @@ -346,7 +388,7 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> ); amis.insert( region.as_ref().to_string(), - Image::new(&image_id, &ami_args.name), + Image::new(&image_id, &ami_args.name, Some(false), Some(vec![])), ); } else { saw_error = true; @@ -379,13 +421,22 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> pub(crate) struct Image { pub(crate) id: String, pub(crate) name: String, + pub(crate) public: Option, + pub(crate) launch_permissions: Option>, } impl Image { - fn new(id: &str, name: &str) -> Self { + fn new( + id: &str, + name: &str, + public: Option, + launch_permissions: Option>, + ) -> Self { Self { id: id.to_string(), name: name.to_string(), + public, + launch_permissions, } } } @@ -442,11 +493,14 @@ async fn get_account_ids( mod error { use crate::aws::{ami, publish_ami}; use aws_sdk_ec2::error::ModifyImageAttributeError; + use aws_sdk_ec2::model::LaunchPermission; use aws_sdk_ec2::types::SdkError; use aws_sdk_sts::error::GetCallerIdentityError; use snafu::Snafu; use std::path::PathBuf; + use super::public; + #[derive(Debug, Snafu)] #[snafu(visibility(pub(super)))] pub(crate) enum Error { @@ -456,6 +510,18 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { source: pubsys_config::Error }, + #[snafu(display( + "Failed to describe image attributes for image {} in region {}: {}", + image_id, + region, + source + ))] + DescribeImageAttribute { + image_id: String, + region: String, + source: super::launch_permissions::Error, + }, + #[snafu(display("Failed to create file '{}': {}", path.display(), source))] FileCreate { path: PathBuf, @@ -502,6 +568,21 @@ mod error { source: SdkError, }, + #[snafu(display( + "Failed to check if AMI with id {} is public in {}: {}", + image_id, + region, + source + ))] + IsAmiPublic { + image_id: String, + region: String, + source: public::Error, + }, + + #[snafu(display("Invalid launch permission: {:?}", launch_permission))] + InvalidLaunchPermission { launch_permission: LaunchPermission }, + 
#[snafu(display("Infra.toml is missing {}", missing))] MissingConfig { missing: String }, @@ -519,19 +600,23 @@ mod error { source: ami::register::Error, }, - #[snafu(display("Failed to serialize output to '{}': {}", path.display(), source))] - Serialize { - path: PathBuf, - source: serde_json::Error, - }, - #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] WaitAmi { id: String, region: String, source: ami::wait::Error, }, + + #[snafu(display("Failed to write AMIs to '{}': {}", path.display(), source))] + WriteAmis { + path: PathBuf, + source: publish_ami::Error, + }, } } pub(crate) use error::Error; + +use self::launch_permissions::LaunchPermissionDef; + +use super::publish_ami::write_amis; type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 5aa36976..67c06b4d 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -1,13 +1,16 @@ //! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting //! and revoking access to EC2 AMIs. +use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef}; use crate::aws::ami::wait::{self, wait_for_ami}; use crate::aws::ami::Image; use crate::aws::client::build_client_config; use crate::aws::region_from_string; use crate::Args; use aws_sdk_ec2::error::{ModifyImageAttributeError, ModifySnapshotAttributeError}; -use aws_sdk_ec2::model::{ImageAttributeName, OperationType, SnapshotAttributeName}; +use aws_sdk_ec2::model::{ + ImageAttributeName, OperationType, PermissionGroup, SnapshotAttributeName, +}; use aws_sdk_ec2::output::{ModifyImageAttributeOutput, ModifySnapshotAttributeOutput}; use aws_sdk_ec2::types::SdkError; use aws_sdk_ec2::{Client as Ec2Client, Region}; @@ -200,18 +203,33 @@ pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { .await?; info!("Updating AMI permissions - {}", description); - let ami_ids = amis - .into_iter() - .map(|(region, image)| (region, image.id)) - .collect(); modify_regional_images( &publish_args.modify_opts, &operation, - &ami_ids, + &mut amis, &ec2_clients, ) .await?; + write_amis( + &publish_args.ami_input, + &amis + .into_iter() + .map(|(region, image)| (region.to_string(), image)) + .collect::>(), + )?; + + Ok(()) +} + +pub(crate) fn write_amis(path: &PathBuf, amis: &HashMap) -> Result<()> { + let file = File::create(path).context(error::FileSnafu { + op: "write AMIs to file", + path, + })?; + serde_json::to_writer_pretty(file, &amis).context(error::SerializeSnafu { path })?; + info!("Wrote AMI data to {}", path.display()); + Ok(()) } @@ -462,11 +480,12 @@ pub(crate) async fn modify_image( pub(crate) async fn modify_regional_images( modify_opts: &ModifyOptions, operation: &OperationType, - images: &HashMap, + images: &mut HashMap, clients: &HashMap, ) -> Result<()> { let mut requests = Vec::new(); - for (region, image_id) in images { + for (region, image) in &mut *images { + let image_id = &image.id; let ec2_client = &clients[region]; let modify_image_future = modify_image(modify_opts, operation, image_id, ec2_client); @@ -492,6 +511,31 @@ pub(crate) async fn modify_regional_images( Ok(_) => { success_count += 1; info!("Modified permissions of image {} in {}", image_id, region); + + // Set the `public` and `launch_permissions` fields for the Image object + let mut image = images.get_mut(&Region::new(region.clone())).ok_or( + error::Error::MissingRegion { + region: 
region.clone(), + }, + )?; + let launch_permissions: Vec = get_launch_permissions( + &clients[&Region::new(region.clone())], + region.as_ref(), + &image_id, + ) + .await + .context(error::DescribeImageAttributeSnafu { + image_id: image_id.clone(), + region: region.to_string(), + })?; + + // If the launch permissions contain the group `all` after the modification, + // the image is public + image.public = Some(launch_permissions.iter().any(|launch_permission| { + launch_permission + == &LaunchPermissionDef::Group(PermissionGroup::All.as_str().to_string()) + })); + image.launch_permissions = Some(launch_permissions); } Err(e) => { error_count += 1; @@ -532,6 +576,18 @@ mod error { #[snafu(display("Error reading config: {}", source))] Config { source: pubsys_config::Error }, + #[snafu(display( + "Failed to describe image attributes for image {} in region {}: {}", + image_id, + region, + source + ))] + DescribeImageAttribute { + image_id: String, + region: String, + source: crate::aws::ami::launch_permissions::Error, + }, + #[snafu(display("Failed to describe images in {}: {}", region, source))] DescribeImages { region: String, @@ -566,6 +622,9 @@ mod error { missing: String, }, + #[snafu(display("Failed to find region {} in AMI map", region))] + MissingRegion { region: String }, + #[snafu(display( "Failed to modify permissions of {} in {}: {}", snapshot_id, @@ -611,6 +670,12 @@ mod error { #[snafu(display("DescribeImages in {} with unique filters returned multiple results: {}", region, images.join(", ")))] MultipleImages { region: String, images: Vec }, + #[snafu(display("Failed to serialize output to '{}': {}", path.display(), source))] + Serialize { + path: PathBuf, + source: serde_json::Error, + }, + #[snafu(display( "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", regions.join(", ") @@ -633,6 +698,7 @@ mod error { // look at this and decide whether or not their new error variant might have // modified any AMI permissions. Error::Config { .. } + | Error::DescribeImageAttribute { .. } | Error::DescribeImages { .. } | Error::Deserialize { .. } | Error::File { .. } @@ -640,10 +706,12 @@ mod error { | Error::MissingConfig { .. } | Error::MissingImage { .. } | Error::MissingInResponse { .. } + | Error::MissingRegion { .. } | Error::ModifyImageAttribute { .. } | Error::ModifyImageAttributes { .. } | Error::ModifySnapshotAttributes { .. } | Error::MultipleImages { .. } + | Error::Serialize { .. } | Error::UnknownRegions { .. } | Error::WaitAmi { .. } => 0u16, From 9f034d4e4996be8fab4718dbcade722613adaee0 Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Thu, 13 Apr 2023 14:27:27 +0000 Subject: [PATCH 0929/1356] pubsys: write SSM parameters to file --- tools/pubsys/src/aws/promote_ssm/mod.rs | 322 ++++++++++++++++++++++- tools/pubsys/src/aws/ssm/mod.rs | 49 ++++ tools/pubsys/src/aws/ssm/template.rs | 197 ++++++++++++++ tools/pubsys/src/aws/validate_ssm/mod.rs | 7 +- 4 files changed, 570 insertions(+), 5 deletions(-) diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 2863ee84..bf54cf98 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -2,7 +2,9 @@ //! 
SSM parameters from one version to another use crate::aws::client::build_client_config; +use crate::aws::ssm::template::RenderedParametersMap; use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; +use crate::aws::validate_ssm::parse_parameters; use crate::aws::{parse_arch, region_from_string}; use crate::Args; use aws_sdk_ec2::model::ArchitectureValues; @@ -41,6 +43,11 @@ pub(crate) struct PromoteArgs { /// File holding the parameter templates #[structopt(long)] template_path: PathBuf, + + /// If set, contains the path to the file holding the original SSM parameters + /// and where the newly promoted parameters will be written + #[structopt(long)] + ssm_parameter_output: Option, } /// Common entrypoint from main() @@ -200,6 +207,14 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { return Ok(()); } + // If an output file path was given, read the existing parameters in `ssm_parameter_output` and + // write the newly promoted parameters to `ssm_parameter_output` along with the original + // parameters + if let Some(ssm_parameter_output) = &promote_args.ssm_parameter_output { + append_rendered_parameters(ssm_parameter_output, &set_parameters, source_target_map) + .await?; + } + // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= info!("Setting updated SSM parameters."); @@ -216,8 +231,88 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { Ok(()) } +/// Read parameters in given file, add newly promoted parameters, and write combined parameters to +/// the given file +async fn append_rendered_parameters( + ssm_parameters_output: &PathBuf, + set_parameters: &HashMap, + source_target_map: HashMap<&String, &String>, +) -> Result<()> { + // If the file doesn't exist, assume that there are no existing parameters + let parsed_parameters = parse_parameters(&ssm_parameters_output.to_owned()) + .await + .or_else({ + |e| match e { + crate::aws::validate_ssm::Error::ReadExpectedParameterFile { .. } => { + Ok(HashMap::new()) + } + _ => Err(e), + } + }) + .context(error::ParseExistingSsmParametersSnafu { + path: ssm_parameters_output, + })?; + + let combined_parameters: HashMap> = + combine_parameters(parsed_parameters, set_parameters, source_target_map); + + write_rendered_parameters( + ssm_parameters_output, + &RenderedParametersMap::from(combined_parameters).rendered_parameters, + ) + .context(error::WriteRenderedSsmParametersSnafu { + path: ssm_parameters_output, + })?; + + Ok(()) +} + +/// Return a HashMap of Region mapped to a HashMap of SsmKey, String pairs, representing the newly +/// promoted parameters as well as the original parameters. In case of a parameter collision, +/// the parameter takes the promoted value. 
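The collision rule stated in the comment above — keep the parameters already in the output file, add the newly promoted ones, and let the promoted value win whenever both maps carry the same key — can be sketched with plain `HashMap`s. This is an illustrative reduction only, not the pubsys implementation: `String` keys stand in for `SsmKey`, and the per-region layer is omitted.

```rust
use std::collections::HashMap;

/// Minimal sketch of the promotion merge rule, assuming string keys instead of
/// pubsys' `SsmKey` and ignoring the per-region grouping.
fn merge_promoted(
    existing: HashMap<String, String>,
    promoted: &HashMap<String, String>,
) -> HashMap<String, String> {
    let mut combined = existing;
    // `extend` overwrites entries that already exist, which is exactly the
    // "promoted value wins on collision" behavior described above.
    combined.extend(promoted.iter().map(|(k, v)| (k.clone(), v.clone())));
    combined
}

fn main() {
    let existing = HashMap::from([("parameter-a".to_string(), "old".to_string())]);
    let promoted = HashMap::from([("parameter-a".to_string(), "new".to_string())]);
    assert_eq!(merge_promoted(existing, &promoted)["parameter-a"], "new");
}
```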
+fn combine_parameters( + source_parameters: HashMap>, + set_parameters: &HashMap, + source_target_map: HashMap<&String, &String>, +) -> HashMap> { + let mut combined_parameters: HashMap> = HashMap::new(); + + source_parameters + .iter() + .flat_map(|(region, parameters)| { + parameters + .iter() + .map(move |(ssm_key, ssm_value)| (region, ssm_key, ssm_value)) + }) + .for_each(|(region, ssm_key, ssm_value)| { + let add_parameters = vec![ + (ssm_key.clone(), ssm_value.clone()), + ( + SsmKey::new(region.clone(), source_target_map[&ssm_key.name].to_string()), + set_parameters[&SsmKey::new( + region.clone(), + source_target_map[&ssm_key.name].to_string(), + )] + .clone(), + ), + ]; + + combined_parameters + .entry(region.clone()) + .or_insert(HashMap::new()) + .extend(add_parameters); + }); + + combined_parameters +} + mod error { - use crate::aws::ssm::{ssm, template}; + use std::path::PathBuf; + + use crate::aws::{ + ssm::{ssm, template}, + validate_ssm, + }; use snafu::Snafu; #[derive(Debug, Snafu)] @@ -261,7 +356,232 @@ mod error { ValidateSsm { source: ssm::Error, }, + + #[snafu(display( + "Failed to parse existing SSM parameters at path {:?}: {}", + path, + source, + ))] + ParseExistingSsmParameters { + source: validate_ssm::error::Error, + path: PathBuf, + }, + + #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))] + ParseRenderedSsmParameters { + source: serde_json::Error, + }, + + #[snafu(display("Failed to write rendered SSM parameters to {}: {}", path.display(), source))] + WriteRenderedSsmParameters { + path: PathBuf, + source: crate::aws::ssm::Error, + }, } } pub(crate) use error::Error; + +use super::ssm::write_rendered_parameters; type Result = std::result::Result; + +#[cfg(test)] +mod test { + use std::collections::HashMap; + + use crate::aws::{promote_ssm::combine_parameters, ssm::SsmKey}; + use aws_sdk_ssm::Region; + + #[test] + fn combined_parameters() { + let existing_parameters = HashMap::from([ + ( + Region::new("us-west-2"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashMap::from([( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + )]), + ), + ]); + let set_parameters = HashMap::from([ + ( + SsmKey::new( + Region::new("us-west-2"), + "test1-parameter-name-promoted".to_string(), + ), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-west-2"), + "test2-parameter-name-promoted".to_string(), + ), + "test2-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-east-1"), + "test3-parameter-name-promoted".to_string(), + ), + "test3-parameter-value".to_string(), + ), + ]); + let test1_parameter_name = "test1-parameter-name".to_string(); + let test2_parameter_name = "test2-parameter-name".to_string(); + let test3_parameter_name = "test3-parameter-name".to_string(); + let test1_parameter_name_promoted = "test1-parameter-name-promoted".to_string(); + let test2_parameter_name_promoted = "test2-parameter-name-promoted".to_string(); + let test3_parameter_name_promoted = "test3-parameter-name-promoted".to_string(); + let source_target_map = HashMap::from([ + (&test1_parameter_name, &test1_parameter_name_promoted), + (&test2_parameter_name, &test2_parameter_name_promoted), + 
(&test3_parameter_name, &test3_parameter_name_promoted), + ]); + let map = combine_parameters(existing_parameters, &set_parameters, source_target_map); + let expected_map = HashMap::from([ + ( + Region::new("us-west-2"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-west-2"), + "test1-parameter-name-promoted".to_string(), + ), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-west-2"), + "test2-parameter-name-promoted".to_string(), + ), + "test2-parameter-value".to_string(), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-east-1"), + "test3-parameter-name-promoted".to_string(), + ), + "test3-parameter-value".to_string(), + ), + ]), + ), + ]); + assert_eq!(map, expected_map); + } + + #[test] + fn combined_parameters_overwrite() { + let existing_parameters = HashMap::from([ + ( + Region::new("us-west-2"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashMap::from([( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + )]), + ), + ]); + let set_parameters = HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value-new".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value-new".to_string(), + ), + ( + SsmKey::new( + Region::new("us-east-1"), + "test3-parameter-name-promoted".to_string(), + ), + "test3-parameter-value".to_string(), + ), + ]); + let test1_parameter_name = "test1-parameter-name".to_string(); + let test2_parameter_name = "test2-parameter-name".to_string(); + let test3_parameter_name = "test3-parameter-name".to_string(); + let test1_parameter_name_promoted = "test1-parameter-name".to_string(); + let test2_parameter_name_promoted = "test2-parameter-name".to_string(); + let test3_parameter_name_promoted = "test3-parameter-name-promoted".to_string(); + let source_target_map = HashMap::from([ + (&test1_parameter_name, &test1_parameter_name_promoted), + (&test2_parameter_name, &test2_parameter_name_promoted), + (&test3_parameter_name, &test3_parameter_name_promoted), + ]); + let map = combine_parameters(existing_parameters, &set_parameters, source_target_map); + let expected_map = HashMap::from([ + ( + Region::new("us-west-2"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value-new".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value-new".to_string(), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-east-1"), + 
"test3-parameter-name-promoted".to_string(), + ), + "test3-parameter-value".to_string(), + ), + ]), + ), + ]); + assert_eq!(map, expected_map); + } +} diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index c06a8b32..1dc02b03 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -6,6 +6,7 @@ pub(crate) mod ssm; pub(crate) mod template; use self::template::RenderedParameter; +use crate::aws::ssm::template::RenderedParametersMap; use crate::aws::{ ami::public::ami_is_public, ami::Image, client::build_client_config, parse_arch, region_from_string, @@ -65,6 +66,10 @@ pub(crate) struct SsmArgs { /// Allows publishing non-public images to the `/aws/` namespace #[structopt(long)] allow_private_images: bool, + + /// If set, writes the generated SSM parameters to this path + #[structopt(long)] + ssm_parameter_output: Option, } /// Wrapper struct over parameter update and AWS clients needed to execute on it. @@ -130,6 +135,14 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { .context(error::RenderTemplatesSnafu)?; trace!("Generated templated parameters: {:#?}", new_parameters); + // If the path to an output file was given, write the rendered parameters to this file + if let Some(ssm_parameter_output) = &ssm_args.ssm_parameter_output { + write_rendered_parameters( + ssm_parameter_output, + &RenderedParametersMap::from(&new_parameters).rendered_parameters, + )?; + } + // Generate AWS Clients to use for the updates. let mut param_update_ops: Vec = Vec::with_capacity(new_parameters.len()); let mut aws_sdk_configs: HashMap = HashMap::with_capacity(regions.len()); @@ -213,6 +226,31 @@ pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { Ok(()) } +/// Write rendered parameters to the file at `ssm_parameters_output` +pub(crate) fn write_rendered_parameters( + ssm_parameters_output: &PathBuf, + parameters: &HashMap>, +) -> Result<()> { + info!( + "Writing rendered SSM parameters to {:#?}", + ssm_parameters_output + ); + + serde_json::to_writer_pretty( + &File::create(ssm_parameters_output).context(error::WriteRenderedSsmParametersSnafu { + path: ssm_parameters_output, + })?, + ¶meters, + ) + .context(error::ParseRenderedSsmParametersSnafu)?; + + info!( + "Wrote rendered SSM parameters to {:#?}", + ssm_parameters_output + ); + Ok(()) +} + // Rate limits on the EC2 side use the TokenBucket method, and buckets refill at a rate of 20 tokens per second. // See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-rate-based for more details. 
const DESCRIBE_IMAGES_RATE_LIMIT: Quota = Quota::per_second(nonzero!(20u32)); @@ -486,6 +524,17 @@ mod error { ValidateSsm { source: ssm::Error, }, + + #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))] + ParseRenderedSsmParameters { + source: serde_json::Error, + }, + + #[snafu(display("Failed to write rendered SSM parameters to {:#?}: {}", path, source))] + WriteRenderedSsmParameters { + path: PathBuf, + source: std::io::Error, + }, } } pub(crate) use error::Error; diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index 981e469c..56c1a88a 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -177,6 +177,58 @@ fn join_name(ssm_prefix: &str, name_suffix: &str) -> String { } } +type RegionName = String; +type SsmParameterName = String; +type SsmParameterValue = String; + +/// Struct containing a HashMap of RegionName, mapped to a HashMap +/// of SsmParameterName, SsmParameterValue pairs +#[derive(Deserialize, PartialEq, Serialize)] +pub(crate) struct RenderedParametersMap { + pub(crate) rendered_parameters: + HashMap>, +} + +impl From<&Vec> for RenderedParametersMap { + fn from(parameters: &Vec) -> Self { + let mut parameter_map: HashMap> = + HashMap::new(); + for parameter in parameters.iter() { + parameter_map + .entry(parameter.ssm_key.region.to_string()) + .or_insert(HashMap::new()) + .insert( + parameter.ssm_key.name.to_owned(), + parameter.value.to_owned(), + ); + } + RenderedParametersMap { + rendered_parameters: parameter_map, + } + } +} + +impl From>> for RenderedParametersMap { + fn from(parameters: HashMap>) -> Self { + let mut parameter_map: HashMap> = + HashMap::new(); + parameters + .into_iter() + .for_each(|(region, region_parameters)| { + parameter_map.insert( + region.to_string(), + region_parameters + .into_iter() + .map(|(ssm_key, ssm_value)| (ssm_key.name, ssm_value)) + .collect::>(), + ); + }); + RenderedParametersMap { + rendered_parameters: parameter_map, + } + } +} + mod error { use snafu::Snafu; use std::io; @@ -216,3 +268,148 @@ mod error { } pub(crate) use error::Error; type Result = std::result::Result; + +#[cfg(test)] +mod test { + use std::collections::HashMap; + + use super::{RenderedParameter, RenderedParametersMap}; + use crate::aws::{ami::Image, ssm::SsmKey}; + use aws_sdk_ssm::Region; + + // These tests assert that the RenderedParametersMap can be created correctly. 
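For reference, the nested map these tests build (`RenderedParametersMap`, region → parameter name → value) is what `write_rendered_parameters` serializes with `serde_json::to_writer_pretty`. A standalone sketch of the resulting `--ssm-parameter-output` file shape, using placeholder region, parameter name, and AMI id rather than real values:

```rust
use std::collections::HashMap;

// Sketch of the JSON shape written for rendered SSM parameters; values are
// placeholders, and the real map is produced by RenderedParametersMap.
fn main() -> serde_json::Result<()> {
    let rendered: HashMap<String, HashMap<String, String>> = HashMap::from([(
        "us-west-2".to_string(),
        HashMap::from([(
            "/example/prefix/image_id".to_string(),
            "ami-0123456789abcdef0".to_string(),
        )]),
    )]);
    // Produces: { "us-west-2": { "/example/prefix/image_id": "ami-0123456789abcdef0" } }
    println!("{}", serde_json::to_string_pretty(&rendered)?);
    Ok(())
}
```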
+ #[test] + fn rendered_parameters_map_from_vec() { + let rendered_parameters = vec![ + RenderedParameter { + ami: Image { + id: "test1-image-id".to_string(), + name: "test1-image-name".to_string(), + public: Some(true), + launch_permissions: Some(vec![]), + }, + ssm_key: SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + value: "test1-parameter-value".to_string(), + }, + RenderedParameter { + ami: Image { + id: "test2-image-id".to_string(), + name: "test2-image-name".to_string(), + public: Some(true), + launch_permissions: Some(vec![]), + }, + ssm_key: SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + value: "test2-parameter-value".to_string(), + }, + RenderedParameter { + ami: Image { + id: "test3-image-id".to_string(), + name: "test3-image-name".to_string(), + public: Some(true), + launch_permissions: Some(vec![]), + }, + ssm_key: SsmKey { + region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + value: "test3-parameter-value".to_string(), + }, + ]; + let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; + let expected_map = &HashMap::from([ + ( + "us-east-1".to_string(), + HashMap::from([( + "test3-parameter-name".to_string(), + "test3-parameter-value".to_string(), + )]), + ), + ( + "us-west-2".to_string(), + HashMap::from([ + ( + "test1-parameter-name".to_string(), + "test1-parameter-value".to_string(), + ), + ( + "test2-parameter-name".to_string(), + "test2-parameter-value".to_string(), + ), + ]), + ), + ]); + assert_eq!(map, expected_map); + } + + #[test] + fn rendered_parameters_map_from_empty_vec() { + let rendered_parameters = vec![]; + let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; + let expected_map = &HashMap::new(); + assert_eq!(map, expected_map); + } + + #[test] + fn rendered_parameters_map_from_map() { + let existing_parameters = HashMap::from([ + ( + Region::new("us-west-2"), + HashMap::from([ + ( + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashMap::from([( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + )]), + ), + ]); + let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; + let expected_map = &HashMap::from([ + ( + "us-east-1".to_string(), + HashMap::from([( + "test3-parameter-name".to_string(), + "test3-parameter-value".to_string(), + )]), + ), + ( + "us-west-2".to_string(), + HashMap::from([ + ( + "test1-parameter-name".to_string(), + "test1-parameter-value".to_string(), + ), + ( + "test2-parameter-name".to_string(), + "test2-parameter-value".to_string(), + ), + ]), + ), + ]); + assert_eq!(map, expected_map); + } + + #[test] + fn rendered_parameters_map_from_empty_map() { + let existing_parameters = HashMap::new(); + let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; + let expected_map = &HashMap::new(); + assert_eq!(map, expected_map); + } +} diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs index 48a97a88..493d4e14 100644 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -64,8 +64,7 @@ pub async fn validate( // Parse the file 
holding expected parameters info!("Parsing expected parameters file"); - let expected_parameters = - parse_expected_parameters(&validate_ssm_args.expected_parameters_path).await?; + let expected_parameters = parse_parameters(&validate_ssm_args.expected_parameters_path).await?; info!("Parsed expected parameters file"); @@ -180,7 +179,7 @@ type ParameterValue = String; /// Parse the file holding expected parameters. Return a HashMap of Region mapped to a HashMap /// of the parameters in that region, with each parameter being a mapping of `SsmKey` to its /// value as `String`. -pub(crate) async fn parse_expected_parameters( +pub(crate) async fn parse_parameters( expected_parameters_file: &PathBuf, ) -> Result>> { // Parse the JSON file as a HashMap of region_name, mapped to a HashMap of parameter_name and @@ -232,7 +231,7 @@ pub(crate) async fn run(args: &Args, validate_ssm_args: &ValidateSsmArgs) -> Res Ok(()) } -mod error { +pub(crate) mod error { use crate::aws::ssm::ssm; use snafu::Snafu; use std::path::PathBuf; From bcc7f6fbd3074a79c24f397123949da1543b1670 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 7 Mar 2023 18:25:36 +0000 Subject: [PATCH 0930/1356] testsys: Add support for metal-k8s testing --- tools/Cargo.lock | 1 + tools/testsys-config/src/lib.rs | 16 ++ tools/testsys/Cargo.toml | 1 + tools/testsys/src/aws_ecs.rs | 2 +- tools/testsys/src/aws_k8s.rs | 2 +- tools/testsys/src/crds.rs | 176 ++++++++++++++++------ tools/testsys/src/error.rs | 6 + tools/testsys/src/main.rs | 5 +- tools/testsys/src/metal_k8s.rs | 253 ++++++++++++++++++++++++++++++++ tools/testsys/src/run.rs | 51 ++++++- tools/testsys/src/sonobuoy.rs | 9 +- tools/testsys/src/vmware_k8s.rs | 4 +- 12 files changed, 468 insertions(+), 58 deletions(-) create mode 100644 tools/testsys/src/metal_k8s.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 2eaa69ed..7ec6b5c4 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -3245,6 +3245,7 @@ dependencies = [ "testsys-model", "tokio", "unescape", + "url", ] [[package]] diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index c85fc399..f358c808 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -261,6 +261,12 @@ pub struct GenericVariantConfig { pub control_plane_endpoint: Option, /// The path to userdata that should be used for Bottlerocket launch pub userdata: Option, + /// The directory containing Bottlerocket images. For metal, this is the directory containing + /// gzipped images. + pub os_image_dir: Option, + /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the + /// hardware csv that is passed to EKS Anywhere. 
+ pub hardware_csv: Option, /// The workload tests that should be run #[serde(default)] pub workloads: BTreeMap, @@ -298,6 +304,8 @@ impl GenericVariantConfig { conformance_registry: self.conformance_registry.or(other.conformance_registry), control_plane_endpoint: self.control_plane_endpoint.or(other.control_plane_endpoint), userdata: self.userdata.or(other.userdata), + os_image_dir: self.os_image_dir.or(other.os_image_dir), + hardware_csv: self.hardware_csv.or(other.hardware_csv), workloads, dev: self.dev.merge(other.dev), } @@ -358,6 +366,7 @@ pub struct TestsysImages { pub eks_resource_agent_image: Option, pub ecs_resource_agent_image: Option, pub vsphere_k8s_cluster_resource_agent_image: Option, + pub metal_k8s_cluster_resource_agent_image: Option, pub ec2_resource_agent_image: Option, pub vsphere_vm_resource_agent_image: Option, pub sonobuoy_test_agent_image: Option, @@ -383,6 +392,10 @@ impl TestsysImages { "{}/vsphere-k8s-cluster-resource-agent:{tag}", registry )), + metal_k8s_cluster_resource_agent_image: Some(format!( + "{}/metal-k8s-cluster-resource-agent:{tag}", + registry + )), ec2_resource_agent_image: Some(format!("{}/ec2-resource-agent:{tag}", registry)), vsphere_vm_resource_agent_image: Some(format!( "{}/vsphere-vm-resource-agent:{tag}", @@ -408,6 +421,9 @@ impl TestsysImages { vsphere_k8s_cluster_resource_agent_image: self .vsphere_k8s_cluster_resource_agent_image .or(other.vsphere_k8s_cluster_resource_agent_image), + metal_k8s_cluster_resource_agent_image: self + .metal_k8s_cluster_resource_agent_image + .or(other.metal_k8s_cluster_resource_agent_image), vsphere_vm_resource_agent_image: self .vsphere_vm_resource_agent_image .or(other.vsphere_vm_resource_agent_image), diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 0e562866..50855eb6 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -31,3 +31,4 @@ term_size = "0.3" testsys-config = { path = "../testsys-config/", version = "0.1" } tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } unescape = "0.1" +url = "2" diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs index 33d8db63..025609bd 100644 --- a/tools/testsys/src/aws_ecs.rs +++ b/tools/testsys/src/aws_ecs.rs @@ -22,7 +22,7 @@ pub(crate) struct AwsEcsCreator { #[async_trait::async_trait] impl CrdCreator for AwsEcsCreator { /// Determine the AMI from `amis.json`. - fn image_id(&self, _: &CrdInput) -> Result { + async fn image_id(&self, _: &CrdInput) -> Result { ami(&self.ami_input, &self.region) } diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs index 2e23e41a..d5018293 100644 --- a/tools/testsys/src/aws_k8s.rs +++ b/tools/testsys/src/aws_k8s.rs @@ -26,7 +26,7 @@ pub(crate) struct AwsK8sCreator { #[async_trait::async_trait] impl CrdCreator for AwsK8sCreator { /// Determine the AMI from `amis.json`. 
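A related change in this commit makes `CrdCreator::image_id` asynchronous — the `- fn image_id` / `+ async fn image_id` hunks here and in `vmware_k8s.rs` — so implementations may await while resolving an image id. A minimal sketch of that pattern, assuming the `async-trait` and `tokio` crates and using illustrative names rather than the real `CrdCreator` trait:

```rust
use async_trait::async_trait;

// Illustrative trait only; the real trait lives in tools/testsys/src/crds.rs.
#[async_trait]
trait ImageSource: Sync {
    async fn image_id(&self) -> Result<String, String>;
}

struct FixedImage(String);

#[async_trait]
impl ImageSource for FixedImage {
    // Implementations with no async work still compile; they simply return.
    async fn image_id(&self) -> Result<String, String> {
        Ok(self.0.clone())
    }
}

#[tokio::main]
async fn main() {
    let src = FixedImage("image-placeholder".to_string());
    assert_eq!(src.image_id().await.unwrap(), "image-placeholder");
}
```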
- fn image_id(&self, _: &CrdInput) -> Result { + async fn image_id(&self, _: &CrdInput) -> Result { ami(&self.ami_input, &self.region) } diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs index d58c85a2..29f5aa0a 100644 --- a/tools/testsys/src/crds.rs +++ b/tools/testsys/src/crds.rs @@ -222,6 +222,7 @@ impl<'a> CrdInput<'a> { "agent-role".to_string() => some_or_null(&self.config.agent_role), "conformance-image".to_string() => some_or_null(&self.config.conformance_image), "conformance-registry".to_string() => some_or_null(&self.config.conformance_registry), + "control-plane-endpoint".to_string() => some_or_null(&self.config.control_plane_endpoint), } } @@ -282,8 +283,15 @@ impl<'a> CrdInput<'a> { // Check for /shared/clusters/.yaml self.tests_directory .join("shared") - .join("cluster-config") + .join("clusters") + .join(cluster_name) + .with_extension("yaml"), + // Check for /shared/clusters//cluster.yaml + self.tests_directory + .join("shared") + .join("clusters") .join(cluster_name) + .join("cluster") .with_extension("yaml"), ]; @@ -315,6 +323,84 @@ impl<'a> CrdInput<'a> { Ok(Some(rendered_config)) } + + /// Find the hardware csv file for the given hardware csv name and test type. + fn hardware_csv_file_path(&self, hardware_csv: &str) -> Option { + let test_type = &self.test_type.to_string(); + // List all acceptable paths to the custom crd to allow users some freedom in the way + // `tests` is organized. + let acceptable_paths = vec![ + // Check for //.csv + self.tests_directory + .join(test_type) + .join(hardware_csv) + .with_extension("csv"), + // Check for /shared/.csv + self.tests_directory + .join("shared") + .join(hardware_csv) + .with_extension("csv"), + // Check for /shared/cluster-config/.csv + self.tests_directory + .join("shared") + .join("cluster-config") + .join(hardware_csv) + .with_extension("csv"), + // Check for /shared/clusters/.csv + self.tests_directory + .join("shared") + .join("clusters") + .join(hardware_csv) + .with_extension("csv"), + ]; + + // Find the first acceptable path that exists and return that. + acceptable_paths.into_iter().find(|path| path.exists()) + } + + /// Find the resolved cluster config file for the given cluster name and test type if it exists. + fn resolved_hardware_csv(&self) -> Result> { + let hardware_csv = match &self.config.hardware_csv { + Some(hardware_csv) => hardware_csv, + None => return Ok(None), + }; + + // If the hardware csv is csv like, it probably is a csv; otherwise, it is a path to the + // hardware csv. 
+ if hardware_csv.contains(',') { + return Ok(Some(hardware_csv.to_string())); + } + + let path = match self.hardware_csv_file_path(hardware_csv) { + None => return Ok(None), + Some(path) => path, + }; + + info!("Using hardware csv at {}", path.display()); + + let config = fs::read_to_string(&path).context(error::FileSnafu { path })?; + Ok(Some(config)) + } + + fn hardware_for_cluster(&self, cluster_name: &str) -> Result> { + // Check for /shared/clusters//hardware.csv + let path = self + .tests_directory + .join("shared") + .join("clusters") + .join(cluster_name) + .join("hardware") + .with_extension("csv"); + + if !path.exists() { + return Ok(None); + } + + info!("Using hardware csv at {}", path.display()); + + let config = fs::read_to_string(&path).context(error::FileSnafu { path })?; + Ok(Some(config)) + } } /// Take the value of the `Option` or `"null"` if the `Option` was `None` @@ -327,7 +413,7 @@ fn some_or_null(field: &Option) -> String { #[async_trait::async_trait] pub(crate) trait CrdCreator: Sync { /// Return the image id that should be used for normal testing. - fn image_id(&self, crd_input: &CrdInput) -> Result; + async fn image_id(&self, crd_input: &CrdInput) -> Result; /// Return the image id that should be used as the starting point for migration testing. async fn starting_image_id(&self, crd_input: &CrdInput) -> Result; @@ -369,15 +455,45 @@ pub(crate) trait CrdCreator: Sync { crd_input: &CrdInput, ) -> Result> { let mut crds = Vec::new(); + let image_id = match &test_type { + KnownTestType::Migration => { + if let Some(image_id) = &crd_input.starting_image_id { + debug!( + "Using the provided starting image id for migration testing '{}'", + image_id + ); + image_id.to_string() + } else { + let image_id = self.starting_image_id(crd_input).await?; + debug!( + "A starting image id was not provided, '{}' will be used instead.", + image_id + ); + image_id + } + } + _ => self.image_id(crd_input).await?, + }; for cluster_name in &crd_input.cluster_names()? 
{ let cluster_output = self .cluster_crd(ClusterInput { cluster_name, + image_id: &image_id, crd_input, cluster_config: &crd_input.resolved_cluster_config( cluster_name, - &mut self.additional_fields(&test_type.to_string()), + &mut self + .additional_fields(&test_type.to_string()) + .into_iter() + // Add the image id incase it is needed for cluster creation + .chain(Some(("image-id".to_string(), image_id.clone())).into_iter()) + .collect::>(), )?, + hardware_csv: &crd_input + .resolved_hardware_csv() + .transpose() + .or_else(|| crd_input.hardware_for_cluster(cluster_name).transpose()) + .transpose()?, }) .await?; let cluster_crd_name = cluster_output.crd_name(); @@ -385,17 +501,17 @@ pub(crate) trait CrdCreator: Sync { debug!("Cluster crd was created for '{}'", cluster_name); crds.push(crd) } + let bottlerocket_output = self + .bottlerocket_crd(BottlerocketInput { + cluster_crd_name: &cluster_crd_name, + image_id: image_id.clone(), + test_type, + crd_input, + }) + .await?; + let bottlerocket_crd_name = bottlerocket_output.crd_name(); match &test_type { KnownTestType::Conformance | KnownTestType::Quick => { - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id: self.image_id(crd_input)?, - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); if let Some(crd) = bottlerocket_output.crd() { debug!("Bottlerocket crd was created for '{}'", cluster_name); crds.push(crd) @@ -415,15 +531,6 @@ pub(crate) trait CrdCreator: Sync { } } KnownTestType::Workload => { - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id: self.image_id(crd_input)?, - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); if let Some(crd) = bottlerocket_output.crd() { debug!("Bottlerocket crd was created for '{}'", cluster_name); crds.push(crd) @@ -443,29 +550,6 @@ pub(crate) trait CrdCreator: Sync { } } KnownTestType::Migration => { - let image_id = if let Some(image_id) = &crd_input.starting_image_id { - debug!( - "Using the provided starting image id for migration testing '{}'", - image_id - ); - image_id.to_string() - } else { - let image_id = self.starting_image_id(crd_input).await?; - debug!( - "A starting image id was not provided, '{}' will be used instead.", - image_id - ); - image_id - }; - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id, - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); if let Some(crd) = bottlerocket_output.crd() { debug!("Bottlerocket crd was created for '{}'", cluster_name); crds.push(crd) @@ -581,7 +665,7 @@ pub(crate) trait CrdCreator: Sync { let mut fields = crd_input.config_fields(cluster_name); fields.insert("api-version".to_string(), API_VERSION.to_string()); fields.insert("namespace".to_string(), NAMESPACE.to_string()); - fields.insert("image-id".to_string(), self.image_id(crd_input)?); + fields.insert("image-id".to_string(), self.image_id(crd_input).await?); fields.append(&mut self.additional_fields(test_type)); let mut handlebars = Handlebars::new(); @@ -621,8 +705,10 @@ pub(crate) trait CrdCreator: Sync { /// The input used for cluster crd creation pub struct ClusterInput<'a> { pub cluster_name: &'a String, + pub image_id: &'a String, pub crd_input: &'a CrdInput<'a>, pub cluster_config: &'a Option, + pub hardware_csv: &'a 
Option, } /// The input used for bottlerocket crd creation diff --git a/tools/testsys/src/error.rs b/tools/testsys/src/error.rs index ddaca431..f937e7ac 100644 --- a/tools/testsys/src/error.rs +++ b/tools/testsys/src/error.rs @@ -102,6 +102,12 @@ pub enum Error { #[snafu(display("{} is not supported.", what))] Unsupported { what: String }, + #[snafu(display("Unable to parse url from '{}': {}", url, source))] + UrlParse { + url: String, + source: url::ParseError, + }, + #[snafu(display("Unable to create `Variant` from `{}`: {}", variant, source))] Variant { variant: String, diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs index e7107f41..b078f891 100644 --- a/tools/testsys/src/main.rs +++ b/tools/testsys/src/main.rs @@ -21,6 +21,7 @@ mod delete; mod error; mod install; mod logs; +mod metal_k8s; mod migration; mod restart_test; mod run; @@ -70,8 +71,8 @@ impl TestsysArgs { #[derive(Subcommand, Debug)] enum Command { - Install(Install), - // We need to box run because it requires significantly more arguments than the other commands. + // We need to box some commands because they require significantly more arguments than the other commands. + Install(Box), Run(Box), Delete(Delete), Status(Status), diff --git a/tools/testsys/src/metal_k8s.rs b/tools/testsys/src/metal_k8s.rs new file mode 100644 index 00000000..4304a8cd --- /dev/null +++ b/tools/testsys/src/metal_k8s.rs @@ -0,0 +1,253 @@ +use crate::crds::{ + BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, + TestInput, +}; +use crate::error::{self, Result}; +use crate::migration::migration_crd; +use crate::sonobuoy::{sonobuoy_crd, workload_crd}; +use bottlerocket_types::agent_config::MetalK8sClusterConfig; +use maplit::btreemap; +use serde::Deserialize; +use snafu::{OptionExt, ResultExt}; +use std::collections::BTreeMap; +use testsys_model::{Crd, DestructionPolicy}; +use url::Url; + +/// A `CrdCreator` responsible for creating crd related to `metal-k8s` variants. +pub(crate) struct MetalK8sCreator { + pub(crate) region: String, + pub(crate) encoded_mgmt_cluster_kubeconfig: String, + pub(crate) image_name: String, +} + +#[async_trait::async_trait] +impl CrdCreator for MetalK8sCreator { + /// Use the provided image name with the `os_image_dir` from `Test.toml` for the image id. + async fn image_id(&self, crd_input: &CrdInput) -> Result { + image_url( + crd_input + .config + .os_image_dir + .as_ref() + .context(error::InvalidSnafu { + what: "An os image directory is required for metal testing", + })?, + &self.image_name, + ) + } + + /// Use standard naming conventions to predict the starting image name. + async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { + let filename = format!( + "bottlerocket-{}-{}-{}.img.gz", + crd_input.variant, + crd_input.arch, + crd_input + .starting_version + .as_ref() + .context(error::InvalidSnafu { + what: "The starting version must be provided for migration testing" + })? + ); + image_url(crd_input.config.os_image_dir.as_ref().context(error::InvalidSnafu { + what: "An os image directory is required for metal testing if a starting image id not used", + })?, &filename) + } + + /// Creates a metal K8s cluster CRD with the `cluster_name` in `cluster_input`. 
+ async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { + let (cluster_name, control_plane_endpoint_ip, k8s_version) = cluster_data( + cluster_input + .cluster_config + .as_ref() + .context(error::InvalidSnafu { + what: "A cluster config is required for Bare Metal cluster provisioning.", + })?, + )?; + + let labels = cluster_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => "cluster".to_string(), + "testsys/cluster".to_string() => cluster_name.clone(), + "testsys/controlPlaneEndpoint".to_string() => control_plane_endpoint_ip, + "testsys/k8sVersion".to_string() => k8s_version + }); + + // Check if the cluster already has a CRD + if let Some(cluster_crd) = cluster_input + .crd_input + .existing_crds( + &labels, + &[ + "testsys/cluster", + "testsys/type", + "testsys/controlPlaneEndpoint", + "testsys/k8sVersion", + ], + ) + .await? + .pop() + { + return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); + } + + // Check if an existing cluster is using this endpoint + let existing_clusters = cluster_input + .crd_input + .existing_crds(&labels, &["testsys/type", "testsys/controlPlaneEndpoint"]) + .await?; + + let metal_k8s_crd = MetalK8sClusterConfig::builder() + .set_labels(Some(labels)) + .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig) + .hardware_csv_base64(base64::encode( + cluster_input + .hardware_csv + .as_ref() + .context(error::InvalidSnafu { + what: "A hardware CSV is required for Bare Metal testing", + })?, + )) + .cluster_config_base64(base64::encode( + cluster_input + .cluster_config + .as_ref() + .context(error::InvalidSnafu { + what: "A cluster config is required for Bare Metal testing", + })?, + )) + .set_conflicts_with(Some(existing_clusters)) + .destruction_policy( + cluster_input + .crd_input + .config + .dev + .cluster_destruction_policy + .to_owned() + .unwrap_or(DestructionPolicy::OnTestSuccess), + ) + .image( + cluster_input + .crd_input + .images + .metal_k8s_cluster_resource_agent_image + .as_ref() + .expect( + "The default metal K8s cluster resource provider image URI is missing.", + ), + ) + .set_image_pull_secret( + cluster_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .privileged(true) + .build(cluster_name) + .context(error::BuildSnafu { + what: "metal K8s cluster CRD", + })?; + + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( + metal_k8s_crd, + )))) + } + + /// Machines are provisioned during cluster creation, so there is nothing to do here. + async fn bottlerocket_crd<'a>( + &self, + _bottlerocket_input: BottlerocketInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::None) + } + + async fn migration_crd<'a>( + &self, + migration_input: MigrationInput<'a>, + ) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( + migration_input, + Some("us-west-2".to_string()), + "instanceIds", + )?)))) + } + + async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( + test_input, + )?)))) + } + + async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( + test_input, + )?)))) + } + + fn additional_fields(&self, _test_type: &str) -> BTreeMap { + btreemap! 
{"region".to_string() => self.region.clone()} + } +} + +/// Determine the (cluster name, control plane endpoint ip, K8s version) from an EKS Anywhere cluster manifest +fn cluster_data(config: &str) -> Result<(String, String, String)> { + let cluster_manifest = serde_yaml::Deserializer::from_str(config) + .map(|config| { + serde_yaml::Value::deserialize(config).context(error::SerdeYamlSnafu { + what: "Unable to deserialize cluster config", + }) + }) + // Make sure all of the configs were deserializable + .collect::>>()? + .into_iter() + // Find the `Cluster` config + .find(|config| { + config.get("kind") == Some(&serde_yaml::Value::String("Cluster".to_string())) + }); + let cluster_name = cluster_manifest + .as_ref() + // Get the name from the metadata field in the `Cluster` config + .and_then(|config| config.get("metadata")) + .and_then(|config| config.get("name")) + .and_then(|name| name.as_str()) + .context(error::MissingSnafu { + item: "name", + what: "EKS Anywhere config metadata", + })? + .to_string(); + + let control_plane_endpoint_ip = cluster_manifest + .as_ref() + // Get the name from the metadata field in the `Cluster` config + .and_then(|config| config.get("spec")) + .and_then(|config| config.get("controlPlaneConfiguration")) + .and_then(|config| config.get("endpoint")) + .and_then(|config| config.get("host")) + .and_then(|name| name.as_str()) + .context(error::MissingSnafu { + item: "control plane endpoint", + what: "EKS Anywhere config metadata", + })? + .to_string(); + + let k8s_version = cluster_manifest + .as_ref() + // Get the name from the metadata field in the `Cluster` config + .and_then(|config| config.get("spec")) + .and_then(|config| config.get("kubernetesVersion")) + .and_then(|name| name.as_str()) + .context(error::MissingSnafu { + item: "control plane endpoint", + what: "EKS Anywhere config metadata", + })? 
+ .to_string(); + + Ok((cluster_name, control_plane_endpoint_ip, k8s_version)) +} + +fn image_url(image_dir: &str, filename: &str) -> Result { + let image_url = Url::parse(image_dir) + .and_then(|base_url| base_url.join(filename)) + .context(error::UrlParseSnafu { url: image_dir })?; + Ok(image_url.to_string()) +} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 2015529e..d4a187ea 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -3,6 +3,7 @@ use crate::aws_k8s::AwsK8sCreator; use crate::crds::{CrdCreator, CrdInput}; use crate::error; use crate::error::Result; +use crate::metal_k8s::MetalK8sCreator; use crate::vmware_k8s::VmwareK8sCreator; use bottlerocket_variant::Variant; use clap::Parser; @@ -48,7 +49,7 @@ pub(crate) struct Run { #[clap(long, env = "TESTSYS_TESTS_DIR", parse(from_os_str))] tests_directory: PathBuf, - /// The path to the EKS-A management cluster kubeconfig for vSphere K8s cluster creation + /// The path to the EKS-A management cluster kubeconfig for vSphere or metal K8s cluster creation #[clap(long, env = "TESTSYS_MGMT_CLUSTER_KUBECONFIG", parse(from_os_str))] mgmt_cluster_kubeconfig: Option, @@ -65,6 +66,10 @@ pub(crate) struct Run { #[clap(long, env = "BUILDSYS_OVA")] ova_name: Option, + /// The name of the image that should be used for Bare Metal testing + #[clap(long, env = "BUILDSYS_NAME_FULL")] + image_name: Option, + /// The path to `amis.json` #[clap(long, env = "AMI_INPUT")] ami_input: Option, @@ -155,6 +160,16 @@ struct CliConfig { /// A set of workloads that should be run for a workload test (--workload my-workload=) #[clap(long = "workload", parse(try_from_str = parse_workloads), number_of_values = 1)] pub workloads: Vec<(String, String)>, + + /// The directory containing Bottlerocket images. For metal, this is the directory containing + /// gzipped images. + #[clap(long)] + pub os_image_dir: Option, + + /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the + /// hardware csv that is passed to EKS Anywhere. + #[clap(long)] + pub hardware_csv: Option, } impl From for GenericVariantConfig { @@ -168,6 +183,8 @@ impl From for GenericVariantConfig { conformance_registry: val.conformance_registry, control_plane_endpoint: val.control_plane_endpoint, userdata: val.userdata, + os_image_dir: val.os_image_dir, + hardware_csv: val.hardware_csv, dev: Default::default(), workloads: val.workloads.into_iter().collect(), } @@ -341,6 +358,30 @@ impl Run { creds: vsphere_secret, }) } + "metal-k8s" => { + debug!("Using family 'metal-k8s'"); + let aws_config = infra_config.aws.unwrap_or_default(); + let region = aws_config + .regions + .front() + .map(String::to_string) + .unwrap_or_else(|| "us-west-2".to_string()); + + let mgmt_cluster_kubeconfig = + self.mgmt_cluster_kubeconfig.context(error::InvalidSnafu { + what: "A management cluster kubeconfig is required for metal testing", + })?; + let encoded_kubeconfig = base64::encode( + read_to_string(&mgmt_cluster_kubeconfig).context(error::FileSnafu { + path: mgmt_cluster_kubeconfig, + })?, + ); + Box::new(MetalK8sCreator { + region, + encoded_mgmt_cluster_kubeconfig: encoded_kubeconfig, + image_name: self.image_name.context(error::InvalidSnafu{what: "The image name is required for Bare Metal testing. This can be set with `BUILDSYS_NAME_FULL`."})? 
+ }) + } unsupported => { return Err(error::Error::Unsupported { what: unsupported.to_string(), @@ -472,6 +513,13 @@ pub(crate) struct TestsysImages { )] pub(crate) vsphere_k8s_cluster_resource: Option, + /// Bare Metal cluster resource agent URI. If not provided the latest released resource agent will be used. + #[clap( + long = "metal-k8s-cluster-resource-agent-image", + env = "TESTSYS_METAL_K8S_CLUSTER_RESOURCE_AGENT_IMAGE" + )] + pub(crate) metal_k8s_cluster_resource: Option, + /// EC2 resource agent URI. If not provided the latest released resource agent will be used. #[clap( long = "ec2-resource-agent-image", @@ -529,6 +577,7 @@ impl From for testsys_config::TestsysImages { eks_resource_agent_image: val.eks_resource, ecs_resource_agent_image: val.ecs_resource, vsphere_k8s_cluster_resource_agent_image: val.vsphere_k8s_cluster_resource, + metal_k8s_cluster_resource_agent_image: val.metal_k8s_cluster_resource, ec2_resource_agent_image: val.ec2_resource, vsphere_vm_resource_agent_image: val.vsphere_vm_resource, sonobuoy_test_agent_image: val.sonobuoy_test, diff --git a/tools/testsys/src/sonobuoy.rs b/tools/testsys/src/sonobuoy.rs index 0e0c2827..2741b50e 100644 --- a/tools/testsys/src/sonobuoy.rs +++ b/tools/testsys/src/sonobuoy.rs @@ -14,11 +14,8 @@ pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result { let cluster_resource_name = test_input .cluster_crd_name .as_ref() - .expect("A cluster name is required for migrations"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); + .expect("A cluster name is required for sonobuoy testing"); + let bottlerocket_resource_name = test_input.bottlerocket_crd_name; let sonobuoy_mode = match test_input.test_type { KnownTestType::Conformance => SonobuoyMode::CertifiedConformance, KnownTestType::Quick | KnownTestType::Migration | KnownTestType::Workload => { @@ -33,7 +30,7 @@ pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result { }); SonobuoyConfig::builder() - .resources(bottlerocket_resource_name) + .set_resources(Some(bottlerocket_resource_name.iter().cloned().collect())) .resources(cluster_resource_name) .set_depends_on(Some(test_input.prev_tests)) .set_retries(Some(5)) diff --git a/tools/testsys/src/vmware_k8s.rs b/tools/testsys/src/vmware_k8s.rs index 3090fa7c..51d43b8f 100644 --- a/tools/testsys/src/vmware_k8s.rs +++ b/tools/testsys/src/vmware_k8s.rs @@ -29,7 +29,7 @@ pub(crate) struct VmwareK8sCreator { #[async_trait::async_trait] impl CrdCreator for VmwareK8sCreator { /// Use the provided OVA name for the image id. - fn image_id(&self, _: &CrdInput) -> Result { + async fn image_id(&self, _: &CrdInput) -> Result { Ok(self.ova_name.to_string()) } @@ -104,7 +104,7 @@ impl CrdCreator for VmwareK8sCreator { .control_plane_endpoint_ip(control_plane_endpoint) .creation_policy(CreationPolicy::IfNotExists) .version(cluster_version) - .ova_name(self.image_id(cluster_input.crd_input)?) + .ova_name(self.image_id(cluster_input.crd_input).await?) 
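// The call above changed because `image_id` on the `CrdCreator` trait is now
// async (see the signature change in the impl earlier in this hunk). A sketch
// of the presumed trait method, for orientation only; the exact declaration
// (likely in `crds.rs`) is not shown in this patch:
//
//   async fn image_id(&self, crd_input: &CrdInput) -> Result<String>;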
.tuf_repo( cluster_input .crd_input From 505f7fbdedc21e7a5a66468548797939add32f36 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Sat, 8 Apr 2023 12:08:15 +0000 Subject: [PATCH 0931/1356] tools: Update rust dependencies Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 336 +++++++++++++++++++++++------------------------ 1 file changed, 168 insertions(+), 168 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 7ec6b5c4..6e6f7abb 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -96,9 +96,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", @@ -672,9 +672,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -704,11 +704,12 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.17" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" dependencies = [ "memchr", + "serde", ] [[package]] @@ -883,9 +884,9 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb5b7cce3f0df950d0ed759a9cd31c5c8c54ec783e501187fa60b7b866ea183" +checksum = "543f50d38e0db1460c01915674b1f329438d5b8e0bb40057862ee63bc1077681" dependencies = [ "argh", "async-trait", @@ -941,15 +942,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] @@ -974,9 +975,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -984,9 +985,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -995,9 +996,9 @@ dependencies = [ [[package]] name = 
"crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if", @@ -1008,9 +1009,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -1037,9 +1038,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1049,9 +1050,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -1064,15 +1065,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -1081,9 +1082,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", @@ -1091,9 +1092,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", @@ -1105,9 +1106,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", @@ -1185,9 +1186,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" +checksum = 
"68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" [[package]] name = "either" @@ -1297,9 +1298,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -1312,9 +1313,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -1322,15 +1323,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -1339,15 +1340,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ "proc-macro2", "quote", @@ -1356,15 +1357,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -1374,9 +1375,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-channel", "futures-core", @@ -1400,9 +1401,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1410,9 +1411,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", @@ -1427,9 +1428,9 @@ checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "globset" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" +checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ "aho-corasick", "bstr", @@ -1458,9 +1459,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -1606,9 +1607,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -1715,9 +1716,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -1772,25 +1773,26 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" dependencies = [ + "hermit-abi 0.3.1", "libc", "windows-sys 0.45.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "8687c819457e979cc940d09cb16e42a1bf70aa6b60a549de6d3a62a0ee90c69e" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", @@ -1800,9 +1802,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" @@ 
-1938,9 +1940,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" [[package]] name = "link-cplusplus" @@ -2023,18 +2025,18 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" @@ -2153,9 +2155,9 @@ checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.48" +version = "0.10.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" dependencies = [ "bitflags", "cfg-if", @@ -2185,11 +2187,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.83" +version = "0.9.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" dependencies = [ - "autocfg", "cc", "libc", "pkg-config", @@ -2217,9 +2218,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" [[package]] name = "output_vt100" @@ -2314,9 +2315,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -2324,9 +2325,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -2334,9 +2335,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202" +checksum = 
"75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", @@ -2347,9 +2348,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", @@ -2444,9 +2445,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -2553,9 +2554,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -2592,18 +2593,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.1" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307f7aacdbab3f0adee67d52739a1d71112cc068d6fab169ddeb18e48877fad" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -2611,9 +2612,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -2643,9 +2644,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -2654,9 +2655,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" @@ -2723,9 +2724,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc_version" @@ -2738,9 +2739,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" dependencies = [ "bitflags", "errno", @@ -2785,9 +2786,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "same-file" @@ -2839,9 +2840,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -2888,18 +2889,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -2916,9 +2917,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", @@ -2938,9 +2939,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ "indexmap", "itoa", @@ -3039,9 +3040,9 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" +checksum = "acee08041c5de3d5048c8b3f6f13fafb3026b24ba43c6a695a0c76179b844369" dependencies = [ "log", "termcolor", @@ -3050,9 +3051,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -3088,9 +3089,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ 
-3152,9 +3153,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -3314,18 +3315,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -3334,9 +3335,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53250a3b3fed8ff8fd988587d8925d26a83ac3845d9e03b220b37f34c2b8d6c2" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "libc", @@ -3354,9 +3355,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a460aeb8de6dcb0f381e1ee05f1cd56fcf5a5f6eb8187ff3d8f0b11078d38b7c" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -3452,9 +3453,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -3695,15 +3696,15 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -3791,12 +3792,11 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", 
"winapi-util", ] @@ -3974,9 +3974,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -3989,45 +3989,45 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winreg" @@ -4061,6 +4061,6 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" From 9dc1d26ab63bdad70afd62a8a23a9ef0cb08f8a6 Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Fri, 14 Apr 2023 18:48:29 +0000 Subject: [PATCH 0932/1356] pubsys: added ami validation --- tools/Cargo.lock | 1 + tools/pubsys/Cargo.toml | 1 + tools/pubsys/src/aws/mod.rs | 1 + tools/pubsys/src/aws/validate_ami/ami.rs | 224 ++++ tools/pubsys/src/aws/validate_ami/mod.rs | 851 ++++++++++++++ tools/pubsys/src/aws/validate_ami/results.rs | 1034 ++++++++++++++++++ tools/pubsys/src/main.rs | 14 + 7 files changed, 2126 insertions(+) create mode 
100644 tools/pubsys/src/aws/validate_ami/ami.rs create mode 100644 tools/pubsys/src/aws/validate_ami/mod.rs create mode 100644 tools/pubsys/src/aws/validate_ami/results.rs diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 6e6f7abb..c2c7621f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2486,6 +2486,7 @@ dependencies = [ "semver", "serde", "serde_json", + "serde_plain", "simplelog", "snafu", "structopt", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index b5773934..7c0e94fe 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -38,6 +38,7 @@ reqwest = { version = "0.11", default-features = false, features = ["rustls-tls" semver = "1" serde = { version = "1", features = ["derive"] } serde_json = "1" +serde_plain = "1" simplelog = "0.12" snafu = "0.7" structopt = { version = "0.3", default-features = false } diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index 7cd95a33..4c4c0a1a 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -8,6 +8,7 @@ pub(crate) mod ami; pub(crate) mod promote_ssm; pub(crate) mod publish_ami; pub(crate) mod ssm; +pub(crate) mod validate_ami; pub(crate) mod validate_ssm; /// Builds a Region from the given region name. diff --git a/tools/pubsys/src/aws/validate_ami/ami.rs b/tools/pubsys/src/aws/validate_ami/ami.rs new file mode 100644 index 00000000..c5d96a81 --- /dev/null +++ b/tools/pubsys/src/aws/validate_ami/ami.rs @@ -0,0 +1,224 @@ +//! The ami module owns the describing of images in EC2. + +use aws_sdk_ec2::model::Image; +use aws_sdk_ec2::{Client as Ec2Client, Region}; +use futures::future::{join, ready}; +use futures::stream::{FuturesUnordered, StreamExt}; +use log::{info, trace}; +use serde::{Deserialize, Serialize}; +use snafu::ResultExt; +use std::collections::HashMap; + +use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef}; + +/// Wrapper structure for the `ImageDef` struct, used during deserialization +#[derive(Deserialize)] +#[serde(untagged)] +pub(crate) enum ImageData { + Image(ImageDef), + ImageList(Vec), +} + +impl ImageData { + pub(crate) fn images(&self) -> Vec { + match self { + ImageData::Image(image) => vec![image.to_owned()], + ImageData::ImageList(images) => images.to_owned(), + } + } +} + +/// Structure of the EC2 image fields that should be validated +#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Clone)] +pub(crate) struct ImageDef { + /// The ID of the EC2 image + pub(crate) id: String, + + /// The name of the EC2 image + pub(crate) name: String, + + /// Whether or not the EC2 image is public + #[serde(default)] + pub(crate) public: bool, + + /// The launch permissions for the EC2 image. 
+ pub(crate) launch_permissions: Option>, + + /// Whether or not the EC2 image supports Elastic Network Adapter + #[serde(default = "default_ena_support")] + pub(crate) ena_support: bool, + + /// The level of the EC2 image's Single Root I/O Virtualization support + #[serde(default = "default_sriov_net_support")] + pub(crate) sriov_net_support: String, +} + +fn default_ena_support() -> bool { + true +} + +fn default_sriov_net_support() -> String { + "simple".to_string() +} + +impl From<(Image, Option>)> for ImageDef { + fn from(args: (Image, Option>)) -> Self { + Self { + id: args.0.image_id().unwrap_or_default().to_string(), + name: args.0.name().unwrap_or_default().to_string(), + public: args.0.public().unwrap_or_default(), + launch_permissions: args.1, + ena_support: args.0.ena_support().unwrap_or_default(), + sriov_net_support: args.0.sriov_net_support().unwrap_or_default().to_string(), + } + } +} + +/// Fetches all images whose IDs are keys in `expected_images`. The map `expected_image_public` is +/// used to determine if the launch permissions for the image should be fetched (only if the image is not +/// public). The return value is a HashMap of Region to a Result, which is `Ok` if the request for +/// that region was successful and `Err` if not. The Result contains a HashMap of `image_id` to +/// `ImageDef`. +pub(crate) async fn describe_images<'a>( + clients: &'a HashMap, + expected_images: &HashMap>, +) -> HashMap<&'a Region, Result>> { + // Build requests for images; we have to request with a regional client so we split them by + // region + let mut requests = Vec::with_capacity(clients.len()); + clients.iter().for_each(|(region, ec2_client)| { + trace!("Requesting images in {}", region); + let get_future = describe_images_in_region( + region, + ec2_client, + expected_images + .get(region) + .map(|i| i.to_owned()) + .unwrap_or_default() + .into_iter() + .map(|i| (i.id.clone(), i)) + .collect::>(), + ); + + requests.push(join(ready(region), get_future)); + }); + + // Send requests in parallel and wait for responses, collecting results into a list. + requests + .into_iter() + .collect::>() + .collect() + .await +} + +/// Fetches the images whose IDs are keys in `expected_images` +pub(crate) async fn describe_images_in_region( + region: &Region, + client: &Ec2Client, + expected_images: HashMap, +) -> Result> { + info!("Retrieving images in {}", region.to_string()); + let mut images = HashMap::new(); + + // Send the request + let mut get_future = client + .describe_images() + .include_deprecated(true) + .set_image_ids(Some(Vec::from_iter( + expected_images.keys().map(|k| k.to_owned()), + ))) + .into_paginator() + .send(); + + // Iterate over the retrieved images + while let Some(page) = get_future.next().await { + let retrieved_images = page + .context(error::DescribeImagesSnafu { + region: region.to_string(), + })? + .images() + .unwrap_or_default() + .to_owned(); + for image in retrieved_images { + // Insert a new key-value pair into the map, with the key containing image ID + // and the value containing the ImageDef object created from the image + let image_id = image + .image_id() + .ok_or(error::Error::MissingField { + missing: "image_id".to_string(), + })? + .to_string(); + let expected_public = expected_images + .get(&image_id) + .ok_or(error::Error::MissingExpectedPublic { + missing: image_id.clone(), + })? 
+ .public; + // If the image is not expected to be public, retrieve the launch permissions + trace!( + "Retrieving launch permissions for {} in {}", + image_id, + region.as_ref() + ); + let launch_permissions = if !expected_public { + Some( + get_launch_permissions(client, region.as_ref(), &image_id) + .await + .context(error::GetLaunchPermissionsSnafu { + region: region.as_ref(), + image_id: image_id.clone(), + })?, + ) + } else { + None + }; + let image_def = ImageDef::from((image.to_owned(), launch_permissions)); + images.insert(image_id, image_def); + } + } + + info!("Images in {} have been retrieved", region.to_string()); + Ok(images) +} + +pub(crate) mod error { + use aws_sdk_ec2::error::DescribeImagesError; + use aws_sdk_ssm::types::SdkError; + use aws_smithy_types::error::display::DisplayErrorContext; + use snafu::Snafu; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + #[allow(clippy::large_enum_variant)] + pub(crate) enum Error { + #[snafu(display( + "Failed to describe images in {}: {}", + region, + DisplayErrorContext(source) + ))] + DescribeImages { + region: String, + source: SdkError, + }, + + #[snafu(display( + "Failed to retrieve launch permissions for image {} in region {}: {}", + image_id, + region, + source + ))] + GetLaunchPermissions { + region: String, + image_id: String, + source: crate::aws::ami::launch_permissions::Error, + }, + + #[snafu(display("Missing field in image: {}", missing))] + MissingField { missing: String }, + + #[snafu(display("Missing image ID in expected image publicity map: {}", missing))] + MissingExpectedPublic { missing: String }, + } +} + +pub(crate) type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/validate_ami/mod.rs b/tools/pubsys/src/aws/validate_ami/mod.rs new file mode 100644 index 00000000..02572dcd --- /dev/null +++ b/tools/pubsys/src/aws/validate_ami/mod.rs @@ -0,0 +1,851 @@ +//! The validate_ami module owns the 'validate-ami' subcommand and controls the process of validating +//! EC2 images + +pub(crate) mod ami; +pub(crate) mod results; + +use self::ami::{ImageData, ImageDef}; +use self::results::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults}; +use crate::aws::client::build_client_config; +use crate::aws::validate_ami::ami::describe_images; +use crate::Args; +use aws_sdk_ec2::{Client as AmiClient, Region}; +use log::{error, info, trace}; +use pubsys_config::InfraConfig; +use snafu::ResultExt; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::path::PathBuf; +use structopt::{clap, StructOpt}; + +/// Validates EC2 images by calling `describe-images` on all images in the file given by +/// `expected-amis-path` and ensuring that the returned `public`, `ena-support`, +/// `sriov-net-support`, and `launch-permissions` fields have the expected values. +#[derive(Debug, StructOpt)] +#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +pub(crate) struct ValidateAmiArgs { + /// File holding the expected amis + #[structopt(long, parse(from_os_str))] + expected_amis_path: PathBuf, + + /// Optional path where the validation results should be written + #[structopt(long, parse(from_os_str))] + write_results_path: Option, + + #[structopt(long, requires = "write-results-path")] + /// Optional filter to only write validation results with these statuses to the above path + /// The available statuses are: `Correct`, `Incorrect`, `Missing`. 
+ write_results_filter: Option>, + + #[structopt(long)] + /// If this argument is given, print the validation results summary as a JSON object instead + /// of a plaintext table + json: bool, +} + +/// Performs EC2 image validation and returns the `AmiValidationResults` object +pub(crate) async fn validate( + args: &Args, + validate_ami_args: &ValidateAmiArgs, +) -> Result { + info!("Parsing Infra.toml file"); + + // If a lock file exists, use that, otherwise use Infra.toml + let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) + .context(error::ConfigSnafu)?; + + trace!("Parsed infra config: {:#?}", infra_config); + + let aws = infra_config.aws.unwrap_or_default(); + + // Parse the expected ami file + info!("Parsing expected ami file"); + let expected_images = parse_expected_amis(&validate_ami_args.expected_amis_path).await?; + + info!("Parsed expected ami file"); + + // Create a `HashMap` of `AmiClient`s, one for each region where validation should happen + let base_region = &Region::new( + aws.regions + .get(0) + .ok_or(error::Error::EmptyInfraRegions { + path: args.infra_config_path.clone(), + })? + .clone(), + ); + let mut ami_clients = HashMap::with_capacity(expected_images.len()); + + for region in expected_images.keys() { + let client_config = build_client_config(region, base_region, &aws).await; + let ami_client = AmiClient::new(&client_config); + ami_clients.insert(region.clone(), ami_client); + } + + // Retrieve the EC2 images using the `AmiClient`s + info!("Retrieving EC2 images"); + let images = describe_images(&ami_clients, &expected_images) + .await + .into_iter() + .map(|(region, result)| { + ( + region, + result.map_err(|e| { + error!( + "Failed to retrieve images in region {}: {}", + region.to_string(), + e + ); + error::Error::UnreachableRegion { + region: region.to_string(), + } + }), + ) + }) + .collect::>>(); + + // Validate the retrieved EC2 images per region + info!("Validating EC2 images"); + let results: HashMap> = images + .into_iter() + .map(|(region, region_result)| { + ( + region.clone(), + validate_images_in_region( + &expected_images + .get(region) + .map(|e| e.to_owned()) + .unwrap_or_default(), + ®ion_result, + region, + ), + ) + }) + .collect(); + + let validation_results = AmiValidationResults::from_result_map(results); + + // If a path was given, write the results + if let Some(write_results_path) = &validate_ami_args.write_results_path { + // Filter the results by given status, and if no statuses were given, get all results + info!("Writing results to file"); + let results = if let Some(filter) = &validate_ami_args.write_results_filter { + validation_results.get_results_for_status(filter) + } else { + validation_results.get_all_results() + }; + + // Write the results as JSON + serde_json::to_writer_pretty( + &File::create(write_results_path).context(error::WriteValidationResultsSnafu { + path: write_results_path, + })?, + &results, + ) + .context(error::SerializeValidationResultsSnafu)?; + } + + Ok(validation_results) +} + +/// Validates EC2 images in a single region, based on a `Vec` of expected images +/// and a `HashMap` of actual retrieved images. Returns a +/// `HashSet` containing the result objects. 
+pub(crate) fn validate_images_in_region( + expected_images: &[ImageDef], + actual_images: &Result<HashMap<String, ImageDef>>, + region: &Region, +) -> HashSet<AmiValidationResult> { + match actual_images { + Ok(actual_images) => expected_images + .iter() + .map(|image| { + let new_image = if image.public { + ImageDef { + launch_permissions: None, + ..image.clone() + } + } else { + image.clone() + }; + AmiValidationResult::new( + image.id.clone(), + new_image, + Ok(actual_images.get(&image.id).map(|v| v.to_owned())), + region.clone(), + ) + }) + .collect(), + Err(_) => expected_images + .iter() + .map(|image| { + AmiValidationResult::new( + image.id.clone(), + image.clone(), + Err(error::Error::UnreachableRegion { + region: region.to_string(), + }), + region.clone(), + ) + }) + .collect(), + } +} + +type RegionName = String; +type AmiId = String; + +/// Parse the file holding image values. Return a `HashMap` of `Region` mapped to a vec of `ImageDef`s +/// for that region. +pub(crate) async fn parse_expected_amis( + expected_amis_path: &PathBuf, +) -> Result<HashMap<Region, Vec<ImageDef>>> { + // Parse the JSON file as a `HashMap` of region_name, mapped to an `ImageData` struct + let expected_amis: HashMap<RegionName, ImageData> = serde_json::from_reader( + &File::open(expected_amis_path.clone()).context(error::ReadExpectedImagesFileSnafu { + path: expected_amis_path, + })?, + ) + .context(error::ParseExpectedImagesFileSnafu)?; + + // Extract the `Vec<ImageDef>` from the `ImageData` structs + let vectored_images = expected_amis + .into_iter() + .map(|(region, value)| (Region::new(region), value.images())) + .collect::<HashMap<Region, Vec<ImageDef>>>(); + + Ok(vectored_images) +} + +/// Common entrypoint from main() +pub(crate) async fn run(args: &Args, validate_ami_args: &ValidateAmiArgs) -> Result<()> { + let results = validate(args, validate_ami_args).await?; + + if validate_ami_args.json { + println!( + "{}", + serde_json::to_string_pretty(&results.get_json_summary()) + .context(error::SerializeResultsSummarySnafu)?
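// For reference: `parse_expected_amis` above reads a JSON map of region name
// to either a single image object or a list of image objects (the untagged
// `ImageData` enum). A hypothetical input file could look like the following;
// the ID and name are illustrative only:
//
//   {
//     "us-west-2": [
//       {
//         "id": "ami-0123456789abcdef0",
//         "name": "example-image-name",
//         "public": true,
//         "ena_support": true,
//         "sriov_net_support": "simple"
//       }
//     ]
//   }
//
// Fields with serde defaults declared on `ImageDef` (`public`, `ena_support`,
// `sriov_net_support`) may be omitted from the file.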
+ ) + } else { + println!("{}", results); + } + Ok(()) +} + +mod error { + use snafu::Snafu; + use std::path::PathBuf; + + #[derive(Debug, Snafu)] + #[snafu(visibility(pub(super)))] + pub(crate) enum Error { + #[snafu(display("Error reading config: {}", source))] + Config { source: pubsys_config::Error }, + + #[snafu(display("Empty regions array in Infra.toml at path {}", path.display()))] + EmptyInfraRegions { path: PathBuf }, + + #[snafu(display("Failed to parse image file: {}", source))] + ParseExpectedImagesFile { source: serde_json::Error }, + + #[snafu(display("Failed to read image file: {:?}", path))] + ReadExpectedImagesFile { + source: std::io::Error, + path: PathBuf, + }, + + #[snafu(display("Failed to serialize validation results to json: {}", source))] + SerializeValidationResults { source: serde_json::Error }, + + #[snafu(display("Failed to retrieve images from region {}", region))] + UnreachableRegion { region: String }, + + #[snafu(display("Failed to write validation results to {:?}: {}", path, source))] + WriteValidationResults { + path: PathBuf, + source: std::io::Error, + }, + + #[snafu(display("Failed to serialize results summary to JSON: {}", source))] + SerializeResultsSummary { source: serde_json::Error }, + } +} + +pub(crate) use error::Error; + +type Result = std::result::Result; + +#[cfg(test)] +mod test { + use super::ami::ImageDef; + use super::validate_images_in_region; + use crate::aws::{ + ami::launch_permissions::LaunchPermissionDef, + validate_ami::results::{AmiValidationResult, AmiValidationResultStatus}, + }; + use aws_sdk_ec2::Region; + use std::collections::{HashMap, HashSet}; + + // These tests assert that the images can be validated correctly. + + // Tests validation of images where the expected value is equal to the actual value + #[test] + fn validate_images_all_correct() { + let expected_parameters: Vec = vec![ + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ]; + let actual_parameters: HashMap = HashMap::from([ + ( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ]); + let expected_results = HashSet::from_iter(vec![ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + 
}, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + ]); + let results = validate_images_in_region( + &expected_parameters, + &Ok(actual_parameters), + &Region::new("us-west-2"), + ); + + for result in &results { + assert_eq!(result.status, AmiValidationResultStatus::Correct); + } + assert_eq!(results, expected_results); + } + + // Tests validation of images where the expected value is different from the actual value + #[test] + fn validate_images_all_incorrect() { + let expected_parameters: Vec = vec![ + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ]; + let actual_parameters: HashMap = HashMap::from([ + ( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + }, + ), + ( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: false, + launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + }, + ), + ]); + let expected_results = HashSet::from_iter(vec![ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + 
launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: false, + launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + ]); + let results = validate_images_in_region( + &expected_parameters, + &Ok(actual_parameters), + &Region::new("us-west-2"), + ); + for result in &results { + assert_eq!(result.status, AmiValidationResultStatus::Incorrect); + } + assert_eq!(results, expected_results); + } + + // Tests validation of images where the actual value is missing + #[test] + fn validate_images_all_missing() { + let expected_parameters: Vec = vec![ + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ]; + let actual_parameters = HashMap::new(); + let expected_results = HashSet::from_iter(vec![ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + ]); + let results = validate_images_in_region( + &expected_parameters, + &Ok(actual_parameters), + &Region::new("us-west-2"), + ); + for result in &results { + assert_eq!(result.status, AmiValidationResultStatus::Missing); + } + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where each reachable status 
(Correct, Incorrect, Missing) happens once + #[test] + fn validate_images_mixed() { + let expected_parameters: Vec = vec![ + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ]; + let actual_parameters: HashMap = HashMap::from([ + ( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: false, + launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ), + ]); + let expected_results = HashSet::from_iter(vec![ + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: false, + launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + ]); + let results = validate_images_in_region( + &expected_parameters, + &Ok(actual_parameters), + &Region::new("us-west-2"), + ); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where the region is unreachable + #[test] + fn validate_images_unreachable() { + let expected_parameters: Vec = vec![ + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + 
launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + ]; + let expected_results = HashSet::from_iter(vec![ + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Err(crate::aws::validate_ami::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Err(crate::aws::validate_ami::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Err(crate::aws::validate_ami::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-west-2"), + ), + ]); + let results = validate_images_in_region( + &expected_parameters, + &Err(crate::aws::validate_ami::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + &Region::new("us-west-2"), + ); + + assert_eq!(results, expected_results); + } +} diff --git a/tools/pubsys/src/aws/validate_ami/results.rs b/tools/pubsys/src/aws/validate_ami/results.rs new file mode 100644 index 00000000..50add39d --- /dev/null +++ b/tools/pubsys/src/aws/validate_ami/results.rs @@ -0,0 +1,1034 @@ +//! The results module owns the reporting of EC2 image validation results. 
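+//!
+//! In short (summarizing the code below): each expected/actual image pair is recorded as an
+//! `AmiValidationResult` with a status of `Correct`, `Incorrect`, `Missing`, or `Unreachable`,
+//! and `AmiValidationResults` groups those results per region so they can be summarized as a
+//! table or as JSON.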
+
+use super::ami::ImageDef;
+use super::Result;
+use aws_sdk_ec2::Region;
+use serde::{Deserialize, Serialize};
+use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize};
+use std::collections::{HashMap, HashSet};
+use std::fmt::{self, Display};
+use tabled::{Table, Tabled};
+
+/// Represent the possible status of an EC2 image validation
+#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
+pub(crate) enum AmiValidationResultStatus {
+    /// The image was found and its monitored fields have the expected values
+    Correct,
+
+    /// The image was found but some of the monitored fields do not have the expected values
+    Incorrect,
+
+    /// The image was expected but not included in the actual images
+    Missing,
+
+    /// The region containing the image was not reachable
+    Unreachable,
+}
+
+derive_display_from_serialize!(AmiValidationResultStatus);
+derive_fromstr_from_deserialize!(AmiValidationResultStatus);
+
+/// Represents a single EC2 image validation result
+#[derive(Debug, Eq, Hash, PartialEq, Serialize)]
+pub(crate) struct AmiValidationResult {
+    /// The ID of the image
+    pub(crate) id: String,
+
+    /// `ImageDef` containing expected values for the image
+    pub(crate) expected_image_def: ImageDef,
+
+    /// `ImageDef` containing actual values for the image
+    pub(crate) actual_image_def: Option<ImageDef>,
+
+    /// The region the image resides in
+    #[serde(serialize_with = "serialize_region")]
+    pub(crate) region: Region,
+
+    /// The validation status of the image
+    pub(crate) status: AmiValidationResultStatus,
+}
+
+fn serialize_region<S>(region: &Region, serializer: S) -> std::result::Result<S::Ok, S::Error>
+where
+    S: serde::Serializer,
+{
+    serializer.serialize_str(region.to_string().as_str())
+}
+
+impl AmiValidationResult {
+    pub(crate) fn new(
+        id: String,
+        expected_image_def: ImageDef,
+        actual_image_def: Result<Option<ImageDef>>,
+        region: Region,
+    ) -> Self {
+        // Determine the validation status based on equality, presence, and absence of expected and
+        // actual image values
+        let status = match (&expected_image_def, &actual_image_def) {
+            (expected_image_def, Ok(Some(actual_image_def)))
+                if actual_image_def == expected_image_def =>
+            {
+                AmiValidationResultStatus::Correct
+            }
+            (_, Ok(Some(_))) => AmiValidationResultStatus::Incorrect,
+            (_, Ok(None)) => AmiValidationResultStatus::Missing,
+            (_, Err(_)) => AmiValidationResultStatus::Unreachable,
+        };
+        AmiValidationResult {
+            id,
+            expected_image_def,
+            actual_image_def: actual_image_def.unwrap_or_default(),
+            region,
+            status,
+        }
+    }
+}
+
+#[derive(Tabled, Serialize)]
+struct AmiValidationRegionSummary {
+    correct: u64,
+    incorrect: u64,
+    missing: u64,
+    unreachable: u64,
+}
+
+impl From<&HashSet<AmiValidationResult>> for AmiValidationRegionSummary {
+    fn from(results: &HashSet<AmiValidationResult>) -> Self {
+        let mut region_validation = AmiValidationRegionSummary {
+            correct: 0,
+            incorrect: 0,
+            missing: 0,
+            unreachable: 0,
+        };
+        for validation_result in results {
+            match validation_result.status {
+                AmiValidationResultStatus::Correct => region_validation.correct += 1,
+                AmiValidationResultStatus::Incorrect => region_validation.incorrect += 1,
+                AmiValidationResultStatus::Missing => region_validation.missing += 1,
+                AmiValidationResultStatus::Unreachable => region_validation.unreachable += 1,
+            }
+        }
+        region_validation
+    }
+}
+
+/// Represents all EC2 image validation results
+#[derive(Debug)]
+pub(crate) struct AmiValidationResults {
+    pub(crate) results: HashMap<Region, HashSet<AmiValidationResult>>,
+}
+
+impl Default for AmiValidationResults {
+    fn default() -> Self {
+        Self::from_result_map(HashMap::new())
+    }
+}
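+
+// A minimal usage sketch of `AmiValidationResult::new` (the image ID, `ImageDef` value, and
+// region below are hypothetical illustrations, not values used by the validation code itself):
+//
+//     let expected = ImageDef {
+//         id: "ami-11111111".to_string(),
+//         name: "example-image".to_string(),
+//         public: true,
+//         launch_permissions: None,
+//         ena_support: true,
+//         sriov_net_support: "simple".to_string(),
+//     };
+//     // The lookup succeeded but returned no image, so the status becomes `Missing`.
+//     let result = AmiValidationResult::new(
+//         "ami-11111111".to_string(),
+//         expected,
+//         Ok(None),
+//         Region::new("us-west-2"),
+//     );
+//     assert_eq!(result.status, AmiValidationResultStatus::Missing);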
+
+impl Display for AmiValidationResults {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Create a summary for each region, counting the number of parameters per status
+        let region_validations: HashMap<Region, AmiValidationRegionSummary> =
+            self.get_results_summary();
+
+        // Represent the `HashMap` of summaries as a `Table`
+        let table = Table::new(
+            region_validations
+                .iter()
+                .map(|(region, results)| (region.to_string(), results))
+                .collect::<Vec<(String, &AmiValidationRegionSummary)>>(),
+        )
+        .to_string();
+        write!(f, "{}", table)
+    }
+}
+
+impl AmiValidationResults {
+    pub(crate) fn from_result_map(results: HashMap<Region, HashSet<AmiValidationResult>>) -> Self {
+        AmiValidationResults { results }
+    }
+
+    /// Returns a `HashSet` containing all validation results whose status is present in `requested_status`
+    pub(crate) fn get_results_for_status(
+        &self,
+        requested_status: &[AmiValidationResultStatus],
+    ) -> HashSet<&AmiValidationResult> {
+        let mut results = HashSet::new();
+        for region_results in self.results.values() {
+            results.extend(
+                region_results
+                    .iter()
+                    .filter(|result| requested_status.contains(&result.status))
+                    .collect::<HashSet<&AmiValidationResult>>(),
+            )
+        }
+        results
+    }
+
+    /// Returns a `HashSet` containing all validation results
+    pub(crate) fn get_all_results(&self) -> HashSet<&AmiValidationResult> {
+        let mut results = HashSet::new();
+        for region_results in self.results.values() {
+            results.extend(region_results)
+        }
+        results
+    }
+
+    fn get_results_summary(&self) -> HashMap<Region, AmiValidationRegionSummary> {
+        self.results
+            .iter()
+            .map(|(region, region_result)| {
+                (
+                    region.clone(),
+                    AmiValidationRegionSummary::from(region_result),
+                )
+            })
+            .collect()
+    }
+
+    pub(crate) fn get_json_summary(&self) -> serde_json::Value {
+        serde_json::json!(self
+            .get_results_summary()
+            .into_iter()
+            .map(|(region, results)| (region.to_string(), results))
+            .collect::<HashMap<String, AmiValidationRegionSummary>>())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults};
+    use crate::aws::validate_ami::ami::ImageDef;
+    use aws_sdk_ssm::Region;
+    use std::collections::{HashMap, HashSet};
+
+    // These tests assert that the `get_results_for_status` function returns the correct values.
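+    //
+    // As a rough sketch (the `validation_results` binding is hypothetical), a caller filters
+    // results of interest like so:
+    //
+    //     let needs_attention = validation_results.get_results_for_status(&[
+    //         AmiValidationResultStatus::Incorrect,
+    //         AmiValidationResultStatus::Missing,
+    //         AmiValidationResultStatus::Unreachable,
+    //     ]);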
+ + // Tests empty `AmiValidationResults` + #[test] + fn get_results_for_status_empty() { + let results = AmiValidationResults::from_result_map(HashMap::from([ + (Region::new("us-west-2"), HashSet::from([])), + (Region::new("us-east-1"), HashSet::from([])), + ])); + let results_filtered = results.get_results_for_status(&vec![ + AmiValidationResultStatus::Correct, + AmiValidationResultStatus::Incorrect, + AmiValidationResultStatus::Missing, + ]); + + assert_eq!(results_filtered, HashSet::new()); + } + + // Tests the `Correct` status + #[test] + fn get_results_for_status_correct() { + let results = AmiValidationResults::from_result_map(HashMap::from([ + ( + Region::new("us-west-2"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + })), + Region::new("us-west-2"), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: 
"test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + })), + Region::new("us-east-1"), + ), + ]), + ), + ])); + let results_filtered = + results.get_results_for_status(&vec![AmiValidationResultStatus::Correct]); + + assert_eq!( + results_filtered, + HashSet::from([ + &AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ) + ]) + ); + } + + // Tests a filter containing the `Correct` and `Incorrect` statuses + #[test] + fn get_results_for_status_correct_incorrect() { + let results = AmiValidationResults::from_result_map(HashMap::from([ + ( + Region::new("us-west-2"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + 
"test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-east-1"), + ), + ]), + ), + ])); + let results_filtered = results.get_results_for_status(&vec![ + AmiValidationResultStatus::Correct, + AmiValidationResultStatus::Incorrect, + ]); + + assert_eq!( + results_filtered, + HashSet::from([ + &AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ) + ]) + ); + } + + // Tests a filter containing all statuses + #[test] + fn get_results_for_status_all() { + let results = AmiValidationResults::from_result_map(HashMap::from([ + ( + Region::new("us-west-2"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: 
"test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-east-1"), + ), + ]), + ), + ( + Region::new("us-east-2"), + HashSet::from([AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Err(crate::aws::validate_ami::error::Error::UnreachableRegion { + region: "us-east-2".to_string(), + }), + Region::new("us-east-2"), + )]), + ), + ])); + let results_filtered = results.get_results_for_status(&vec![ + AmiValidationResultStatus::Correct, + AmiValidationResultStatus::Incorrect, + AmiValidationResultStatus::Missing, + AmiValidationResultStatus::Unreachable, + ]); + + assert_eq!( + results_filtered, + HashSet::from([ + &AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + 
Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + &AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-west-2"), + ), + &AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(None), + Region::new("us-east-1"), + ), + &AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Err(crate::aws::validate_ami::error::Error::UnreachableRegion { + region: "us-east-2".to_string(), + }), + Region::new("us-east-2"), + ), + ]) + ); + } + + // Tests the `Missing` filter when none of the AmiValidationResults have this status + #[test] + fn get_results_for_status_missing_none() { + let results = AmiValidationResults::from_result_map(HashMap::from([ + ( + Region::new("us-west-2"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: 
"simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-west-2"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + })), + Region::new("us-west-2"), + ), + ]), + ), + ( + Region::new("us-east-1"), + HashSet::from([ + AmiValidationResult::new( + "test3-image-id".to_string(), + ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test3-image-id".to_string(), + name: "test3-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test1-image-id".to_string(), + ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test1-image-id".to_string(), + name: "test1-image".to_string(), + public: true, + launch_permissions: None, + ena_support: false, + sriov_net_support: "simple".to_string(), + })), + Region::new("us-east-1"), + ), + AmiValidationResult::new( + "test2-image-id".to_string(), + ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "simple".to_string(), + }, + Ok(Some(ImageDef { + id: "test2-image-id".to_string(), + name: "test2-image".to_string(), + public: true, + launch_permissions: None, + ena_support: true, + sriov_net_support: "not simple".to_string(), + })), + Region::new("us-east-1"), + ), + ]), + ), + ])); + let results_filtered = + results.get_results_for_status(&vec![AmiValidationResultStatus::Missing]); + + assert_eq!(results_filtered, HashSet::new()); + } +} diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index adf99931..28b82f30 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -123,6 +123,14 @@ fn run() -> Result<()> { .context(error::ValidateSsmSnafu) }) } + SubCommand::ValidateAmi(ref validate_ami_args) => { + let rt = Runtime::new().context(error::RuntimeSnafu)?; + rt.block_on(async { + aws::validate_ami::run(&args, validate_ami_args) + .await + .context(error::ValidateAmiSnafu) + }) + } SubCommand::UploadOva(ref upload_args) => { vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) } @@ -161,6 +169,7 @@ enum SubCommand { Ami(aws::ami::AmiArgs), PublishAmi(aws::publish_ami::PublishArgs), + ValidateAmi(aws::validate_ami::ValidateAmiArgs), Ssm(aws::ssm::SsmArgs), PromoteSsm(aws::promote_ssm::PromoteArgs), @@ -239,6 +248,11 @@ mod error { ValidateSsm { source: crate::aws::validate_ssm::Error, }, + + #[snafu(display("Failed to validate EC2 images: {}", source))] + ValidateAmi { + source: crate::aws::validate_ami::Error, + }, } fn 
publish_ami_message(error: &crate::aws::publish_ami::Error) -> String { From 4b3c9065ec067147a8ebcb4235ca2bfcbc622a1b Mon Sep 17 00:00:00 2001 From: mjsterckx Date: Wed, 19 Apr 2023 20:54:06 +0000 Subject: [PATCH 0933/1356] pubsys: added 'Unreachable' status to SsmValidationResultStatus --- tools/pubsys/src/aws/validate_ssm/mod.rs | 290 ++++++++++++------- tools/pubsys/src/aws/validate_ssm/results.rs | 261 ++++++++--------- 2 files changed, 301 insertions(+), 250 deletions(-) diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs index 493d4e14..16df9380 100644 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -9,7 +9,7 @@ use super::ssm::{SsmKey, SsmParameters}; use crate::aws::client::build_client_config; use crate::Args; use aws_sdk_ssm::{Client as SsmClient, Region}; -use log::{info, trace}; +use log::{error, info, trace}; use pubsys_config::InfraConfig; use snafu::ResultExt; use std::collections::{HashMap, HashSet}; @@ -80,27 +80,41 @@ pub async fn validate( // Retrieve the SSM parameters using the SsmClients info!("Retrieving SSM parameters"); - let parameters = get_parameters_by_prefix(&ssm_clients, ssm_prefix).await; + let parameters = get_parameters_by_prefix(&ssm_clients, ssm_prefix) + .await + .into_iter() + .map(|(region, result)| { + ( + region, + result.map_err(|e| { + error!( + "Failed to retrieve images in region {}: {}", + region.to_string(), + e + ); + error::Error::UnreachableRegion { + region: region.to_string(), + } + }), + ) + }) + .collect::>>(); // Validate the retrieved SSM parameters per region info!("Validating SSM parameters"); - let results: HashMap>> = - parameters - .into_iter() - .map(|(region, region_result)| { - ( - region.clone(), - region_result.map(|result| { - validate_parameters_in_region( - expected_parameters.get(region).unwrap_or(&HashMap::new()), - &result, - validate_ssm_args.check_unexpected, - ) - }), - ) - }) - .collect::>>>( - ); + let results: HashMap> = parameters + .into_iter() + .map(|(region, region_result)| { + ( + region.clone(), + validate_parameters_in_region( + expected_parameters.get(region).unwrap_or(&HashMap::new()), + ®ion_result, + validate_ssm_args.check_unexpected, + ), + ) + }) + .collect::>>(); let validation_results = SsmValidationResults::new(results); @@ -108,24 +122,18 @@ pub async fn validate( if let Some(write_results_path) = &validate_ssm_args.write_results_path { // Filter the results by given status, and if no statuses were given, get all results info!("Writing results to file"); - let filtered_results = validation_results.get_results_for_status( - validate_ssm_args - .write_results_filter - .as_ref() - .unwrap_or(&vec![ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - SsmValidationResultStatus::Missing, - SsmValidationResultStatus::Unexpected, - ]), - ); + let results = if let Some(filter) = &validate_ssm_args.write_results_filter { + validation_results.get_results_for_status(filter) + } else { + validation_results.get_all_results() + }; // Write the results as JSON serde_json::to_writer_pretty( &File::create(write_results_path).context(error::WriteValidationResultsSnafu { path: write_results_path, })?, - &filtered_results, + &results, ) .context(error::SerializeValidationResultsSnafu)?; } @@ -138,38 +146,55 @@ pub async fn validate( /// SsmValidationResult objects. 
pub(crate) fn validate_parameters_in_region( expected_parameters: &HashMap, - actual_parameters: &SsmParameters, + actual_parameters: &Result, check_unexpected: bool, ) -> HashSet { - // Clone the HashMap of actual parameters so items can be removed - let mut actual_parameters = actual_parameters.clone(); - let mut results = HashSet::new(); - - // Validate all expected parameters, creating an SsmValidationResult object and - // removing the corresponding parameter from `actual_parameters` if found - for (ssm_key, ssm_value) in expected_parameters { - results.insert(SsmValidationResult::new( - ssm_key.name.to_owned(), - Some(ssm_value.clone()), - actual_parameters.get(ssm_key).map(|v| v.to_owned()), - ssm_key.region.clone(), - )); - actual_parameters.remove(ssm_key); - } - - if check_unexpected { - // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` - // and therefore get the `Unexpected` status - for (ssm_key, ssm_value) in actual_parameters { - results.insert(SsmValidationResult::new( - ssm_key.name.to_owned(), - None, - Some(ssm_value), - ssm_key.region.clone(), - )); + match actual_parameters { + Ok(actual_parameters) => { + // Clone the HashMap of actual parameters so items can be removed + let mut actual_parameters = actual_parameters.clone(); + let mut results = HashSet::new(); + + // Validate all expected parameters, creating an SsmValidationResult object and + // removing the corresponding parameter from `actual_parameters` if found + for (ssm_key, ssm_value) in expected_parameters { + results.insert(SsmValidationResult::new( + ssm_key.name.to_owned(), + Some(ssm_value.clone()), + Ok(actual_parameters.get(ssm_key).map(|v| v.to_owned())), + ssm_key.region.clone(), + )); + actual_parameters.remove(ssm_key); + } + + if check_unexpected { + // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` + // and therefore get the `Unexpected` status + for (ssm_key, ssm_value) in actual_parameters { + results.insert(SsmValidationResult::new( + ssm_key.name.to_owned(), + None, + Ok(Some(ssm_value)), + ssm_key.region.clone(), + )); + } + } + results } + Err(_) => expected_parameters + .iter() + .map(|(ssm_key, ssm_value)| { + SsmValidationResult::new( + ssm_key.name.to_owned(), + Some(ssm_value.to_owned()), + Err(error::Error::UnreachableRegion { + region: ssm_key.region.to_string(), + }), + ssm_key.region.clone(), + ) + }) + .collect(), } - results } type RegionName = String; @@ -242,27 +267,6 @@ pub(crate) mod error { #[snafu(display("Error reading config: {}", source))] Config { source: pubsys_config::Error }, - #[snafu(display("Error reading validation config at path {}: {}", path.display(), source))] - ReadValidationConfig { - source: std::io::Error, - path: PathBuf, - }, - - #[snafu(display("Error parsing validation config: {}", source))] - ParseValidationConfig { source: serde_json::Error }, - - #[snafu(display("Missing field in validation config: {}", missing))] - MissingField { missing: String }, - - #[snafu(display("Missing region in expected parameters: {}", missing))] - MissingExpectedRegion { missing: String }, - - #[snafu(display("Missing region in actual parameters: {}", missing))] - MissingActualRegion { missing: String }, - - #[snafu(display("Found no parameters in source version {}", version))] - EmptySource { version: String }, - #[snafu(display("Failed to fetch parameters from SSM: {}", source))] FetchSsm { source: ssm::error::Error }, @@ -272,9 +276,6 @@ pub(crate) mod error { 
#[snafu(display("Failed to validate SSM parameters: {}", missing))] ValidateSsm { missing: String }, - #[snafu(display("Failed to validate SSM parameters in region: {}", region))] - ValidateSsmRegion { region: String }, - #[snafu(display("Failed to parse expected parameters file: {}", source))] ParseExpectedParameterFile { source: serde_json::Error }, @@ -290,6 +291,9 @@ pub(crate) mod error { #[snafu(display("Failed to serialize validation results to json: {}", source))] SerializeValidationResults { source: serde_json::Error }, + #[snafu(display("Failed to retrieve SSM parameters from region {}", region))] + UnreachableRegion { region: String }, + #[snafu(display("Failed to write validation results to {}: {}", path.display(), source))] WriteValidationResults { path: PathBuf, @@ -368,23 +372,24 @@ mod test { SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - Some("test3-parameter-value".to_string()), + Ok(Some("test3-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value".to_string()), + Ok(Some("test2-parameter-value".to_string())), Region::new("us-west-2"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + let results = + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); assert_eq!(results, expected_results); } @@ -442,23 +447,24 @@ mod test { SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - Some("test3-parameter-value-wrong".to_string()), + Ok(Some("test3-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value-wrong".to_string()), + Ok(Some("test1-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + let results = + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); assert_eq!(results, expected_results); } @@ -494,23 +500,24 @@ mod test { SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + let results = + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); assert_eq!(results, expected_results); } @@ -546,23 +553,24 @@ mod test { SsmValidationResult::new( 
"test3-parameter-name".to_string(), None, - Some("test3-parameter-value".to_string()), + Ok(Some("test3-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), None, - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), None, - Some("test2-parameter-value".to_string()), + Ok(Some("test2-parameter-value".to_string())), Region::new("us-west-2"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + let results = + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); assert_eq!(results, expected_results); } @@ -621,34 +629,35 @@ mod test { SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), ), ]); - let results = validate_parameters_in_region(&expected_parameters, &actual_parameters, true); + let results = + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); assert_eq!(results, expected_results); } - // Tests validation of parameters where each status (Correct, Incorrect, Missing, Unexpected) + // Tests validation of parameters where each reachable status (Correct, Incorrect, Missing, Unexpected) // happens once and `--check-unexpected` is false #[test] fn validate_parameters_mixed_unexpected_false() { @@ -702,24 +711,87 @@ mod test { SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), ]); let results = - validate_parameters_in_region(&expected_parameters, &actual_parameters, false); + validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), false); + + assert_eq!(results, expected_results); + } + + // Tests validation of parameters where the status is Unreachable + #[test] + fn validate_parameters_unreachable() { + let expected_parameters: HashMap = HashMap::from([ + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test1-parameter-name".to_string(), + }, + "test1-parameter-value".to_string(), + ), + ( + SsmKey { + region: Region::new("us-west-2"), + name: "test2-parameter-name".to_string(), + }, + "test2-parameter-value".to_string(), + ), + ( + SsmKey { + 
region: Region::new("us-east-1"), + name: "test3-parameter-name".to_string(), + }, + "test3-parameter-value".to_string(), + ), + ]); + let expected_results = HashSet::from_iter(vec![ + SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-east-1"), + ), + SsmValidationResult::new( + "test1-parameter-name".to_string(), + Some("test1-parameter-value".to_string()), + Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-west-2"), + ), + SsmValidationResult::new( + "test2-parameter-name".to_string(), + Some("test2-parameter-value".to_string()), + Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + Region::new("us-west-2"), + ), + ]); + let results = validate_parameters_in_region( + &expected_parameters, + &Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-west-2".to_string(), + }), + false, + ); assert_eq!(results, expected_results); } diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs index 51e3de2a..dc098281 100644 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ b/tools/pubsys/src/aws/validate_ssm/results.rs @@ -1,11 +1,11 @@ //! The results module owns the reporting of SSM validation results. -use crate::aws::ssm::ssm::Result; +use crate::aws::validate_ssm::Result; use aws_sdk_ssm::Region; use serde::{Deserialize, Serialize}; +use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; use std::collections::{HashMap, HashSet}; use std::fmt::{self, Display}; -use std::str::FromStr; use tabled::{Table, Tabled}; /// Represent the possible status of an SSM validation @@ -22,47 +22,24 @@ pub enum SsmValidationResultStatus { /// The parameter was present in the actual parameters but not expected Unexpected, -} -impl Display for SsmValidationResultStatus { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Correct => write!(f, "Correct"), - Self::Incorrect => write!(f, "Incorrect"), - Self::Missing => write!(f, "Missing"), - Self::Unexpected => write!(f, "Unexpected"), - } - } + /// The region containing the parameter is not reachable + Unreachable, } -impl FromStr for SsmValidationResultStatus { - type Err = super::Error; - - fn from_str(s: &str) -> std::result::Result { - match s { - "Correct" => Ok(Self::Correct), - "Incorrect" => Ok(Self::Incorrect), - "Missing" => Ok(Self::Missing), - "Unexpected" => Ok(Self::Unexpected), - filter => Err(Self::Err::InvalidStatusFilter { - filter: filter.to_string(), - }), - } - } -} +derive_display_from_serialize!(SsmValidationResultStatus); +derive_fromstr_from_deserialize!(SsmValidationResultStatus); /// Represents a single SSM validation result -#[derive(Debug, Eq, Hash, PartialEq, Tabled, Serialize)] +#[derive(Debug, Eq, Hash, PartialEq, Serialize)] pub struct SsmValidationResult { /// The name of the parameter pub(crate) name: String, /// The expected value of the parameter - #[tabled(display_with = "display_option")] pub(crate) expected_value: Option, /// The actual retrieved value of the parameter - #[tabled(display_with = "display_option")] pub(crate) actual_value: Option, /// The region the parameter resides in @@ -73,13 +50,6 @@ pub struct SsmValidationResult { pub(crate) status: SsmValidationResultStatus, } -fn display_option(option: 
&Option) -> &str { - match option { - Some(option) => option, - None => "N/A", - } -} - fn serialize_region(region: &Region, serializer: S) -> std::result::Result where S: serde::Serializer, @@ -91,23 +61,24 @@ impl SsmValidationResult { pub(crate) fn new( name: String, expected_value: Option, - actual_value: Option, + actual_value: Result>, region: Region, ) -> SsmValidationResult { // Determine the validation status based on equality, presence, and absence of expected and // actual parameter values let status = match (&expected_value, &actual_value) { - (Some(expected_value), Some(actual_value)) if actual_value.eq(expected_value) => { + (Some(expected_value), Ok(Some(actual_value))) if actual_value.eq(expected_value) => { SsmValidationResultStatus::Correct } - (Some(_), Some(_)) => SsmValidationResultStatus::Incorrect, - (_, None) => SsmValidationResultStatus::Missing, - (None, _) => SsmValidationResultStatus::Unexpected, + (Some(_), Ok(Some(_))) => SsmValidationResultStatus::Incorrect, + (_, Ok(None)) => SsmValidationResultStatus::Missing, + (None, Ok(_)) => SsmValidationResultStatus::Unexpected, + (_, Err(_)) => SsmValidationResultStatus::Unreachable, }; SsmValidationResult { name, expected_value, - actual_value, + actual_value: actual_value.unwrap_or_default(), region, status, } @@ -116,11 +87,11 @@ impl SsmValidationResult { #[derive(Tabled, Serialize)] struct SsmValidationRegionSummary { - correct: i32, - incorrect: i32, - missing: i32, - unexpected: i32, - accessible: bool, + correct: u64, + incorrect: u64, + missing: u64, + unexpected: u64, + unreachable: u64, } impl From<&HashSet> for SsmValidationRegionSummary { @@ -130,7 +101,7 @@ impl From<&HashSet> for SsmValidationRegionSummary { incorrect: 0, missing: 0, unexpected: 0, - accessible: true, + unreachable: 0, }; for validation_result in results { match validation_result.status { @@ -138,30 +109,17 @@ impl From<&HashSet> for SsmValidationRegionSummary { SsmValidationResultStatus::Incorrect => region_validation.incorrect += 1, SsmValidationResultStatus::Missing => region_validation.missing += 1, SsmValidationResultStatus::Unexpected => region_validation.unexpected += 1, + SsmValidationResultStatus::Unreachable => region_validation.unreachable += 1, } } region_validation } } -impl SsmValidationRegionSummary { - fn no_valid_results() -> Self { - // When the parameters in a region couldn't be retrieved, use `-1` to indicate this in the - // output table and set `accessible` to `false` - SsmValidationRegionSummary { - correct: -1, - incorrect: -1, - missing: -1, - unexpected: -1, - accessible: false, - } - } -} - /// Represents all SSM validation results #[derive(Debug)] pub struct SsmValidationResults { - pub(crate) results: HashMap>>, + pub(crate) results: HashMap>, } impl Default for SsmValidationResults { @@ -189,7 +147,7 @@ impl Display for SsmValidationResults { } impl SsmValidationResults { - pub fn new(results: HashMap>>) -> Self { + pub fn new(results: HashMap>) -> Self { SsmValidationResults { results } } @@ -200,7 +158,7 @@ impl SsmValidationResults { requested_status: &[SsmValidationResultStatus], ) -> HashSet<&SsmValidationResult> { let mut results = HashSet::new(); - for region_results in self.results.values().flatten() { + for region_results in self.results.values() { results.extend( region_results .iter() @@ -211,22 +169,23 @@ impl SsmValidationResults { results } + /// Returns a `HashSet` containing all validation results + pub(crate) fn get_all_results(&self) -> HashSet<&SsmValidationResult> { + let mut results = 
HashSet::new(); + for region_results in self.results.values() { + results.extend(region_results) + } + results + } + fn get_results_summary(&self) -> HashMap { self.results .iter() .map(|(region, region_result)| { - region_result - .as_ref() - .map(|region_validation| { - ( - region.clone(), - SsmValidationRegionSummary::from(region_validation), - ) - }) - .unwrap_or(( - region.clone(), - SsmValidationRegionSummary::no_valid_results(), - )) + ( + region.clone(), + SsmValidationRegionSummary::from(region_result), + ) }) .collect() } @@ -255,8 +214,8 @@ mod test { #[test] fn get_results_for_status_empty() { let results = SsmValidationResults::new(HashMap::from([ - (Region::new("us-west-2"), Ok(HashSet::from([]))), - (Region::new("us-east-1"), Ok(HashSet::from([]))), + (Region::new("us-west-2"), HashSet::from([])), + (Region::new("us-east-1"), HashSet::from([])), ])); let results_filtered = results.get_results_for_status(&[ SsmValidationResultStatus::Correct, @@ -274,61 +233,61 @@ mod test { let results = SsmValidationResults::new(HashMap::from([ ( Region::new("us-west-2"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-west-2"), ), - ])), + ]), ), ( Region::new("us-east-1"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), ), - ])), + ]), ), ])); let results_filtered = @@ -340,13 +299,13 @@ mod test { &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ) ]) @@ -359,61 +318,61 @@ mod test { let results = SsmValidationResults::new(HashMap::from([ ( Region::new("us-west-2"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( 
"test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-west-2"), ), - ])), + ]), ), ( Region::new("us-east-1"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), ), - ])), + ]), ), ])); let results_filtered = results.get_results_for_status(&[ @@ -427,25 +386,25 @@ mod test { &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ) ]) @@ -458,61 +417,72 @@ mod test { let results = SsmValidationResults::new(HashMap::from([ ( Region::new("us-west-2"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( 
"test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-west-2"), ), - ])), + ]), ), ( Region::new("us-east-1"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), ), - ])), + ]), + ), + ( + Region::new("us-east-2"), + HashSet::from([SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-east-2".to_string(), + }), + Region::new("us-east-2"), + )]), ), ])); let results_filtered = results.get_results_for_status(&[ @@ -520,6 +490,7 @@ mod test { SsmValidationResultStatus::Incorrect, SsmValidationResultStatus::Missing, SsmValidationResultStatus::Unexpected, + SsmValidationResultStatus::Unreachable, ]); assert_eq!( @@ -528,51 +499,59 @@ mod test { &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), &SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-west-2"), ), &SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-west-2"), ), &SsmValidationResult::new( "test3-parameter-name".to_string(), Some("test3-parameter-value".to_string()), - None, + Ok(None), Region::new("us-east-1"), ), &SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), - ) + ), + &SsmValidationResult::new( + "test3-parameter-name".to_string(), + Some("test3-parameter-value".to_string()), + Err(crate::aws::validate_ssm::Error::UnreachableRegion { + region: "us-east-2".to_string() + }), + Region::new("us-east-2"), + 
), ]) ); } @@ -583,49 +562,49 @@ mod test { let results = SsmValidationResults::new(HashMap::from([ ( Region::new("us-west-2"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-west-2"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-west-2"), ), - ])), + ]), ), ( Region::new("us-east-1"), - Ok(HashSet::from([ + HashSet::from([ SsmValidationResult::new( "test1-parameter-name".to_string(), Some("test1-parameter-value".to_string()), - Some("test1-parameter-value".to_string()), + Ok(Some("test1-parameter-value".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test2-parameter-name".to_string(), Some("test2-parameter-value".to_string()), - Some("test2-parameter-value-wrong".to_string()), + Ok(Some("test2-parameter-value-wrong".to_string())), Region::new("us-east-1"), ), SsmValidationResult::new( "test4-parameter-name".to_string(), None, - Some("test4-parameter-value".to_string()), + Ok(Some("test4-parameter-value".to_string())), Region::new("us-east-1"), ), - ])), + ]), ), ])); let results_filtered = From 6fff6744100e3699fa7d3b214a0073aa4ce9e591 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 18 Apr 2023 14:31:47 -0700 Subject: [PATCH 0934/1356] models,packages: add 'hostname-override' for kubelet, add kubelet option This adds the 'kubernetes.hostname-override' setting for kubelet's '--hostname-override' option. --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index e9576c52..ce32db6c 100644 --- a/README.md +++ b/README.md @@ -548,6 +548,18 @@ For Kubernetes variants in AWS, the following settings are set for you automatic * `settings.kubernetes.cluster-dns-ip`: Derived from the EKS Service IP CIDR or the CIDR block of the primary network interface. * `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) +* `settings.kubernetes.hostname-override`: The node name kubelet uses as identification instead of the hostname or the name determined by the in-tree cloud provider if that's enabled. + + **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user-data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. + + Most users don't need to change this setting. + If left unset, the system hostname will be used instead. + The `settings.network.hostname` setting can be used to specify the value for both `kubelet` and the host. + Only set this override if you intend for the `kubelet` to register with a different name than the host. + + For `aws-k8s-1.26` variants, which use the "external" cloud provider, a hostname override will be automatically generated by querying the EC2 API for the private DNS name of the instance. 
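+
+ As a sketch, that auto-generated override is equivalent to supplying the instance's private DNS name yourself; the node name below is a hypothetical example, not a value to copy:
+
+ ```toml
+ [settings.kubernetes]
+ # hypothetical private DNS name; substitute the value EC2 reports for your instance
+ hostname-override = "ip-192-168-1-2.us-west-2.compute.internal"
+ ```
+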
+ This is done for backwards compatibility with the deprecated "aws" cloud provider, which adjusted the hostname in a similar way. + Future `aws-k8s-*` variants may remove this behavior. #### Amazon ECS settings From 38c9d884c7636c4a8c771dd1ecea32935873ee04 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 14 Apr 2023 09:49:26 +0000 Subject: [PATCH 0935/1356] kernel-5.10: update to 5.10.176 Rebase to Amazon Linux upstream version based on 5.10.176. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 8f6374ca..b874948e 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/bfdedd54405ee75070fa9b53342399680e3145e362f41deb1276de2082625061/kernel-5.10.173-154.642.amzn2.src.rpm" -sha512 = "b98f97a00dfbec2ba6681faa326782bbe02c8a57758890076f71bb07a149d6dee3dba1237c07fb195c6a65956bee572f0d8757898375f437244eec7e69938e0b" +url = "https://cdn.amazonlinux.com/blobstore/c945e51a5ad81a6fd3ec405e57ad4ccd8ea44c8e26b1165771768e3da28fc382/kernel-5.10.176-157.645.amzn2.src.rpm" +sha512 = "57bb9eb168ad6051c7a8e938edb5f70ef9a1234a5e32f31a96dcb6dec8e3a48e3b4ba41f00fd5ea2934c89bb28c37c5553dc34e96345c354b563707ae52ae59d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index ad5a5635..1551c6cd 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.173 +Version: 5.10.176 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/bfdedd54405ee75070fa9b53342399680e3145e362f41deb1276de2082625061/kernel-5.10.173-154.642.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c945e51a5ad81a6fd3ec405e57ad4ccd8ea44c8e26b1165771768e3da28fc382/kernel-5.10.176-157.645.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From b0e3837585114b7614f692f4639782d7970a62df Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 14 Apr 2023 09:49:44 +0000 Subject: [PATCH 0936/1356] kernel-5.15: update to 5.15.104 Rebase to Amazon Linux upstream version based on 5.15.104. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index a80c4155..20d325e8 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/567d93a3639fa16d002a80a970223b8dc134fc4d1214125b379750ee689a76ea/kernel-5.15.102-61.139.amzn2.src.rpm" -sha512 = "6df4d568ef60cd631a7764d33f771cae6be576cbbf0400e86eafdad0a86ddeb65c96dc2ad40698573277fa8afe1076cdc9e45c9776f6f7f782a273f0e416fc88" +url = "https://cdn.amazonlinux.com/blobstore/0e9e64310ac3393b8630cc3e40ae23a8ae04cdf1e7c76f578f18bf94dcd72771/kernel-5.15.104-63.140.amzn2.src.rpm" +sha512 = "6f80bbec90263a331fd93bc429b0050833229bc437d9e860f56e711a2689c20fa17828434675c0fa40fa4dc5ed7e75e68699640bb77d02a60caf460184cd908a" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 06a0fa44..2bb6f61b 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.102 +Version: 5.15.104 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/567d93a3639fa16d002a80a970223b8dc134fc4d1214125b379750ee689a76ea/kernel-5.15.102-61.139.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/0e9e64310ac3393b8630cc3e40ae23a8ae04cdf1e7c76f578f18bf94dcd72771/kernel-5.15.104-63.140.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 79488466f774f7010422631dd62135100a6ab0c7 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 21 Apr 2023 08:38:45 +0000 Subject: [PATCH 0937/1356] kernel-5.10: update to 5.10.177 Rebase to Amazon Linux upstream version based on 5.10.177. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index b874948e..27522dd4 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/c945e51a5ad81a6fd3ec405e57ad4ccd8ea44c8e26b1165771768e3da28fc382/kernel-5.10.176-157.645.amzn2.src.rpm" -sha512 = "57bb9eb168ad6051c7a8e938edb5f70ef9a1234a5e32f31a96dcb6dec8e3a48e3b4ba41f00fd5ea2934c89bb28c37c5553dc34e96345c354b563707ae52ae59d" +url = "https://cdn.amazonlinux.com/blobstore/7ca24767b6ccf9edb988e7415593fb1fef1691b323a0a5f41077742adecc881f/kernel-5.10.177-158.645.amzn2.src.rpm" +sha512 = "036798180a75bb7c7872306845a2b15118c7183472f77823c6398774c1abc692489a5bb1e0f07fb10ad036ea68dee769a51e4ad4181afdddd0c267e1233c8bdf" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 1551c6cd..c831af63 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.176 +Version: 5.10.177 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/c945e51a5ad81a6fd3ec405e57ad4ccd8ea44c8e26b1165771768e3da28fc382/kernel-5.10.176-157.645.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/7ca24767b6ccf9edb988e7415593fb1fef1691b323a0a5f41077742adecc881f/kernel-5.10.177-158.645.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 1af3efdc282389ae29efeee071d2286c1c6bac13 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 21 Apr 2023 08:39:13 +0000 Subject: [PATCH 0938/1356] kernel-5.15: update to 5.15.106 Rebase to Amazon Linux upstream version based on 5.15.106. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 20d325e8..8608dfc6 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/0e9e64310ac3393b8630cc3e40ae23a8ae04cdf1e7c76f578f18bf94dcd72771/kernel-5.15.104-63.140.amzn2.src.rpm" -sha512 = "6f80bbec90263a331fd93bc429b0050833229bc437d9e860f56e711a2689c20fa17828434675c0fa40fa4dc5ed7e75e68699640bb77d02a60caf460184cd908a" +url = "https://cdn.amazonlinux.com/blobstore/a05414b6b80f2113b47d97b12e0a706c05597c2dd2100da31341333605de9209/kernel-5.15.106-64.140.amzn2.src.rpm" +sha512 = "0a5a0319bd4c019a31d8139f4b5468ca0abc989f1846f4a7ae90c05e90be29119d33d43dc64cd39ddbcd0bf6b445aaa7379d0ca6392c146afa34c64f6dbb156f" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 2bb6f61b..b1314124 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.104 +Version: 5.15.106 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/0e9e64310ac3393b8630cc3e40ae23a8ae04cdf1e7c76f578f18bf94dcd72771/kernel-5.15.104-63.140.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/a05414b6b80f2113b47d97b12e0a706c05597c2dd2100da31341333605de9209/kernel-5.15.106-64.140.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 938c05d0cbc958b020e7ab9939fc418d9ebdeb9c Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 21 Apr 2023 09:20:47 +0000 Subject: [PATCH 0939/1356] Revert "kernel-5.10,5.15: Backport of upstream commit `bpf: Adjust insufficient" This reverts commit 819f78704040df2e12428ec56f21de15b085368b. The backported commit is now included in the upstream variant, through v5.10.177 and v5.15.105. 
Signed-off-by: Leonard Foerster --- ...t-insufficient-default-bpf_jit_limit.patch | 76 ------------------- packages/kernel-5.10/kernel-5.10.spec | 2 - ...t-insufficient-default-bpf_jit_limit.patch | 76 ------------------- packages/kernel-5.15/kernel-5.15.spec | 2 - 4 files changed, 156 deletions(-) delete mode 100644 packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch delete mode 100644 packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch diff --git a/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch b/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch deleted file mode 100644 index d47e3014..00000000 --- a/packages/kernel-5.10/1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch +++ /dev/null @@ -1,76 +0,0 @@ -From a4bbab27c4bf69486f5846d44134eb31c37e9b22 Mon Sep 17 00:00:00 2001 -From: Daniel Borkmann -Date: Mon, 20 Mar 2023 15:37:25 +0100 -Subject: [PATCH] bpf: Adjust insufficient default bpf_jit_limit - -[ Upstream commit 10ec8ca8ec1a2f04c4ed90897225231c58c124a7 ] - -We've seen recent AWS EKS (Kubernetes) user reports like the following: - - After upgrading EKS nodes from v20230203 to v20230217 on our 1.24 EKS - clusters after a few days a number of the nodes have containers stuck - in ContainerCreating state or liveness/readiness probes reporting the - following error: - - Readiness probe errored: rpc error: code = Unknown desc = failed to - exec in container: failed to start exec "4a11039f730203ffc003b7[...]": - OCI runtime exec failed: exec failed: unable to start container process: - unable to init seccomp: error loading seccomp filter into kernel: - error loading seccomp filter: errno 524: unknown - - However, we had not been seeing this issue on previous AMIs and it only - started to occur on v20230217 (following the upgrade from kernel 5.4 to - 5.10) with no other changes to the underlying cluster or workloads. - - We tried the suggestions from that issue (sysctl net.core.bpf_jit_limit=452534528) - which helped to immediately allow containers to be created and probes to - execute but after approximately a day the issue returned and the value - returned by cat /proc/vmallocinfo | grep bpf_jit | awk '{s+=$2} END {print s}' - was steadily increasing. - -I tested bpf tree to observe bpf_jit_charge_modmem, bpf_jit_uncharge_modmem -their sizes passed in as well as bpf_jit_current under tcpdump BPF filter, -seccomp BPF and native (e)BPF programs, and the behavior all looks sane -and expected, that is nothing "leaking" from an upstream perspective. - -The bpf_jit_limit knob was originally added in order to avoid a situation -where unprivileged applications loading BPF programs (e.g. seccomp BPF -policies) consuming all the module memory space via BPF JIT such that loading -of kernel modules would be prevented. The default limit was defined back in -2018 and while good enough back then, we are generally seeing far more BPF -consumers today. - -Adjust the limit for the BPF JIT pool from originally 1/4 to now 1/2 of the -module memory space to better reflect today's needs and avoid more users -running into potentially hard to debug issues. 
- -Fixes: fdadd04931c2 ("bpf: fix bpf_jit_limit knob for PAGE_SIZE >= 64K") -Reported-by: Stephen Haynes -Reported-by: Lefteris Alexakis -Signed-off-by: Daniel Borkmann -Link: https://github.com/awslabs/amazon-eks-ami/issues/1179 -Link: https://github.com/awslabs/amazon-eks-ami/issues/1219 -Reviewed-by: Kuniyuki Iwashima -Link: https://lore.kernel.org/r/20230320143725.8394-1-daniel@iogearbox.net -Signed-off-by: Alexei Starovoitov -Signed-off-by: Sasha Levin ---- - kernel/bpf/core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c -index 73d4b1e32fbd..d3f6a070875c 100644 ---- a/kernel/bpf/core.c -+++ b/kernel/bpf/core.c -@@ -826,7 +826,7 @@ static int __init bpf_jit_charge_init(void) - { - /* Only used as heuristic here to derive limit. */ - bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); -- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, -+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, - PAGE_SIZE), LONG_MAX); - return 0; - } --- -2.39.2 - diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index c831af63..aa9e5629 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -17,8 +17,6 @@ Source103: config-bottlerocket-vmware Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1002: 1002-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch -# Backport of bpf jit limit adjustments, see https://github.com/awslabs/amazon-eks-ami/issues/1179 -Patch1003: 1003-bpf-Adjust-insufficient-default-bpf_jit_limit.patch # Add zstd support for compressed kernel modules Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch diff --git a/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch b/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch deleted file mode 100644 index 0bbb8036..00000000 --- a/packages/kernel-5.15/1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 54869daa6a437887614274f65298ba44a3fac63a Mon Sep 17 00:00:00 2001 -From: Daniel Borkmann -Date: Mon, 20 Mar 2023 15:37:25 +0100 -Subject: [PATCH] bpf: Adjust insufficient default bpf_jit_limit - -[ Upstream commit 10ec8ca8ec1a2f04c4ed90897225231c58c124a7 ] - -We've seen recent AWS EKS (Kubernetes) user reports like the following: - - After upgrading EKS nodes from v20230203 to v20230217 on our 1.24 EKS - clusters after a few days a number of the nodes have containers stuck - in ContainerCreating state or liveness/readiness probes reporting the - following error: - - Readiness probe errored: rpc error: code = Unknown desc = failed to - exec in container: failed to start exec "4a11039f730203ffc003b7[...]": - OCI runtime exec failed: exec failed: unable to start container process: - unable to init seccomp: error loading seccomp filter into kernel: - error loading seccomp filter: errno 524: unknown - - However, we had not been seeing this issue on previous AMIs and it only - started to occur on v20230217 (following the upgrade from kernel 5.4 to - 5.10) with no other changes to the underlying cluster or workloads. 
- - We tried the suggestions from that issue (sysctl net.core.bpf_jit_limit=452534528) - which helped to immediately allow containers to be created and probes to - execute but after approximately a day the issue returned and the value - returned by cat /proc/vmallocinfo | grep bpf_jit | awk '{s+=$2} END {print s}' - was steadily increasing. - -I tested bpf tree to observe bpf_jit_charge_modmem, bpf_jit_uncharge_modmem -their sizes passed in as well as bpf_jit_current under tcpdump BPF filter, -seccomp BPF and native (e)BPF programs, and the behavior all looks sane -and expected, that is nothing "leaking" from an upstream perspective. - -The bpf_jit_limit knob was originally added in order to avoid a situation -where unprivileged applications loading BPF programs (e.g. seccomp BPF -policies) consuming all the module memory space via BPF JIT such that loading -of kernel modules would be prevented. The default limit was defined back in -2018 and while good enough back then, we are generally seeing far more BPF -consumers today. - -Adjust the limit for the BPF JIT pool from originally 1/4 to now 1/2 of the -module memory space to better reflect today's needs and avoid more users -running into potentially hard to debug issues. - -Fixes: fdadd04931c2 ("bpf: fix bpf_jit_limit knob for PAGE_SIZE >= 64K") -Reported-by: Stephen Haynes -Reported-by: Lefteris Alexakis -Signed-off-by: Daniel Borkmann -Link: https://github.com/awslabs/amazon-eks-ami/issues/1179 -Link: https://github.com/awslabs/amazon-eks-ami/issues/1219 -Reviewed-by: Kuniyuki Iwashima -Link: https://lore.kernel.org/r/20230320143725.8394-1-daniel@iogearbox.net -Signed-off-by: Alexei Starovoitov -Signed-off-by: Sasha Levin ---- - kernel/bpf/core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c -index cea0d1296599..f7c27c1cc593 100644 ---- a/kernel/bpf/core.c -+++ b/kernel/bpf/core.c -@@ -829,7 +829,7 @@ static int __init bpf_jit_charge_init(void) - { - /* Only used as heuristic here to derive limit. */ - bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); -- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, -+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, - PAGE_SIZE), LONG_MAX); - return 0; - } --- -2.39.2 - diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index b1314124..b2bb478b 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -19,8 +19,6 @@ Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch -# Backport of bpf jit limit adjustments, see https://github.com/awslabs/amazon-eks-ami/issues/1179 -Patch1004: 1004-bpf-Adjust-insufficient-default-bpf_jit_limit.patch BuildRequires: bc BuildRequires: elfutils-devel From c17d18a702699f60e365b91c3834c90b262ac499 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 6 Apr 2023 19:04:16 +0000 Subject: [PATCH 0940/1356] kubelet: Enable setting cpuManagerPolicyOptions This adds a new `settings.kubernetes.cpu-manager-policy-options` setting to allow configuring the kubelet cpuManagerPolicyOptions. When the CPU manager policy is set to "static", these options allow affecting the behavior of CPU allocation for pods. 
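For example (mirroring the TOML example added to the README below), user-data along these lines enables the option:

```toml
[settings.kubernetes]
cpu-manager-policy = "static"
# apply the only currently supported policy option for the static policy
cpu-manager-policy-options = ["full-pcpus-only"]
```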
Signed-off-by: Sean McGinnis --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index ce32db6c..ede3ae05 100644 --- a/README.md +++ b/README.md @@ -418,6 +418,18 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. * `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. +* `settings.kubernetes.cpu-manager-policy-options`: Policy options to apply when `cpu-manager-policy` is set to `static`. Currently `full-pcpus-only` is the only option. + + For example: + + ```toml + [settings.kubernetes] + cpu-manager-policy = "static" + cpu-manager-policy-options = [ + "full-pcpus-only" + ] + ``` + * `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. * `settings.kubernetes.credential-providers`: Contains a collection of Kubelet image credential provider settings. Each name under `credential-providers` is the name of the plugin to configure. From 1a116895710c26003d19bd5f3e6f8080517d7dbd Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 25 Apr 2023 07:53:30 +0000 Subject: [PATCH 0941/1356] kernel: Switch SCSI_VIRTIO driver to built-in Enable usage of the Bottlerocket metal variant on other virtualization stacks where the boot device is provided through the scsis virtio driver. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 3 +++ packages/kernel-5.15/config-bottlerocket-metal | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 1d9e1bb2..167620f0 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -103,3 +103,6 @@ CONFIG_MEGARAID_SAS=y # Microsemi PQI controllers CONFIG_SCSI_SMARTPQI=y + +# Support for virtio scsi boot devices for other cloud providers +CONFIG_SCSI_VIRTIO=y diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index a162ccb5..abef8fba 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -123,3 +123,6 @@ CONFIG_MEGARAID_SAS=y # Microsemi PQI controllers CONFIG_SCSI_SMARTPQI=y + +# Support for virtio scsi boot devices for other cloud providers +CONFIG_SCSI_VIRTIO=y From 02c505d4a290373b05fb1982df8465362ac2656b Mon Sep 17 00:00:00 2001 From: ecpullen Date: Mon, 1 May 2023 17:47:38 +0000 Subject: [PATCH 0942/1356] testsys: Update testsys to v0.0.7 --- tools/Cargo.lock | 323 +++++++++++++++----------------- tools/deny.toml | 11 ++ tools/testsys-config/Cargo.toml | 2 +- tools/testsys/Cargo.toml | 4 +- tools/testsys/src/status.rs | 47 ++--- 5 files changed, 192 insertions(+), 195 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index c2c7621f..a9c0722f 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -63,7 +63,7 @@ dependencies = [ "argh_shared", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -91,7 +91,7 @@ checksum = "3b015a331cc64ebd1774ba119538573603427eaace0a1950c423ab971f903796" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -102,7 +102,7 @@ checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -497,7 +497,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.2", "lazy_static", "pin-project-lite", "serde", @@ -655,6 +655,12 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64-simd" version = "0.7.0" @@ -681,15 +687,15 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.0.6" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.6#a69b7006887249f89238b428a9acb6a0050dd384" +version = "0.0.7" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" dependencies = [ "builder-derive", "configuration-derive", "serde", "serde_json", "serde_plain", - "serde_yaml", + "serde_yaml 0.8.26", "testsys-model", ] @@ -714,13 +720,13 @@ dependencies = [ [[package]] name = "builder-derive" -version = "0.0.6" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.6#a69b7006887249f89238b428a9acb6a0050dd384" +version = "0.0.7" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" 
dependencies = [ "proc-macro2", "quote", "serde", - "syn", + "syn 1.0.109", ] [[package]] @@ -860,7 +866,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -910,11 +916,11 @@ dependencies = [ [[package]] name = "configuration-derive" -version = "0.0.6" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.6#a69b7006887249f89238b428a9acb6a0050dd384" +version = "0.0.7" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1033,7 +1039,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1060,7 +1066,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 1.0.109", ] [[package]] @@ -1077,7 +1083,7 @@ checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1101,7 +1107,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1112,7 +1118,7 @@ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1254,7 +1260,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1272,21 +1278,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.1.0" @@ -1352,7 +1343,7 @@ checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1630,36 +1621,33 @@ dependencies = [ ] [[package]] -name = "hyper-openssl" -version = "0.9.2" +name = "hyper-rustls" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ee5d7a8f718585d1c3c61dfde28ef5b0bb14734b4db13f5ada856cdc6c612b" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", - "linked_hash_set", - "once_cell", - "openssl", - "openssl-sys", - "parking_lot", + "log", + "rustls 0.20.8", + "rustls-native-certs", "tokio", - "tokio-openssl", - "tower-layer", + "tokio-rustls 0.23.4", ] [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", "log", - "rustls", + "rustls 0.21.0", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.0", ] [[package]] @@ 
-1751,7 +1739,7 @@ dependencies = [ "log", "pubsys-config", "serde_json", - "serde_yaml", + "serde_yaml 0.8.26", "sha2", "shell-words", "simplelog", @@ -1817,9 +1805,9 @@ dependencies = [ [[package]] name = "json-patch" -version = "0.3.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e712e62827c382a77b87f590532febb1f8b2fdbc3eefa1ee37fe7281687075ef" +checksum = "1f54898088ccb91df1b492cc80029a6fdf1c48ca0db7c6822a8babad69c94658" dependencies = [ "serde", "serde_json", @@ -1840,11 +1828,11 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1985030683a2bac402cbda61222195de80d3f66b4c87ab56e5fea379bd98c3" +checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" dependencies = [ - "base64 0.20.0", + "base64 0.21.0", "bytes", "chrono", "serde", @@ -1854,9 +1842,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.78.0" +version = "0.82.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ee2ba94546e32a5aef943e5831c6ac25592ff8dcfa8b2a06e0aaea90c69188" +checksum = "dc7d3d52dd5c871991679102e80dfb192faaaa09fecdbccdd8c55af264ce7a8f" dependencies = [ "k8s-openapi", "kube-client", @@ -1866,9 +1854,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.78.0" +version = "0.82.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c9ca1f597bd48ed26f45f601bf2fa3aaa0933b8d1652d883b8444519b72af4a" +checksum = "544339f1665488243f79080441cacb09c997746fd763342303e66eebb9d3ba13" dependencies = [ "base64 0.20.0", "bytes", @@ -1879,19 +1867,20 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-openssl", + "hyper-rustls 0.24.0", "hyper-timeout", "jsonpath_lib", "k8s-openapi", "kube-core", - "openssl", "pem", "pin-project", "rand", + "rustls 0.21.0", + "rustls-pemfile", "secrecy", "serde", "serde_json", - "serde_yaml", + "serde_yaml 0.9.21", "thiserror", "tokio", "tokio-tungstenite", @@ -1903,9 +1892,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.78.0" +version = "0.82.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f2c6d1a2d1584859499eb05a41c5a44713818041621fa7515cfdbdf4769ea7" +checksum = "25983d07f414dfffba08c5951fe110f649113416b1d8e22f7c89c750eb2555a7" dependencies = [ "chrono", "form_urlencoded", @@ -1921,15 +1910,15 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.78.0" +version = "0.82.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e1dfe288fd87029f87c5713ddf585a4221e1b5be8f8c7c02ba28f5211f2a6d7" +checksum = "5af652b642aca19ef5194de3506aa39f89d788d5326a570da68b13a02d6c5ba2" dependencies = [ "darling", "proc-macro2", "quote", "serde_json", - "syn", + "syn 1.0.109", ] [[package]] @@ -1959,15 +1948,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linked_hash_set" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "linux-raw-sys" version = "0.1.4" @@ -2153,50 +2133,12 @@ version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" -[[package]] -name = "openssl" -version = "0.10.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-sys" -version = "0.9.85" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "ordered-float" version = "2.10.0" @@ -2343,7 +2285,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2374,7 +2316,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2389,12 +2331,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkg-config" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" - [[package]] name = "portable-atomic" version = "0.3.19" @@ -2428,7 +2364,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -2513,7 +2449,7 @@ dependencies = [ "log", "parse-datetime", "serde", - "serde_yaml", + "serde_yaml 0.8.26", "snafu", "toml", "url", @@ -2675,7 +2611,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.2", "ipnet", "js-sys", "log", @@ -2683,13 +2619,13 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.20.8", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tower-service", "url", "wasm-bindgen", @@ -2764,6 +2700,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07180898a28ed6a7f7ba2311594308f595e3dd2e3c3812fa0a80a47b45f17e5d" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + [[package]] name = "rustls-native-certs" version = "0.6.2" @@ -2785,6 +2733,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "ryu" version = "1.0.13" @@ -2830,7 +2788,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn", + "syn 1.0.109", ] [[package]] @@ -2899,9 +2857,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.156" +version = 
"1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -2918,13 +2876,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.156" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -2935,7 +2893,7 @@ checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2983,6 +2941,19 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "serde_yaml" +version = "0.9.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "sha1" version = "0.10.5" @@ -3085,7 +3056,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3143,7 +3114,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3163,6 +3134,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "tabled" version = "0.10.0" @@ -3184,7 +3166,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3240,7 +3222,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "serde_yaml", + "serde_yaml 0.8.26", "snafu", "term_size", "testsys-config", @@ -3261,7 +3243,7 @@ dependencies = [ "log", "maplit", "serde", - "serde_yaml", + "serde_yaml 0.8.26", "snafu", "testsys-model", "toml", @@ -3270,8 +3252,8 @@ dependencies = [ [[package]] name = "testsys-model" -version = "0.0.6" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.6#a69b7006887249f89238b428a9acb6a0050dd384" +version = "0.0.7" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" dependencies = [ "async-recursion", "async-trait", @@ -3291,7 +3273,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "serde_yaml", + "serde_yaml 0.8.26", "snafu", "tabled", "tokio", @@ -3316,22 +3298,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = 
"f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -3426,30 +3408,28 @@ checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "tokio-openssl" -version = "0.6.3" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "futures-util", - "openssl", - "openssl-sys", + "rustls 0.20.8", "tokio", + "webpki", ] [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" dependencies = [ - "rustls", + "rustls 0.21.0", "tokio", - "webpki", ] [[package]] @@ -3580,11 +3560,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" dependencies = [ - "base64 0.13.1", + "base64 0.20.0", "bitflags", "bytes", "futures-core", @@ -3592,6 +3572,7 @@ dependencies = [ "http", "http-body", "http-range-header", + "mime", "pin-project-lite", "tower-layer", "tower-service", @@ -3631,7 +3612,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3728,6 +3709,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +[[package]] +name = "unsafe-libyaml" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" + [[package]] name = "untrusted" version = "0.7.1" @@ -3773,12 +3760,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "vec_map" version = "0.8.2" @@ -3844,7 +3825,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -3878,7 +3859,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/tools/deny.toml b/tools/deny.toml index 437ca3d9..cf87a2f4 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -52,6 +52,13 @@ license-files = [ { path = "LICENSE", hash = 0x001c7e6c }, ] +[[licenses.clarify]] +name = "rustls-webpki" +expression = "ISC" +license-files = [ + { path = "LICENSE", hash = 0x001c7e6c }, +] + [bans] # Deny multiple versions or wildcard dependencies. 
multiple-versions = "deny" @@ -75,6 +82,10 @@ skip-tree = [ # dependency tree because windows-sys has many sub-crates # that differ in major version. { name = "windows-sys", version = "=0.42.0" }, + + # TestSys uses a newer version of base64 and serde_yaml + { name = "testsys-model", version = "=0.0.7" }, + { name = "bottlerocket-types", version = "=0.0.7" }, ] [sources] diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index 687041ed..56f8b04b 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -13,7 +13,7 @@ home = "0.5" lazy_static = "1" log = "0.4" maplit="1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.6", tag = "v0.0.6"} +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} serde = { version = "1", features = ["derive"] } serde_yaml = "0.8" snafu = "0.7" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 50855eb6..b8f1508a 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -11,7 +11,7 @@ async-trait = "0.1" aws-config = "0.54.1" aws-sdk-ec2 = "0.24" base64 = "0.20" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.6", tag = "v0.0.6"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } env_logger = "0.10" @@ -19,7 +19,7 @@ futures = "0.3" handlebars = "4" log = "0.4" maplit = "1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.6", tag = "v0.0.6"} +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } fastrand = "1" serde = { version = "1", features = ["derive"] } diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs index 180c45a7..b7a88f21 100644 --- a/tools/testsys/src/status.rs +++ b/tools/testsys/src/status.rs @@ -4,9 +4,7 @@ use log::{debug, info}; use serde::Deserialize; use serde_plain::derive_fromstr_from_deserialize; use snafu::ResultExt; -use testsys_model::test_manager::{ - CrdState, CrdType, SelectionParams, StatusProgress, TestManager, -}; +use testsys_model::test_manager::{CrdState, CrdType, SelectionParams, StatusColumn, TestManager}; /// Check the status of testsys objects. 
#[derive(Debug, Parser)] @@ -15,10 +13,6 @@ pub(crate) struct Status { #[clap(long, short = 'o')] output: Option, - /// Check the status of the testsys controller - #[clap(long, short = 'c')] - controller: bool, - /// Focus status on a particular arch #[clap(long)] arch: Option, @@ -64,17 +58,21 @@ impl Status { labels.push(format!("testsys/variant={}", variant)) }; let mut status = client - .status( - &SelectionParams { - labels: Some(labels.join(",")), - state, - crd_type, - ..Default::default() - }, - self.controller, - ) + .status(&SelectionParams { + labels: Some(labels.join(",")), + state, + crd_type, + ..Default::default() + }) .await?; + status.add_column(StatusColumn::name()); + status.add_column(StatusColumn::crd_type()); + status.add_column(StatusColumn::state()); + status.add_column(StatusColumn::passed()); + status.add_column(StatusColumn::failed()); + status.add_column(StatusColumn::skipped()); + match self.output { Some(StatusOutput::Json) => { info!( @@ -88,16 +86,23 @@ impl Status { Some(StatusOutput::Narrow) => (), None => { status.new_column("BUILD ID", |crd| { - crd.labels().get("testsys/build-id").cloned() + crd.labels() + .get("testsys/build-id") + .cloned() + .into_iter() + .collect() }); - status.with_time(); + status.add_column(StatusColumn::last_update()); } Some(StatusOutput::Wide) => { status.new_column("BUILD ID", |crd| { - crd.labels().get("testsys/build-id").cloned() + crd.labels() + .get("testsys/build-id") + .cloned() + .into_iter() + .collect() }); - status.with_time(); - status.with_progress(StatusProgress::WithTests); + status.add_column(StatusColumn::last_update()); } }; From 76fa107f14b3012f3b46be1f5d21a761ee2ddc22 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 3 May 2023 15:07:07 +0000 Subject: [PATCH 0943/1356] kernel-5.10: update to 5.10.178 Rebase to Amazon Linux upstream version based on 5.10.178. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 27522dd4..f3150c7d 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/7ca24767b6ccf9edb988e7415593fb1fef1691b323a0a5f41077742adecc881f/kernel-5.10.177-158.645.amzn2.src.rpm" -sha512 = "036798180a75bb7c7872306845a2b15118c7183472f77823c6398774c1abc692489a5bb1e0f07fb10ad036ea68dee769a51e4ad4181afdddd0c267e1233c8bdf" +url = "https://cdn.amazonlinux.com/blobstore/13be720c0258208a986213f02d549940509f5125eac626729bc5dd3612bef2f8/kernel-5.10.178-162.673.amzn2.src.rpm" +sha512 = "d1785ac9f88afbe2ee36bc4a16319c076048b89eb4488fdec884a785d1f68ad981b9499c54fa289329d5db228400175aa7cf05d4e0c7d9a75a68a18532a31957" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index aa9e5629..51208611 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.177 +Version: 5.10.178 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/7ca24767b6ccf9edb988e7415593fb1fef1691b323a0a5f41077742adecc881f/kernel-5.10.177-158.645.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/13be720c0258208a986213f02d549940509f5125eac626729bc5dd3612bef2f8/kernel-5.10.178-162.673.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From c4613a9a86d7257533efa209923a7567c6f5aa3a Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 3 May 2023 15:07:33 +0000 Subject: [PATCH 0944/1356] kernel-5.15: update to 5.15.108 Rebase to Amazon Linux upstream version based on 5.15.108. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 8608dfc6..c5f96ac3 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "pkg.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/a05414b6b80f2113b47d97b12e0a706c05597c2dd2100da31341333605de9209/kernel-5.15.106-64.140.amzn2.src.rpm" -sha512 = "0a5a0319bd4c019a31d8139f4b5468ca0abc989f1846f4a7ae90c05e90be29119d33d43dc64cd39ddbcd0bf6b445aaa7379d0ca6392c146afa34c64f6dbb156f" +url = "https://cdn.amazonlinux.com/blobstore/e8de7cc956678c88e06d181df5b0dde1c39fdc2fce4a47b5b466585f1e164a35/kernel-5.15.108-65.141.amzn2.src.rpm" +sha512 = "3c5eaa6bea14f8f06a8999f05c2fe92b4b623ec4c445c0136903977a5cee02e46119585fa7afd20685156735bd6512e5400628868c759016027673c4ebb5cceb" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index b2bb478b..d617f5df 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.106 +Version: 5.15.108 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/a05414b6b80f2113b47d97b12e0a706c05597c2dd2100da31341333605de9209/kernel-5.15.106-64.140.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/e8de7cc956678c88e06d181df5b0dde1c39fdc2fce4a47b5b466585f1e164a35/kernel-5.15.108-65.141.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 53bf77681aab32d2a38bc81d527278e2b024608b Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 14 Mar 2023 20:41:15 +0000 Subject: [PATCH 0945/1356] testsys: Support ecs workload testing --- tools/testsys-config/src/lib.rs | 5 ++ tools/testsys/src/aws_ecs.rs | 86 ++++++++++++++++++++++++++++++--- tools/testsys/src/run.rs | 8 +++ 3 files changed, 93 insertions(+), 6 deletions(-) diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index f358c808..9ec442c6 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -373,6 +373,7 @@ pub struct TestsysImages { pub ecs_test_agent_image: Option, pub migration_test_agent_image: Option, pub k8s_workload_agent_image: Option, + pub ecs_workload_agent_image: Option, pub controller_image: Option, pub testsys_agent_pull_secret: Option, } @@ -405,6 +406,7 @@ impl TestsysImages { ecs_test_agent_image: Some(format!("{}/ecs-test-agent:{tag}", registry)), migration_test_agent_image: Some(format!("{}/migration-test-agent:{tag}", registry)), k8s_workload_agent_image: Some(format!("{}/k8s-workload-agent:{tag}", registry)), + ecs_workload_agent_image: Some(format!("{}/ecs-workload-agent:{tag}", registry)), controller_image: Some(format!("{}/controller:{tag}", registry)), testsys_agent_pull_secret: None, } @@ -440,6 +442,9 @@ impl TestsysImages { k8s_workload_agent_image: self .k8s_workload_agent_image .or(other.k8s_workload_agent_image), + ecs_workload_agent_image: self + .ecs_workload_agent_image + .or(other.ecs_workload_agent_image), controller_image: self.controller_image.or(other.controller_image), testsys_agent_pull_secret: self .testsys_agent_pull_secret diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs index 025609bd..c95a063b 100644 --- a/tools/testsys/src/aws_ecs.rs +++ b/tools/testsys/src/aws_ecs.rs @@ -5,12 +5,14 @@ use crate::crds::{ }; use crate::error::{self, Result}; use crate::migration::migration_crd; -use bottlerocket_types::agent_config::{ClusterType, EcsClusterConfig, EcsTestConfig}; +use bottlerocket_types::agent_config::{ + ClusterType, EcsClusterConfig, EcsTestConfig, EcsWorkloadTestConfig, WorkloadTest, +}; use log::debug; use maplit::btreemap; use snafu::{OptionExt, ResultExt}; use std::collections::BTreeMap; -use testsys_model::{Crd, DestructionPolicy}; +use testsys_model::{Crd, DestructionPolicy, Test}; /// A `CrdCreator` responsible for creating crd related to `aws-ecs` variants. pub(crate) struct AwsEcsCreator { @@ -193,13 +195,85 @@ impl CrdCreator for AwsEcsCreator { Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(test_crd)))) } - async fn workload_crd<'a>(&self, _test_input: TestInput<'a>) -> Result { - Err(error::Error::Invalid { - what: "Workload testing is not supported for non-k8s variants".to_string(), - }) + async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { + Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( + &self.region, + test_input, + )?)))) } fn additional_fields(&self, _test_type: &str) -> BTreeMap { btreemap! {"region".to_string() => self.region.clone()} } } + +/// Create a workload CRD for K8s testing. 
+pub(crate) fn workload_crd(region: &str, test_input: TestInput) -> Result { + let cluster_resource_name = test_input + .cluster_crd_name + .as_ref() + .expect("A cluster name is required for ECS workload tests"); + let bottlerocket_resource_name = test_input + .bottlerocket_crd_name + .as_ref() + .expect("A bottlerocket resource name is required for ECS workload tests"); + + let labels = test_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => test_input.test_type.to_string(), + "testsys/cluster".to_string() => cluster_resource_name.to_string(), + }); + let plugins: Vec<_> = test_input + .crd_input + .config + .workloads + .iter() + .map(|(name, image)| WorkloadTest { + name: name.to_string(), + image: image.to_string(), + ..Default::default() + }) + .collect(); + if plugins.is_empty() { + return Err(error::Error::Invalid { + what: "There were no plugins specified in the workload test. + Workloads can be specified in `Test.toml` or via the command line." + .to_string(), + }); + } + + EcsWorkloadTestConfig::builder() + .resources(bottlerocket_resource_name) + .resources(cluster_resource_name) + .set_depends_on(Some(test_input.prev_tests)) + .set_retries(Some(5)) + .image( + test_input + .crd_input + .images + .ecs_workload_agent_image + .to_owned() + .expect("The default K8s workload testing image is missing"), + ) + .set_image_pull_secret( + test_input + .crd_input + .images + .testsys_agent_pull_secret + .to_owned(), + ) + .keep_running(true) + .region(region.to_string()) + .cluster_name_template(cluster_resource_name, "clusterName") + .assume_role(test_input.crd_input.config.agent_role.to_owned()) + .tests(plugins) + .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) + .set_labels(Some(labels)) + .build(format!( + "{}{}", + cluster_resource_name, + test_input.name_suffix.unwrap_or("-test") + )) + .context(error::BuildSnafu { + what: "Workload CRD", + }) +} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index d4a187ea..81b513a0 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -559,6 +559,13 @@ pub(crate) struct TestsysImages { )] pub(crate) k8s_workload: Option, + /// ECS workload agent URI. If not provided the latest released test agent will be used. + #[clap( + long = "ecs-workload-agent-image", + env = "TESTSYS_ECS_WORKLOAD_AGENT_IMAGE" + )] + pub(crate) ecs_workload: Option, + /// TestSys controller URI. If not provided the latest released controller will be used. #[clap(long = "controller-image", env = "TESTSYS_CONTROLLER_IMAGE")] pub(crate) controller_uri: Option, @@ -584,6 +591,7 @@ impl From for testsys_config::TestsysImages { ecs_test_agent_image: val.ecs_test, migration_test_agent_image: val.migration_test, k8s_workload_agent_image: val.k8s_workload, + ecs_workload_agent_image: val.ecs_workload, controller_image: val.controller_uri, testsys_agent_pull_secret: val.secret, } From 205d3763b73a69b350ab64f1bbc88feeafa1c7c7 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 4 May 2023 09:20:52 +0000 Subject: [PATCH 0946/1356] tools/diff-kernel-config: optimize bail-out time on failure We usually use the diff-kernel-config tool to find differences in kernel configuration for changes to our kernels (updates, features, config changes). The script builds kernels at two diffrent development stages, extracts the resulting kernel config and compares. Usually we can expect the 'before' state to be stable and being the state currently in the repo, while the 'after' state will have changes. 
The state at 'before' is more unlikely to fail builds than the 'after' state. So build the 'after' state first so we do waste less time re-doing builds in case of build failures on 'after' state. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 2dbdb02d..24b5a4f9 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -140,7 +140,7 @@ on_exit "git checkout --quiet '${gitrev_original}'" mkdir -p "${output_dir}" || bail "Failed to create output directory '${output_dir}'" -for state in before after; do +for state in after before; do gitrev_var=gitrev_${state} git checkout --quiet "${!gitrev_var}" || bail "Cannot check out '${!gitrev_var}'." From 21c301e365980060d858a9c812f9e3f6dd85e923 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 2 May 2023 16:48:35 +0000 Subject: [PATCH 0947/1356] testsys: Add support for `sonobuoy-image` --- tools/testsys-config/src/lib.rs | 3 +++ tools/testsys/src/run.rs | 6 ++++++ tools/testsys/src/sonobuoy.rs | 1 + 3 files changed, 10 insertions(+) diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index 9ec442c6..ecdcfff3 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -253,6 +253,8 @@ pub struct GenericVariantConfig { pub secrets: BTreeMap, /// The role that should be assumed for this particular variant pub agent_role: Option, + /// The location of the sonobuoy testing image + pub sonobuoy_image: Option, /// The custom images used for conformance testing pub conformance_image: Option, /// The custom registry used for conformance testing @@ -300,6 +302,7 @@ impl GenericVariantConfig { instance_type: self.instance_type.or(other.instance_type), secrets, agent_role: self.agent_role.or(other.agent_role), + sonobuoy_image: self.sonobuoy_image.or(other.sonobuoy_image), conformance_image: self.conformance_image.or(other.conformance_image), conformance_registry: self.conformance_registry.or(other.conformance_registry), control_plane_endpoint: self.control_plane_endpoint.or(other.control_plane_endpoint), diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index 81b513a0..c3ab6be1 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -131,6 +131,11 @@ struct CliConfig { #[clap(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] target_cluster_name: Option, + /// The sonobuoy image that should be used for conformance testing. It may be omitted to use the default + /// sonobuoy image. + #[clap(long, env = "TESTSYS_SONOBUOY_IMAGE")] + sonobuoy_image: Option, + /// The image that should be used for conformance testing. It may be omitted to use the default /// testing image. 
#[clap(long, env = "TESTSYS_CONFORMANCE_IMAGE")] @@ -179,6 +184,7 @@ impl From for GenericVariantConfig { instance_type: val.instance_type, secrets: val.secret.into_iter().collect(), agent_role: val.assume_role, + sonobuoy_image: val.sonobuoy_image, conformance_image: val.conformance_image, conformance_registry: val.conformance_registry, control_plane_endpoint: val.control_plane_endpoint, diff --git a/tools/testsys/src/sonobuoy.rs b/tools/testsys/src/sonobuoy.rs index 2741b50e..d3288442 100644 --- a/tools/testsys/src/sonobuoy.rs +++ b/tools/testsys/src/sonobuoy.rs @@ -68,6 +68,7 @@ pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result { .to_owned() .map(e2e_repo_config_base64), ) + .sonobuoy_image(test_input.crd_input.config.sonobuoy_image.to_owned()) .kube_conformance_image(test_input.crd_input.config.conformance_image.to_owned()) .assume_role(test_input.crd_input.config.agent_role.to_owned()) .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) From 2eead6f52785065df515d36278574685085e0d7f Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 22 Mar 2023 16:05:41 +0000 Subject: [PATCH 0948/1356] kubelet: Add `cpu-cfs-quota-enforced` setting This adds a setting to be able to set the KubeletConfig value `cpuCFSQuota` to `false`. Signed-off-by: Sean McGinnis --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ede3ae05..bf2bf9f4 100644 --- a/README.md +++ b/README.md @@ -417,6 +417,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. * `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. * `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. +* `settings.kubernetes.cpu-cfs-quota-enforced`: Whether CPU CFS quotas are enforced. Defaults to `true`. * `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. * `settings.kubernetes.cpu-manager-policy-options`: Policy options to apply when `cpu-manager-policy` is set to `static`. Currently `full-pcpus-only` is the only option. From d3a46514641f419f51779eb2e3064d682fa019c6 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 22 Mar 2023 17:56:07 +0000 Subject: [PATCH 0949/1356] kubelet: Add shutdown grace period settings This adds the settings: - `settings.kubernetes.shutdown-grace-period` - `settings.kubernetes.shutdown-grace-period-for-critical-pods` These control how long kubelet will wait on shutdown for pods to exit, and what portion of that period to dedicate to waiting for critical pods to exit. Signed-off-by: Sean McGinnis --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index bf2bf9f4..a44d3d51 100644 --- a/README.md +++ b/README.md @@ -518,6 +518,8 @@ If you downgrade from v1.14.0 to an earlier version, and you have these values s * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. 
* `settings.kubernetes.registry-qps`: The registry pull QPS. * `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. +* `settings.kubernetes.shutdown-grace-period`: Delay the node should wait for pod termination before shutdown. Default is `0s`. +* `settings.kubernetes.shutdown-grace-period-for-critical-pods`: The portion of the shutdown delay that should be dedicated to critical pod shutdown. Default is `0s`. * `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. * `settings.kubernetes.system-reserved`: Resources reserved for system components. From a9e0efddaab91097c0d63e81b0b7b5032a844e3b Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 22 Mar 2023 19:29:00 +0000 Subject: [PATCH 0950/1356] kubelet: Add soft eviction settings This adds the settings: - `settings.kubernetes.eviction-soft` - `settings.kubernetes.eviction-soft-grace-period` - `settings.kubernetes.eviction-max-pod-grace-period` To configure the Kubelet soft eviction settings. This is similar to the existing ``settings.kubernetes.eviction-hard` with additional configuration needed to control the behavior of the soft eviction thresholds. Signed-off-by: Sean McGinnis --- README.md | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index a44d3d51..cb900f23 100644 --- a/README.md +++ b/README.md @@ -477,37 +477,50 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. * `settings.kubernetes.event-qps`: The maximum event creations per second. * `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. +* `settings.kubernetes.eviction-max-pod-grace-period`: Maximum grace period, in seconds, to wait for pod termination before soft eviction. Default is `0`. +* `settings.kubernetes.eviction-soft`: The signals and thresholds that trigger pod eviction with a provided grace period. +* `settings.kubernetes.eviction-soft-grace-period`: Delay for each signal to wait for pod termination before eviction. + Remember to quote signals (since they all contain ".") and to quote all values. - Example user data for setting up eviction hard: + Example user data for setting up eviction values: ```toml [settings.kubernetes.eviction-hard] "memory.available" = "15%" + + [settings.kubernetes.eviction-soft] + "memory.available" = "12%" + + [settings.kubernetes.eviction-soft-grace-period] + "memory.available" = "30s" + + [settings.kubernetes] + "eviction-max-pod-grace-period" = 40 ``` * `settings.kubernetes.image-gc-high-threshold-percent`: The percent of disk usage after which image garbage collection is always run, expressed as an integer from 0-100 inclusive. * `settings.kubernetes.image-gc-low-threshold-percent`: The percent of disk usage before which image garbage collection is never run, expressed as an integer from 0-100 inclusive. -Since v1.14.0 `image-gc-high-threshold-percent` and `image-gc-low-threshold-percent` can be represented as numbers. -For example: + Since v1.14.0 `image-gc-high-threshold-percent` and `image-gc-low-threshold-percent` can be represented as numbers. 
+ For example: -```toml -[settings.kubernetes] -image-gc-high-threshold-percent = 85 -image-gc-low-threshold-percent = 80 -``` + ```toml + [settings.kubernetes] + image-gc-high-threshold-percent = 85 + image-gc-low-threshold-percent = 80 + ``` -For backward compatibility, both string and numeric representations are accepted since v1.14.0. -Prior to v1.14.0 these needed to be represented as strings, for example: + For backward compatibility, both string and numeric representations are accepted since v1.14.0. + Prior to v1.14.0 these needed to be represented as strings, for example: -```toml -[settings.kubernetes] -image-gc-high-threshold-percent = "85" -image-gc-low-threshold-percent = "80" -``` + ```toml + [settings.kubernetes] + image-gc-high-threshold-percent = "85" + image-gc-low-threshold-percent = "80" + ``` -If you downgrade from v1.14.0 to an earlier version, and you have these values set as numbers, they will be converted to strings on downgrade. + If you downgrade from v1.14.0 to an earlier version, and you have these values set as numbers, they will be converted to strings on downgrade. * `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. * `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. From 6e2106074bf2cb1a6c4237d4d8cc1a20b61fea7a Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 5 Apr 2023 13:55:26 +0000 Subject: [PATCH 0951/1356] kubelet: Add reserved memory settings This enables the kubelet memory manager and allows setting its values and the memory manager policy. Signed-off-by: Sean McGinnis --- README.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/README.md b/README.md index cb900f23..5fbbeb79 100644 --- a/README.md +++ b/README.md @@ -526,6 +526,34 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. * `settings.kubernetes.log-level`: Adjust the logging verbosity of the `kubelet` process. The default log level is 2, with higher numbers enabling more verbose logging. +* `settings.kubernetes.memory-manager-policy`: The memory management policy to use: `None` (default) or `Static`. + Note, when using the `Static` policy you should also set `settings.kubernetes.memory-manager-reserved-memory` values. +* `settings.kubernetes.memory-manager-reserved-memory`: Used to set the total amount of reserved memory for a node. + These settings are used to configure memory manager policy when `settings.kubernetes.memory-manager-policy` is set to `Static`. + + `memory-manager-reserved-memory` is set per NUMA node. For example: + + ```toml + [settings.kubernetes] + "memory-manager-policy" = "Static" + + [settings.kubernetes.memory-manager-reserved-memory.0] + # Reserve a single 1GiB huge page along with 674MiB of memory + "enabled" = true + "memory" = "674Mi" + "hugepages-1Gi" = "1Gi" + + [settings.kubernetes.memory-manager-reserved-memory.1] + # Reserve 1,074 2MiB huge pages + "enabled" = true + "hugepages-2Mi" = "2148Mi" + ``` + + **Warning:** `memory-manager-reserved-memory` settings are an advanced configuration and requires a clear understanding of what you are setting. + Misconfiguration of reserved memory settings may cause the Kubernetes `kubelet` process to fail. + It can be very difficult to recover from configuration errors. 
+ Use the memory reservation information from `kubectl describe node` and make sure you understand the Kubernetes documentation related to the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) and how to [reserve compute resources for system daemons](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/). + * `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. * `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. From d004494f2a61177e4a7b148dbb5715b9513f85bf Mon Sep 17 00:00:00 2001 From: John McBride Date: Thu, 4 May 2023 21:33:29 +0000 Subject: [PATCH 0952/1356] Rust `tools` dependencies: Upgrade h2 and aws-sigv4 Signed-off-by: John McBride --- tools/Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a9c0722f..b05c4769 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -429,9 +429,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "0.54.1" +version = "0.54.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdaf11005b7444e6cd66f600d09861a3aeb6eb89a0f003c7c9820dbab2d15297" +checksum = "86529e7b64d902efea8fff52c1b2529368d04f90305cf632729e3713f6b57dc0" dependencies = [ "aws-smithy-eventstream", "aws-smithy-http", @@ -1450,9 +1450,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", From d114b7374cf75a16981e2f8aee383a915a9cfc91 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Mon, 1 May 2023 18:15:52 +0000 Subject: [PATCH 0953/1356] testsys: Add support for karpenter testing --- tools/Cargo.lock | 2 + tools/testsys-config/Cargo.toml | 2 + tools/testsys-config/src/lib.rs | 38 +++++++++ tools/testsys/src/aws_k8s.rs | 18 +++- tools/testsys/src/aws_resources.rs | 132 ++++++++++++++++++++++++++++- tools/testsys/src/run.rs | 16 +++- 6 files changed, 204 insertions(+), 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index b05c4769..3d3f7ae4 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -3236,6 +3236,7 @@ dependencies = [ name = "testsys-config" version = "0.1.0" dependencies = [ + "bottlerocket-types", "bottlerocket-variant", "handlebars", "home", @@ -3243,6 +3244,7 @@ dependencies = [ "log", "maplit", "serde", + "serde_plain", "serde_yaml 0.8.26", "snafu", "testsys-model", diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index 56f8b04b..9722233d 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" publish = false [dependencies] +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } handlebars = "4" home = "0.5" @@ -15,6 +16,7 @@ log = "0.4" maplit="1" testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} serde = { version = "1", features = ["derive"] } +serde_plain = "1" serde_yaml = "0.8" 
snafu = "0.7" toml = "0.5" diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index ecdcfff3..b841a720 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -1,3 +1,4 @@ +use bottlerocket_types::agent_config::KarpenterDeviceMapping; use bottlerocket_variant::Variant; pub use error::Error; use handlebars::Handlebars; @@ -11,6 +12,7 @@ use std::path::Path; use testsys_model::constants::TESTSYS_VERSION; use testsys_model::{DestructionPolicy, SecretName}; pub type Result = std::result::Result; +use serde_plain::derive_fromstr_from_deserialize; /// Configuration needed to run tests #[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] @@ -248,6 +250,11 @@ pub struct GenericVariantConfig { pub cluster_names: Vec, /// The instance type that instances should be launched with pub instance_type: Option, + /// Specify how Bottlerocket instances should be launched (ec2, karpenter) + pub resource_agent_type: Option, + /// Launch instances with the following Block Device Mapping + #[serde(default)] + pub block_device_mapping: Vec, /// The secrets needed by the agents #[serde(default)] pub secrets: BTreeMap, @@ -297,9 +304,17 @@ impl GenericVariantConfig { self.workloads }; + let block_device_mapping = if self.block_device_mapping.is_empty() { + other.block_device_mapping + } else { + self.block_device_mapping + }; + Self { cluster_names, instance_type: self.instance_type.or(other.instance_type), + resource_agent_type: self.resource_agent_type.or(other.resource_agent_type), + block_device_mapping, secrets, agent_role: self.agent_role.or(other.agent_role), sonobuoy_image: self.sonobuoy_image.or(other.sonobuoy_image), @@ -315,6 +330,21 @@ impl GenericVariantConfig { } } +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum ResourceAgentType { + Karpenter, + Ec2, +} + +impl Default for ResourceAgentType { + fn default() -> Self { + Self::Ec2 + } +} + +derive_fromstr_from_deserialize!(ResourceAgentType); + /// The configuration for a specific config level (-). This may or may not be arch /// specific depending on it's location in `GenericConfig`. /// The configurable fields here add refined control to TestSys objects. 
@@ -371,6 +401,7 @@ pub struct TestsysImages { pub vsphere_k8s_cluster_resource_agent_image: Option, pub metal_k8s_cluster_resource_agent_image: Option, pub ec2_resource_agent_image: Option, + pub ec2_karpenter_resource_agent_image: Option, pub vsphere_vm_resource_agent_image: Option, pub sonobuoy_test_agent_image: Option, pub ecs_test_agent_image: Option, @@ -401,6 +432,10 @@ impl TestsysImages { registry )), ec2_resource_agent_image: Some(format!("{}/ec2-resource-agent:{tag}", registry)), + ec2_karpenter_resource_agent_image: Some(format!( + "{}/ec2-karpenter-resource-agent:{tag}", + registry + )), vsphere_vm_resource_agent_image: Some(format!( "{}/vsphere-vm-resource-agent:{tag}", registry @@ -435,6 +470,9 @@ impl TestsysImages { ec2_resource_agent_image: self .ec2_resource_agent_image .or(other.ec2_resource_agent_image), + ec2_karpenter_resource_agent_image: self + .ec2_karpenter_resource_agent_image + .or(other.ec2_karpenter_resource_agent_image), sonobuoy_test_agent_image: self .sonobuoy_test_agent_image .or(other.sonobuoy_test_agent_image), diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs index d5018293..971607d9 100644 --- a/tools/testsys/src/aws_k8s.rs +++ b/tools/testsys/src/aws_k8s.rs @@ -1,4 +1,4 @@ -use crate::aws_resources::{ami, ami_name, ec2_crd, get_ami_id}; +use crate::aws_resources::{ami, ami_name, ec2_crd, ec2_karpenter_crd, get_ami_id}; use crate::crds::{ BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, TestInput, @@ -14,6 +14,7 @@ use serde_yaml::Value; use snafu::{OptionExt, ResultExt}; use std::collections::BTreeMap; use std::str::FromStr; +use testsys_config::ResourceAgentType; use testsys_model::{Crd, DestructionPolicy}; /// A `CrdCreator` responsible for creating crd related to `aws-k8s` variants. @@ -148,7 +149,20 @@ impl CrdCreator for AwsK8sCreator { bottlerocket_input: BottlerocketInput<'a>, ) -> Result { Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - ec2_crd(bottlerocket_input, ClusterType::Eks, &self.region).await?, + match bottlerocket_input + .crd_input + .config + .resource_agent_type + .to_owned() + .unwrap_or_default() + { + ResourceAgentType::Ec2 => { + ec2_crd(bottlerocket_input, ClusterType::Eks, &self.region).await? + } + ResourceAgentType::Karpenter => { + ec2_karpenter_crd(bottlerocket_input, &self.region).await? 
+ } + }, )))) } diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 9d6466b8..ab9985ba 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -2,7 +2,9 @@ use crate::crds::BottlerocketInput; use crate::error::{self, Result}; use aws_sdk_ec2::model::{Filter, Image}; use aws_sdk_ec2::Region; -use bottlerocket_types::agent_config::{ClusterType, CustomUserData, Ec2Config}; +use bottlerocket_types::agent_config::{ + ClusterType, CustomUserData, Ec2Config, Ec2KarpenterConfig, KarpenterDeviceMapping, +}; use maplit::btreemap; use serde::Deserialize; use snafu::{ensure, OptionExt, ResultExt}; @@ -117,6 +119,17 @@ pub(crate) async fn ec2_crd<'a>( cluster_type: ClusterType, region: &str, ) -> Result { + if !bottlerocket_input + .crd_input + .config + .block_device_mapping + .is_empty() + { + return Err(error::Error::Invalid { + what: "Custom block mappings are not supported for ec2 instance launch".to_string(), + }); + } + let cluster_name = bottlerocket_input .cluster_crd_name .as_ref() @@ -216,3 +229,120 @@ pub(crate) async fn ec2_crd<'a>( what: "EC2 instance provider CRD", }) } + +/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. +pub(crate) async fn ec2_karpenter_crd<'a>( + bottlerocket_input: BottlerocketInput<'a>, + region: &str, +) -> Result { + let cluster_name = bottlerocket_input + .cluster_crd_name + .as_ref() + .expect("A cluster provider is required"); + + // Create the labels for this EC2 provider. + let labels = bottlerocket_input.crd_input.labels(btreemap! { + "testsys/type".to_string() => "instances".to_string(), + "testsys/cluster".to_string() => cluster_name.to_string(), + "testsys/region".to_string() => region.to_string() + }); + + // Find all resources using the same cluster. + let conflicting_resources = bottlerocket_input + .crd_input + .existing_crds( + &labels, + &["testsys/cluster", "testsys/type", "testsys/region"], + ) + .await?; + + // If no mappings were provided use a standard mapping as a default + let device_mappings = if bottlerocket_input + .crd_input + .config + .block_device_mapping + .is_empty() + { + vec![ + KarpenterDeviceMapping { + name: "/dev/xvda".to_string(), + volume_type: "gp3".to_string(), + volume_size: 4, + delete_on_termination: true, + }, + KarpenterDeviceMapping { + name: "/dev/xvdb".to_string(), + volume_type: "gp3".to_string(), + volume_size: 20, + delete_on_termination: true, + }, + ] + } else { + bottlerocket_input + .crd_input + .config + .block_device_mapping + .clone() + }; + + let mut ec2_builder = Ec2KarpenterConfig::builder(); + ec2_builder + .node_ami(bottlerocket_input.image_id) + .instance_types::>( + bottlerocket_input + .crd_input + .config + .instance_type + .iter() + .cloned() + .collect(), + ) + .custom_user_data( + bottlerocket_input + .crd_input + .encoded_userdata()? 
+ .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), + ) + .cluster_name_template(cluster_name, "clusterName") + .region_template(cluster_name, "region") + .subnet_ids_template(cluster_name, "privateSubnetIds") + .endpoint_template(cluster_name, "endpoint") + .cluster_sg_template(cluster_name, "clustersharedSg") + .device_mappings(device_mappings) + .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) + .depends_on(cluster_name) + .image( + bottlerocket_input + .crd_input + .images + .ec2_karpenter_resource_agent_image + .as_ref() + .expect("Missing default image for EC2 resource agent"), + ) + .set_image_pull_secret( + bottlerocket_input + .crd_input + .images + .testsys_agent_pull_secret + .clone(), + ) + .set_labels(Some(labels)) + .set_conflicts_with(conflicting_resources.into()) + .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) + .destruction_policy( + bottlerocket_input + .crd_input + .config + .dev + .bottlerocket_destruction_policy + .to_owned() + .unwrap_or(DestructionPolicy::OnTestSuccess), + ); + + let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); + ec2_builder + .build(format!("{}-karpenter-{}", cluster_name, suffix)) + .context(error::BuildSnafu { + what: "EC2 instance provider CRD", + }) +} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index c3ab6be1..faa53c75 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -19,7 +19,7 @@ use snafu::{OptionExt, ResultExt}; use std::fs::read_to_string; use std::path::PathBuf; use std::str::FromStr; -use testsys_config::{GenericVariantConfig, TestConfig}; +use testsys_config::{GenericVariantConfig, ResourceAgentType, TestConfig}; use testsys_model::test_manager::TestManager; use testsys_model::SecretName; @@ -162,6 +162,10 @@ struct CliConfig { #[clap(long, env = "TESTSYS_USERDATA")] pub userdata: Option, + /// Specify the method that should be used to launch instances + #[clap(long, env = "TESTSYS_RESOURCE_AGENT")] + pub resource_agent_type: Option, + /// A set of workloads that should be run for a workload test (--workload my-workload=) #[clap(long = "workload", parse(try_from_str = parse_workloads), number_of_values = 1)] pub workloads: Vec<(String, String)>, @@ -182,6 +186,8 @@ impl From for GenericVariantConfig { GenericVariantConfig { cluster_names: val.target_cluster_name.into_iter().collect(), instance_type: val.instance_type, + resource_agent_type: val.resource_agent_type, + block_device_mapping: Default::default(), secrets: val.secret.into_iter().collect(), agent_role: val.assume_role, sonobuoy_image: val.sonobuoy_image, @@ -533,6 +539,13 @@ pub(crate) struct TestsysImages { )] pub(crate) ec2_resource: Option, + /// EC2 Karpenter resource agent URI. If not provided the latest released resource agent will be used. + #[clap( + long = "ec2-resource-agent-image", + env = "TESTSYS_EC2_KARPENTER_RESOURCE_AGENT_IMAGE" + )] + pub(crate) ec2_karpenter_resource: Option, + /// vSphere VM resource agent URI. If not provided the latest released resource agent will be used. 
#[clap( long = "vsphere-vm-resource-agent-image", @@ -592,6 +605,7 @@ impl From for testsys_config::TestsysImages { vsphere_k8s_cluster_resource_agent_image: val.vsphere_k8s_cluster_resource, metal_k8s_cluster_resource_agent_image: val.metal_k8s_cluster_resource, ec2_resource_agent_image: val.ec2_resource, + ec2_karpenter_resource_agent_image: val.ec2_karpenter_resource, vsphere_vm_resource_agent_image: val.vsphere_vm_resource, sonobuoy_test_agent_image: val.sonobuoy_test, ecs_test_agent_image: val.ecs_test, From e692c90b142eeb056a76dab23e9cecfb3829a251 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 20 Apr 2023 20:38:04 +0000 Subject: [PATCH 0954/1356] Add kubernetes-1.27 package and variants This adds a new package for installing k8s 1.27. It also adds all variants for the *-k8s-* variant. Signed-off-by: Sean McGinnis --- .github/workflows/build.yml | 25 ++++++++++++++++++++++++- README.md | 4 ++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d2389e0c..0524878e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,7 +38,14 @@ jobs: continue-on-error: ${{ matrix.supported }} strategy: matrix: - variant: [aws-k8s-1.22, aws-k8s-1.23, aws-k8s-1.24, aws-k8s-1.25, aws-k8s-1.26, aws-ecs-1] + variant: + - aws-k8s-1.22 + - aws-k8s-1.23 + - aws-k8s-1.24 + - aws-k8s-1.25 + - aws-k8s-1.26 + - aws-k8s-1.27 + - aws-ecs-1 arch: [x86_64, aarch64] supported: [true] fetch-upstream: ["false"] @@ -75,6 +82,10 @@ jobs: arch: x86_64 supported: false fetch-upstream: "false" + - variant: metal-k8s-1.27 + arch: x86_64 + supported: false + fetch-upstream: "false" - variant: vmware-k8s-1.22 arch: x86_64 supported: true @@ -95,6 +106,10 @@ jobs: arch: x86_64 supported: true fetch-upstream: "false" + - variant: vmware-k8s-1.27 + arch: x86_64 + supported: true + fetch-upstream: "false" - variant: aws-k8s-1.22-nvidia arch: x86_64 supported: true @@ -135,6 +150,14 @@ jobs: arch: aarch64 supported: true fetch-upstream: "true" + - variant: aws-k8s-1.27-nvidia + arch: x86_64 + supported: true + fetch-upstream: "true" + - variant: aws-k8s-1.27-nvidia + arch: aarch64 + supported: true + fetch-upstream: "true" - variant: aws-ecs-1-nvidia arch: x86_64 supported: true diff --git a/README.md b/README.md index 5fbbeb79..e10371c7 100644 --- a/README.md +++ b/README.md @@ -65,11 +65,13 @@ The following variants support EKS, as described above: * `aws-k8s-1.24` * `aws-k8s-1.25` * `aws-k8s-1.26` +* `aws-k8s-1.27` * `aws-k8s-1.22-nvidia` * `aws-k8s-1.23-nvidia` * `aws-k8s-1.24-nvidia` * `aws-k8s-1.25-nvidia` * `aws-k8s-1.26-nvidia` +* `aws-k8s-1.27-nvidia` The following variants support ECS: @@ -83,6 +85,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: * `vmware-k8s-1.24` * `vmware-k8s-1.25` * `vmware-k8s-1.26` +* `vmware-k8s-1.27` The following variants are designed to be Kubernetes worker nodes on bare metal: @@ -91,6 +94,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: * `metal-k8s-1.24` * `metal-k8s-1.25` * `metal-k8s-1.26` +* `metal-k8s-1.27` The following variants are no longer supported: From f59c4eeedb52d7639962c9c1fc34f8b73be5e291 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 8 May 2023 14:43:02 +0000 Subject: [PATCH 0955/1356] kernel-5.15: Default disable panic on hung task and lockups Setting panic as defaults for hung tasks and lockups is probably not the best idea for the general case. 
In scenarios with remote storage we may see hangs as interactions with that storage might be delayed due to network connectivity issues. In such cases there is use cases that do not benefit from panic and reboot. Disable the default for those panic options. This does not take away the option to configure panic on hung tasks or lockups through either sysctl or kernel boot parameters. As a bonus this aligns both kernels 5.10 and 5.15 on coherent settings for the panic choices. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/config-bottlerocket | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 222e3843..b6cf3fc1 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -195,3 +195,8 @@ CONFIG_SCSI_ISCSI_ATTRS=m # target side CONFIG_ISCSI_TARGET=m # CONFIG_INFINIBAND_ISERT is not set + +# Disable panic on hung and lockup conditions by default +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set From a18ffb905648ad927cc7fb75d3682e9ebc439379 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 11 May 2023 16:23:20 +0000 Subject: [PATCH 0956/1356] Dynamically get list of variants for GitHub Actions This updates the `build` workflow to determine the set of variants to test against at runtime. It looks at what is under the `variants` directory to determine what to include, rather than needing to hard code this list in the action itself. This also gives the potential that we can use this in a composable workflow so multiple workflows can get the list of variants without needing to remember to update the list in multiple places whenever a variant is added or removed. 
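As a rough illustration of the discovery step described above (assuming a repository checkout that contains the variants/ directory), the same lists can be previewed locally with the pipelines that the new `list-variants` job runs; the authoritative commands are in the workflow change below, and the sample outputs here are only indicative:

    cd variants
    # JSON array of buildable variants, skipping the non-variant 'shared'
    # and 'target' directories, e.g. ["aws-dev","aws-ecs-1",...]
    ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | jq -R -s -c 'split("\n")[:-1]'
    # variant/arch pairs excluded from aarch64 builds (the "aarch-enemies"),
    # e.g. [{"variant":"metal-dev","arch":"aarch64"},...]
    ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]'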
--- .github/workflows/build.yml | 155 ++++++------------------------------ 1 file changed, 25 insertions(+), 130 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0524878e..a568c67e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,141 +31,36 @@ env: GOPROXY: direct jobs: + list-variants: + if: github.repository == 'bottlerocket-os/bottlerocket' + runs-on: ubuntu-latest + outputs: + variants: ${{ steps.get-variants.outputs.variants }} + aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} + steps: + - uses: actions/checkout@v3 + - id: get-variants + name: Determine variants + run: | + cd variants + output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | jq -R -s -c 'split("\n")[:-1]')" + echo $output + echo $output >> $GITHUB_OUTPUT + output="aarch-enemies=$(ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]')" + echo $output + echo $output >> $GITHUB_OUTPUT + build: + needs: list-variants runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_32-core - continue-on-error: ${{ matrix.supported }} + continue-on-error: true strategy: matrix: - variant: - - aws-k8s-1.22 - - aws-k8s-1.23 - - aws-k8s-1.24 - - aws-k8s-1.25 - - aws-k8s-1.26 - - aws-k8s-1.27 - - aws-ecs-1 + variant: ${{ fromJson(needs.list-variants.outputs.variants) }} arch: [x86_64, aarch64] - supported: [true] - fetch-upstream: ["false"] - include: - - variant: aws-dev - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: vmware-dev - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-dev - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.22 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.23 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.24 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.25 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.26 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: metal-k8s-1.27 - arch: x86_64 - supported: false - fetch-upstream: "false" - - variant: vmware-k8s-1.22 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: vmware-k8s-1.23 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: vmware-k8s-1.24 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: vmware-k8s-1.25 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: vmware-k8s-1.26 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: vmware-k8s-1.27 - arch: x86_64 - supported: true - fetch-upstream: "false" - - variant: aws-k8s-1.22-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.22-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.23-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.23-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.24-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.24-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.25-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.25-nvidia - arch: aarch64 - supported: true - 
fetch-upstream: "true" - - variant: aws-k8s-1.26-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.26-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.27-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-k8s-1.27-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" - - variant: aws-ecs-1-nvidia - arch: x86_64 - supported: true - fetch-upstream: "true" - - variant: aws-ecs-1-nvidia - arch: aarch64 - supported: true - fetch-upstream: "true" + exclude: ${{ fromJson(needs.list-variants.outputs.aarch-enemies) }} fail-fast: false steps: - name: Preflight step to set up the runner @@ -222,5 +117,5 @@ jobs: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ -e BUILDSYS_ARCH=${{ matrix.arch }} \ -e BUILDSYS_JOBS=12 \ - -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK=${{ matrix.fetch-upstream }} \ - -e BUILDSYS_UPSTREAM_LICENSE_FETCH=${{ matrix.fetch-upstream }} + -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="${{ contains(matrix.variant, 'nvidia') }}" \ + -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" From 0ae244106e8df0db8640057c31f7f8c467246a04 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 7 Apr 2023 16:37:18 +0000 Subject: [PATCH 0957/1356] Drop Kubernetes 1.22 variants This removes the 1.22 k8s variants. This version of Kubernetes has gone end-of-life and we will no longer support it. Signed-off-by: Sean McGinnis --- BUILDING.md | 2 +- README.md | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index c79b8c6f..d4f5f1b2 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -150,7 +150,7 @@ cargo make -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true fetch-licenses 3. Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: ```shell -K8S_VERSION=1.22 +K8S_VERSION=1.24 cargo make \ -e BUILDSYS_VARIANT=aws-k8s-${K8S_VERSION}-nvidia \ -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="true" diff --git a/README.md b/README.md index e10371c7..17b6ff47 100644 --- a/README.md +++ b/README.md @@ -60,13 +60,11 @@ For example, an `x86_64` build of the `aws-k8s-1.24` variant will produce an ima The following variants support EKS, as described above: -* `aws-k8s-1.22` * `aws-k8s-1.23` * `aws-k8s-1.24` * `aws-k8s-1.25` * `aws-k8s-1.26` * `aws-k8s-1.27` -* `aws-k8s-1.22-nvidia` * `aws-k8s-1.23-nvidia` * `aws-k8s-1.24-nvidia` * `aws-k8s-1.25-nvidia` @@ -80,7 +78,6 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.22` * `vmware-k8s-1.23` * `vmware-k8s-1.24` * `vmware-k8s-1.25` @@ -89,7 +86,6 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: The following variants are designed to be Kubernetes worker nodes on bare metal: -* `metal-k8s-1.22` * `metal-k8s-1.23` * `metal-k8s-1.24` * `metal-k8s-1.25` @@ -98,7 +94,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: The following variants are no longer supported: -* All Kubernetes variants using Kubernetes 1.21 and earlier +* All Kubernetes variants using Kubernetes 1.22 and earlier We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). 
From e441754d4dde35763c4fbd2ab4b4ba14868c6920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20BETTE?= Date: Sat, 6 May 2023 19:06:43 +0200 Subject: [PATCH 0958/1356] Add the argument "--product-name" to start-local-vm script to support BUILDSYS_NAME option when building image --- tools/start-local-vm | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index c1ef649f..dd409711 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -1,10 +1,10 @@ #!/usr/bin/env bash # shellcheck disable=SC2054 # Arrays are formatted for passing args to other tools - shopt -s nullglob arch=${BUILDSYS_ARCH} variant=${BUILDSYS_VARIANT} +product_name=bottlerocket host_port_forwards=tcp::2222-:22 vm_mem=4G vm_cpus=4 @@ -41,6 +41,9 @@ Options: BUILDSYS_ARCH environment variable is set) --variant Bottlerocket variant to run (may be omitted if the BUILDSYS_VARIANT environment variable is set) + --product-name + product name used for file and directory naming used when + building with the "-e BUILDSYS_NAME" option (default is bottlerocket) --host-port-forwards list of host ports to forward to the VM; HOST_PORT_FWDS must be a valid QEMU port forwarding specifier (default @@ -92,6 +95,8 @@ parse_args() { shift; arch=$1 ;; --variant) shift; variant=$1 ;; + --product-name) + shift; product_name=$1 ;; --host-port-forwards) shift; host_port_forwards=$1 ;; --vm-memory) @@ -141,8 +146,8 @@ extract_image() { prepare_raw_images() { local -r image_dir=build/images/${arch}-${variant}/latest - local -r compressed_boot_image=${image_dir}/bottlerocket-${variant}-${arch}.img.lz4 - local -r compressed_data_image=${image_dir}/bottlerocket-${variant}-${arch}-data.img.lz4 + local -r compressed_boot_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 + local -r compressed_data_image=${image_dir}/${product_name}-${variant}-${arch}-data.img.lz4 if [[ -e ${compressed_boot_image} ]]; then readonly boot_image=${compressed_boot_image%*.lz4} @@ -262,4 +267,4 @@ parse_args "$@" prepare_raw_images create_extra_files inject_files -launch_vm +launch_vm \ No newline at end of file From a8fd85fbeca9ef0ec985323451c9c684e180f54a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20BETTE?= Date: Sat, 6 May 2023 19:34:30 +0200 Subject: [PATCH 0959/1356] Revert "Add the argument "--product-name" to start-local-vm script to support BUILDSYS_NAME option when building image" This reverts commit 6df8a6722f9b6b352185e1d237177eaeee0ad185. 
--- tools/start-local-vm | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index dd409711..c1ef649f 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -1,10 +1,10 @@ #!/usr/bin/env bash # shellcheck disable=SC2054 # Arrays are formatted for passing args to other tools + shopt -s nullglob arch=${BUILDSYS_ARCH} variant=${BUILDSYS_VARIANT} -product_name=bottlerocket host_port_forwards=tcp::2222-:22 vm_mem=4G vm_cpus=4 @@ -41,9 +41,6 @@ Options: BUILDSYS_ARCH environment variable is set) --variant Bottlerocket variant to run (may be omitted if the BUILDSYS_VARIANT environment variable is set) - --product-name - product name used for file and directory naming used when - building with the "-e BUILDSYS_NAME" option (default is bottlerocket) --host-port-forwards list of host ports to forward to the VM; HOST_PORT_FWDS must be a valid QEMU port forwarding specifier (default @@ -95,8 +92,6 @@ parse_args() { shift; arch=$1 ;; --variant) shift; variant=$1 ;; - --product-name) - shift; product_name=$1 ;; --host-port-forwards) shift; host_port_forwards=$1 ;; --vm-memory) @@ -146,8 +141,8 @@ extract_image() { prepare_raw_images() { local -r image_dir=build/images/${arch}-${variant}/latest - local -r compressed_boot_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 - local -r compressed_data_image=${image_dir}/${product_name}-${variant}-${arch}-data.img.lz4 + local -r compressed_boot_image=${image_dir}/bottlerocket-${variant}-${arch}.img.lz4 + local -r compressed_data_image=${image_dir}/bottlerocket-${variant}-${arch}-data.img.lz4 if [[ -e ${compressed_boot_image} ]]; then readonly boot_image=${compressed_boot_image%*.lz4} @@ -267,4 +262,4 @@ parse_args "$@" prepare_raw_images create_extra_files inject_files -launch_vm \ No newline at end of file +launch_vm From 01ee7b41179b684583be0ff02021471d34cc9344 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20BETTE?= Date: Tue, 9 May 2023 14:50:19 +0200 Subject: [PATCH 0960/1356] start-local-vm: add "--product-name" argument to support BUILDSYS_NAME --- tools/start-local-vm | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index c1ef649f..c316488e 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -5,6 +5,7 @@ shopt -s nullglob arch=${BUILDSYS_ARCH} variant=${BUILDSYS_VARIANT} +product_name=${BUILDSYS_NAME:-bottlerocket} host_port_forwards=tcp::2222-:22 vm_mem=4G vm_cpus=4 @@ -41,6 +42,10 @@ Options: BUILDSYS_ARCH environment variable is set) --variant Bottlerocket variant to run (may be omitted if the BUILDSYS_VARIANT environment variable is set) + --product-name + product name used for file and directory naming used when + building with the "-e BUILDSYS_NAME" option; may be omitted if the + BUILDSYS_NAME environment variable is set. 
Otherwise default is bottlerocket if not defined or empty --host-port-forwards list of host ports to forward to the VM; HOST_PORT_FWDS must be a valid QEMU port forwarding specifier (default @@ -92,6 +97,8 @@ parse_args() { shift; arch=$1 ;; --variant) shift; variant=$1 ;; + --product-name) + shift; product_name=$1 ;; --host-port-forwards) shift; host_port_forwards=$1 ;; --vm-memory) @@ -141,8 +148,8 @@ extract_image() { prepare_raw_images() { local -r image_dir=build/images/${arch}-${variant}/latest - local -r compressed_boot_image=${image_dir}/bottlerocket-${variant}-${arch}.img.lz4 - local -r compressed_data_image=${image_dir}/bottlerocket-${variant}-${arch}-data.img.lz4 + local -r compressed_boot_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 + local -r compressed_data_image=${image_dir}/${product_name}-${variant}-${arch}-data.img.lz4 if [[ -e ${compressed_boot_image} ]]; then readonly boot_image=${compressed_boot_image%*.lz4} @@ -262,4 +269,4 @@ parse_args "$@" prepare_raw_images create_extra_files inject_files -launch_vm +launch_vm \ No newline at end of file From 1970d6f24a710f306f578ac053dda68b4eb1a45a Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 May 2023 16:13:01 +0200 Subject: [PATCH 0961/1356] kernel-5.10: cherry-pick fix for CVE-2023-32233 The fix for CVE-2023-32233 is currently only available in the kernel.org upstream 5.10 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Signed-off-by: Markus Boehme --- ...les-deactivate-anonymous-set-from-pr.patch | 125 ++++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 3 + 2 files changed, 128 insertions(+) create mode 100644 packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch diff --git a/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch b/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch new file mode 100644 index 00000000..21f3b178 --- /dev/null +++ b/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch @@ -0,0 +1,125 @@ +From e044a24447189419c3a7ccc5fa6da7516036dc55 Mon Sep 17 00:00:00 2001 +From: Pablo Neira Ayuso +Date: Tue, 2 May 2023 10:25:24 +0200 +Subject: [PATCH] netfilter: nf_tables: deactivate anonymous set from + preparation phase + +commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream. + +Toggle deleted anonymous sets as inactive in the next generation, so +users cannot perform any update on it. Clear the generation bitmask +in case the transaction is aborted. + +The following KASAN splat shows a set element deletion for a bound +anonymous set that has been already removed in the same transaction. + +[ 64.921510] ================================================================== +[ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.924745] Write of size 8 at addr dead000000000122 by task test/890 +[ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253 +[ 64.931120] Call Trace: +[ 64.932699] +[ 64.934292] dump_stack_lvl+0x33/0x50 +[ 64.935908] ? nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.937551] kasan_report+0xda/0x120 +[ 64.939186] ? nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.940814] nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.942452] ? __kasan_slab_alloc+0x2d/0x60 +[ 64.944070] ? nf_tables_setelem_notify+0x190/0x190 [nf_tables] +[ 64.945710] ? 
kasan_set_track+0x21/0x30 +[ 64.947323] nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink] +[ 64.948898] ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] + +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + include/net/netfilter/nf_tables.h | 1 + + net/netfilter/nf_tables_api.c | 12 ++++++++++++ + net/netfilter/nft_dynset.c | 2 +- + net/netfilter/nft_lookup.c | 2 +- + net/netfilter/nft_objref.c | 2 +- + 5 files changed, 16 insertions(+), 3 deletions(-) + +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index e66fee99ed3e..564fbe0c865f 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -507,6 +507,7 @@ struct nft_set_binding { + }; + + enum nft_trans_phase; ++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); + void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding, + enum nft_trans_phase phase); +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 7bb716df7afc..fe51cedd9cc3 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -4479,12 +4479,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, + } + } + ++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) ++{ ++ if (nft_set_is_anonymous(set)) ++ nft_clear(ctx->net, set); ++ ++ set->use++; ++} ++EXPORT_SYMBOL_GPL(nf_tables_activate_set); ++ + void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding, + enum nft_trans_phase phase) + { + switch (phase) { + case NFT_TRANS_PREPARE: ++ if (nft_set_is_anonymous(set)) ++ nft_deactivate_next(ctx->net, set); ++ + set->use--; + return; + case NFT_TRANS_ABORT: +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c +index 8c45e01fecdd..038588d4d80e 100644 +--- a/net/netfilter/nft_dynset.c ++++ b/net/netfilter/nft_dynset.c +@@ -233,7 +233,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx, + { + struct nft_dynset *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_dynset_destroy(const struct nft_ctx *ctx, +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c +index b0f558b4fea5..8bc008ff00cb 100644 +--- a/net/netfilter/nft_lookup.c ++++ b/net/netfilter/nft_lookup.c +@@ -132,7 +132,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx, + { + struct nft_lookup *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_lookup_destroy(const struct nft_ctx *ctx, +diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c +index bc104d36d3bb..25157d8cc250 100644 +--- a/net/netfilter/nft_objref.c ++++ b/net/netfilter/nft_objref.c +@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx, + { + struct nft_objref_map *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_objref_map_destroy(const struct nft_ctx *ctx, +-- +2.25.1 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 51208611..1e3299c4 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -22,6 +22,9 @@ Patch1002: 1002-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch Patch2000: 
2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch +# Backport from v5.10.180 upstream, drop when Amazon Linux base is v5.10.180 or later +Patch5001: 5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From a3dbd15ae661479c9a1459c85776f354a606e838 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 May 2023 16:15:15 +0200 Subject: [PATCH 0962/1356] kernel-5.15: cherry-pick fix for CVE-2023-32233 The fix for CVE-2023-32233 is currently only available in the kernel.org upstream 5.15 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Signed-off-by: Markus Boehme --- ...les-deactivate-anonymous-set-from-pr.patch | 125 ++++++++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 3 + 2 files changed, 128 insertions(+) create mode 100644 packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch diff --git a/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch b/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch new file mode 100644 index 00000000..26050e2e --- /dev/null +++ b/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch @@ -0,0 +1,125 @@ +From 21c2a454486d5e9c1517ecca19266b3be3df73ca Mon Sep 17 00:00:00 2001 +From: Pablo Neira Ayuso +Date: Tue, 2 May 2023 10:25:24 +0200 +Subject: [PATCH] netfilter: nf_tables: deactivate anonymous set from + preparation phase + +commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream. + +Toggle deleted anonymous sets as inactive in the next generation, so +users cannot perform any update on it. Clear the generation bitmask +in case the transaction is aborted. + +The following KASAN splat shows a set element deletion for a bound +anonymous set that has been already removed in the same transaction. + +[ 64.921510] ================================================================== +[ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.924745] Write of size 8 at addr dead000000000122 by task test/890 +[ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253 +[ 64.931120] Call Trace: +[ 64.932699] +[ 64.934292] dump_stack_lvl+0x33/0x50 +[ 64.935908] ? nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.937551] kasan_report+0xda/0x120 +[ 64.939186] ? nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.940814] nf_tables_commit+0xa24/0x1490 [nf_tables] +[ 64.942452] ? __kasan_slab_alloc+0x2d/0x60 +[ 64.944070] ? nf_tables_setelem_notify+0x190/0x190 [nf_tables] +[ 64.945710] ? kasan_set_track+0x21/0x30 +[ 64.947323] nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink] +[ 64.948898] ? 
nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] + +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + include/net/netfilter/nf_tables.h | 1 + + net/netfilter/nf_tables_api.c | 12 ++++++++++++ + net/netfilter/nft_dynset.c | 2 +- + net/netfilter/nft_lookup.c | 2 +- + net/netfilter/nft_objref.c | 2 +- + 5 files changed, 16 insertions(+), 3 deletions(-) + +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index 8def00a04541..22f67ae935e0 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -584,6 +584,7 @@ struct nft_set_binding { + }; + + enum nft_trans_phase; ++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); + void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding, + enum nft_trans_phase phase); +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 822d13e64b32..091df8a7cb1e 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -4839,12 +4839,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, + } + } + ++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) ++{ ++ if (nft_set_is_anonymous(set)) ++ nft_clear(ctx->net, set); ++ ++ set->use++; ++} ++EXPORT_SYMBOL_GPL(nf_tables_activate_set); ++ + void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding, + enum nft_trans_phase phase) + { + switch (phase) { + case NFT_TRANS_PREPARE: ++ if (nft_set_is_anonymous(set)) ++ nft_deactivate_next(ctx->net, set); ++ + set->use--; + return; + case NFT_TRANS_ABORT: +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c +index 87f3af4645d9..29c7ae8789e9 100644 +--- a/net/netfilter/nft_dynset.c ++++ b/net/netfilter/nft_dynset.c +@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx, + { + struct nft_dynset *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_dynset_destroy(const struct nft_ctx *ctx, +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c +index bd3485dd930f..9d18c5428d53 100644 +--- a/net/netfilter/nft_lookup.c ++++ b/net/netfilter/nft_lookup.c +@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx, + { + struct nft_lookup *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_lookup_destroy(const struct nft_ctx *ctx, +diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c +index 94b2327e71dc..3ff91bcaa5f2 100644 +--- a/net/netfilter/nft_objref.c ++++ b/net/netfilter/nft_objref.c +@@ -183,7 +183,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx, + { + struct nft_objref_map *priv = nft_expr_priv(expr); + +- priv->set->use++; ++ nf_tables_activate_set(ctx, priv->set); + } + + static void nft_objref_map_destroy(const struct nft_ctx *ctx, +-- +2.25.1 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index d617f5df..ab5b6eda 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -20,6 +20,9 @@ Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch # Enable INITRAMFS_FORCE config option for our use case. 
Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Backport from v5.15.111 upstream, drop when Amazon Linux base is v5.15.111 or later +Patch5001: 5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 9ba42a5ba228733c49bd2ea635cf96d8d704ad25 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Tue, 23 May 2023 20:06:48 +0000 Subject: [PATCH 0963/1356] pubsys: fix bug preventing multiple ssm promotions --- tools/pubsys/src/aws/promote_ssm/mod.rs | 144 ++++++++++-------------- 1 file changed, 57 insertions(+), 87 deletions(-) diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index bf54cf98..84c240f4 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -211,8 +211,7 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { // write the newly promoted parameters to `ssm_parameter_output` along with the original // parameters if let Some(ssm_parameter_output) = &promote_args.ssm_parameter_output { - append_rendered_parameters(ssm_parameter_output, &set_parameters, source_target_map) - .await?; + append_rendered_parameters(ssm_parameter_output, &set_parameters).await?; } // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= @@ -236,7 +235,6 @@ pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { async fn append_rendered_parameters( ssm_parameters_output: &PathBuf, set_parameters: &HashMap, - source_target_map: HashMap<&String, &String>, ) -> Result<()> { // If the file doesn't exist, assume that there are no existing parameters let parsed_parameters = parse_parameters(&ssm_parameters_output.to_owned()) @@ -251,10 +249,15 @@ async fn append_rendered_parameters( }) .context(error::ParseExistingSsmParametersSnafu { path: ssm_parameters_output, - })?; + })? + // SsmKey contains region information, so we can lose the top-level region. + .into_values() + .fold(HashMap::new(), |mut acc, params| { + acc.extend(params); + acc + }); - let combined_parameters: HashMap> = - combine_parameters(parsed_parameters, set_parameters, source_target_map); + let combined_parameters = merge_parameters(parsed_parameters, set_parameters); write_rendered_parameters( ssm_parameters_output, @@ -270,37 +273,28 @@ async fn append_rendered_parameters( /// Return a HashMap of Region mapped to a HashMap of SsmKey, String pairs, representing the newly /// promoted parameters as well as the original parameters. In case of a parameter collision, /// the parameter takes the promoted value. -fn combine_parameters( - source_parameters: HashMap>, +fn merge_parameters( + source_parameters: HashMap, set_parameters: &HashMap, - source_target_map: HashMap<&String, &String>, ) -> HashMap> { - let mut combined_parameters: HashMap> = HashMap::new(); + let mut combined_parameters = HashMap::new(); + + // Flatten parameters into tuples to simplify processing elements. + fn flatten(parameter: (SsmKey, String)) -> (Region, SsmKey, String) { + let (key, value) = parameter; + (key.region.clone(), key, value) + } source_parameters - .iter() - .flat_map(|(region, parameters)| { - parameters - .iter() - .map(move |(ssm_key, ssm_value)| (region, ssm_key, ssm_value)) - }) + .into_iter() + .map(flatten) + // Process the `set_parameters` second so that they overwrite existing values. 
+ .chain(set_parameters.clone().into_iter().map(flatten)) .for_each(|(region, ssm_key, ssm_value)| { - let add_parameters = vec![ - (ssm_key.clone(), ssm_value.clone()), - ( - SsmKey::new(region.clone(), source_target_map[&ssm_key.name].to_string()), - set_parameters[&SsmKey::new( - region.clone(), - source_target_map[&ssm_key.name].to_string(), - )] - .clone(), - ), - ]; - combined_parameters - .entry(region.clone()) + .entry(region) .or_insert(HashMap::new()) - .extend(add_parameters); + .insert(ssm_key, ssm_value); }); combined_parameters @@ -388,31 +382,30 @@ type Result = std::result::Result; mod test { use std::collections::HashMap; - use crate::aws::{promote_ssm::combine_parameters, ssm::SsmKey}; + use crate::aws::{promote_ssm::merge_parameters, ssm::SsmKey}; use aws_sdk_ssm::Region; #[test] fn combined_parameters() { let existing_parameters = HashMap::from([ ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ]), + SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), ), ( - Region::new("us-east-1"), - HashMap::from([( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - )]), + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), + ), + ( + SsmKey::new( + Region::new("us-east-1"), + "test4-unpromoted-parameter-name".to_string(), + ), + "test4-unpromoted-parameter-value".to_string(), ), ]); let set_parameters = HashMap::from([ @@ -438,18 +431,7 @@ mod test { "test3-parameter-value".to_string(), ), ]); - let test1_parameter_name = "test1-parameter-name".to_string(); - let test2_parameter_name = "test2-parameter-name".to_string(); - let test3_parameter_name = "test3-parameter-name".to_string(); - let test1_parameter_name_promoted = "test1-parameter-name-promoted".to_string(); - let test2_parameter_name_promoted = "test2-parameter-name-promoted".to_string(); - let test3_parameter_name_promoted = "test3-parameter-name-promoted".to_string(); - let source_target_map = HashMap::from([ - (&test1_parameter_name, &test1_parameter_name_promoted), - (&test2_parameter_name, &test2_parameter_name_promoted), - (&test3_parameter_name, &test3_parameter_name_promoted), - ]); - let map = combine_parameters(existing_parameters, &set_parameters, source_target_map); + let map = merge_parameters(existing_parameters, &set_parameters); let expected_map = HashMap::from([ ( Region::new("us-west-2"), @@ -492,6 +474,13 @@ mod test { ), "test3-parameter-value".to_string(), ), + ( + SsmKey::new( + Region::new("us-east-1"), + "test4-unpromoted-parameter-name".to_string(), + ), + "test4-unpromoted-parameter-value".to_string(), + ), ]), ), ]); @@ -502,24 +491,16 @@ mod test { fn combined_parameters_overwrite() { let existing_parameters = HashMap::from([ ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ]), + 
SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), + "test1-parameter-value".to_string(), ), ( - Region::new("us-east-1"), - HashMap::from([( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - )]), + SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), + "test2-parameter-value".to_string(), + ), + ( + SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), + "test3-parameter-value".to_string(), ), ]); let set_parameters = HashMap::from([ @@ -539,18 +520,7 @@ mod test { "test3-parameter-value".to_string(), ), ]); - let test1_parameter_name = "test1-parameter-name".to_string(); - let test2_parameter_name = "test2-parameter-name".to_string(); - let test3_parameter_name = "test3-parameter-name".to_string(); - let test1_parameter_name_promoted = "test1-parameter-name".to_string(); - let test2_parameter_name_promoted = "test2-parameter-name".to_string(); - let test3_parameter_name_promoted = "test3-parameter-name-promoted".to_string(); - let source_target_map = HashMap::from([ - (&test1_parameter_name, &test1_parameter_name_promoted), - (&test2_parameter_name, &test2_parameter_name_promoted), - (&test3_parameter_name, &test3_parameter_name_promoted), - ]); - let map = combine_parameters(existing_parameters, &set_parameters, source_target_map); + let map = merge_parameters(existing_parameters, &set_parameters); let expected_map = HashMap::from([ ( Region::new("us-west-2"), From d859e8f42449e798f19feff18d79ca040c572cc8 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 23 May 2023 21:29:41 +0000 Subject: [PATCH 0964/1356] kernel-5.10: increase default net.unix.max_dgram_qlen to 512 Increase the kernel's default value for the net.unix.max_dgram_qlen sysctl to 512. This is a change to the kernel rather than a plain sysctl setting since systemd-sysctl only applies settings to the host, while the changed default value in the kernel also applies to every new network namespace. Signed-off-by: Markus Boehme --- ...crease-default-max_dgram_qlen-to-512.patch | 47 +++++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 2 + 2 files changed, 49 insertions(+) create mode 100644 packages/kernel-5.10/1003-af_unix-increase-default-max_dgram_qlen-to-512.patch diff --git a/packages/kernel-5.10/1003-af_unix-increase-default-max_dgram_qlen-to-512.patch b/packages/kernel-5.10/1003-af_unix-increase-default-max_dgram_qlen-to-512.patch new file mode 100644 index 00000000..9363ddad --- /dev/null +++ b/packages/kernel-5.10/1003-af_unix-increase-default-max_dgram_qlen-to-512.patch @@ -0,0 +1,47 @@ +From b3983ebbfa2dc231a2b61092b0a936bd25294239 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Tue, 23 May 2023 21:24:38 +0000 +Subject: [PATCH] af_unix: increase default max_dgram_qlen to 512 + +The net.unix.max_dgram_qlen sysctl has been defined with a default value of +10 since before the current Git history started in 2005. Systems have more +resources these days, and while the default values for other sysctls like +net.core.somaxconn have been adapted, max_dgram_qlen never was. + +Increase the default value for max_dgram_qlen to 512. A large number of +hosts effectively already run with this or a larger value, since systemd +has been making sure it is set to at least 512 since 2015. 
+ +Signed-off-by: Markus Boehme +--- + Documentation/networking/ip-sysctl.rst | 2 +- + net/unix/af_unix.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 252212998..164a65667 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -2688,5 +2688,5 @@ addr_scope_policy - INTEGER + max_dgram_qlen - INTEGER + The maximum length of dgram socket receive queue + +- Default: 10 ++ Default: 512 + +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 28721e957..a5f081ad8 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -2948,7 +2948,7 @@ static int __net_init unix_net_init(struct net *net) + { + int error = -ENOMEM; + +- net->unx.sysctl_max_dgram_qlen = 10; ++ net->unx.sysctl_max_dgram_qlen = 512; + if (unix_sysctl_register(net)) + goto out; + +-- +2.39.2 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 1e3299c4..e3dda0c5 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -17,6 +17,8 @@ Source103: config-bottlerocket-vmware Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1002: 1002-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Increase default of sysctl net.unix.max_dgram_qlen to 512. +Patch1003: 1003-af_unix-increase-default-max_dgram_qlen-to-512.patch # Add zstd support for compressed kernel modules Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch From 5cc487fe24f2ec9a496fa0e1a076c6386701ef47 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 23 May 2023 21:00:28 +0000 Subject: [PATCH 0965/1356] kernel-5.15: increase default net.unix.max_dgram_qlen to 512 Increase the kernel's default value for the net.unix.max_dgram_qlen sysctl to 512. This is a change to the kernel rather than a plain sysctl setting since systemd-sysctl only applies settings to the host, while the changed default value in the kernel also applies to every new network namespace. Signed-off-by: Markus Boehme --- ...crease-default-max_dgram_qlen-to-512.patch | 47 +++++++++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 2 + 2 files changed, 49 insertions(+) create mode 100644 packages/kernel-5.15/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch diff --git a/packages/kernel-5.15/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch b/packages/kernel-5.15/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch new file mode 100644 index 00000000..888ebbd3 --- /dev/null +++ b/packages/kernel-5.15/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch @@ -0,0 +1,47 @@ +From e36140bfb2795377360bb92c343b10c717567c62 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Tue, 23 May 2023 17:16:44 +0000 +Subject: [PATCH] af_unix: increase default max_dgram_qlen to 512 + +The net.unix.max_dgram_qlen sysctl has been defined with a default value of +10 since before the current Git history started in 2005. Systems have more +resources these days, and while the default values for other sysctls like +net.core.somaxconn have been adapted, max_dgram_qlen never was. + +Increase the default value for max_dgram_qlen to 512. A large number of +hosts effectively already run with this or a larger value, since systemd +has been making sure it is set to at least 512 since 2015. 
+ +Signed-off-by: Markus Boehme +--- + Documentation/networking/ip-sysctl.rst | 2 +- + net/unix/af_unix.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 7890b395e..54a0be396 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -2885,5 +2885,5 @@ plpmtud_probe_interval - INTEGER + max_dgram_qlen - INTEGER + The maximum length of dgram socket receive queue + +- Default: 10 ++ Default: 512 + +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index a96026dbd..267ee6d29 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -3343,7 +3343,7 @@ static int __net_init unix_net_init(struct net *net) + { + int error = -ENOMEM; + +- net->unx.sysctl_max_dgram_qlen = 10; ++ net->unx.sysctl_max_dgram_qlen = 512; + if (unix_sysctl_register(net)) + goto out; + +-- +2.39.2 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index ab5b6eda..d9d733e1 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -19,6 +19,8 @@ Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Increase default of sysctl net.unix.max_dgram_qlen to 512. +Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # Backport from v5.15.111 upstream, drop when Amazon Linux base is v5.15.111 or later Patch5001: 5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch From 759f181a69685702a795d6bb68b9a9bcb1368aed Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 25 May 2023 18:47:36 +0000 Subject: [PATCH 0966/1356] build: clean up boilerplate Previously we used empty `pkg.rs` and `lib.rs` files to carry out a no-op build. Since `rustc` is equally happy compiling an empty file in another directory, we can use `/dev/null` instead. Likewise, we used a copy of the same build script in all variants and almost all packages. Instead, we can have a single copy of the script in the parent directories and refer to it via relative path. 
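Editor's note, for context on the file moves below: every per-package build script deleted in this change contained the same no-op wrapper that simply invokes buildsys, and the shared packages/build.rs (renamed from packages/grub/build.rs with 100% similarity) carries those same contents. A minimal sketch of that wrapper, reproduced from the removed files:

    use std::process::{exit, Command};

    // Delegate the actual package build to buildsys and propagate failure
    // through the process exit code.
    fn main() -> Result<(), std::io::Error> {
        let ret = Command::new("buildsys").arg("build-package").status()?;
        if !ret.success() {
            exit(1);
        }
        Ok(())
    }

Each package's Cargo.toml now points at this single copy via `build = "../build.rs"`, and the unused library path is satisfied with `/dev/null`, as the diffs below show.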
Signed-off-by: Ben Cressey --- packages/{grub => }/build.rs | 0 packages/grub/Cargo.toml | 4 ++-- packages/grub/pkg.rs | 1 - packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/build.rs | 9 --------- packages/kernel-5.10/pkg.rs | 1 - packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/build.rs | 9 --------- packages/kernel-5.15/pkg.rs | 1 - packages/kmod-5.10-nvidia/Cargo.toml | 4 ++-- packages/kmod-5.10-nvidia/build.rs | 9 --------- packages/kmod-5.10-nvidia/pkg.rs | 1 - packages/kmod-5.15-nvidia/Cargo.toml | 4 ++-- packages/kmod-5.15-nvidia/build.rs | 9 --------- packages/kmod-5.15-nvidia/pkg.rs | 1 - packages/microcode/Cargo.toml | 4 ++-- packages/microcode/build.rs | 9 --------- packages/microcode/pkg.rs | 1 - 18 files changed, 12 insertions(+), 63 deletions(-) rename packages/{grub => }/build.rs (100%) delete mode 100644 packages/grub/pkg.rs delete mode 100644 packages/kernel-5.10/build.rs delete mode 100644 packages/kernel-5.10/pkg.rs delete mode 100644 packages/kernel-5.15/build.rs delete mode 100644 packages/kernel-5.15/pkg.rs delete mode 100644 packages/kmod-5.10-nvidia/build.rs delete mode 100644 packages/kmod-5.10-nvidia/pkg.rs delete mode 100644 packages/kmod-5.15-nvidia/build.rs delete mode 100644 packages/kmod-5.15-nvidia/pkg.rs delete mode 100644 packages/microcode/build.rs delete mode 100644 packages/microcode/pkg.rs diff --git a/packages/grub/build.rs b/packages/build.rs similarity index 100% rename from packages/grub/build.rs rename to packages/build.rs diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 8b6382c8..630025a9 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -3,10 +3,10 @@ name = "grub" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [lib] -path = "pkg.rs" +path = "/dev/null" [[package.metadata.build-package.external-files]] url = "https://al2022-repos-us-west-2-9761ab97.s3.dualstack.us-west-2.amazonaws.com/blobstore/aa41fdf9982b65a4c4dad5df5b49ba143b1710d60f82688221966f3c790c6c63/grub2-2.06-42.amzn2022.0.1.src.rpm" diff --git a/packages/grub/pkg.rs b/packages/grub/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/grub/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index f3150c7d..bf6584fc 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -3,14 +3,14 @@ name = "kernel-5_10" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [package.metadata.build-package] variant-sensitive = "platform" package-name = "kernel-5.10" [lib] -path = "pkg.rs" +path = "/dev/null" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
diff --git a/packages/kernel-5.10/build.rs b/packages/kernel-5.10/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/kernel-5.10/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/kernel-5.10/pkg.rs b/packages/kernel-5.10/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/kernel-5.10/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index c5f96ac3..8f828372 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -3,14 +3,14 @@ name = "kernel-5_15" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [package.metadata.build-package] variant-sensitive = "platform" package-name = "kernel-5.15" [lib] -path = "pkg.rs" +path = "/dev/null" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. diff --git a/packages/kernel-5.15/build.rs b/packages/kernel-5.15/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/kernel-5.15/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/kernel-5.15/pkg.rs b/packages/kernel-5.15/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/kernel-5.15/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 65b2a124..40dcadde 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -3,10 +3,10 @@ name = "kmod-5_10-nvidia" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [lib] -path = "pkg.rs" +path = "/dev/null" [package.metadata.build-package] package-name = "kmod-5.10-nvidia" diff --git a/packages/kmod-5.10-nvidia/build.rs b/packages/kmod-5.10-nvidia/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/kmod-5.10-nvidia/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/kmod-5.10-nvidia/pkg.rs b/packages/kmod-5.10-nvidia/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/kmod-5.10-nvidia/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index ca7084f0..f8ccbdea 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -3,10 +3,10 @@ name = "kmod-5_15-nvidia" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [lib] -path = "pkg.rs" +path = "/dev/null" [package.metadata.build-package] package-name = "kmod-5.15-nvidia" diff --git a/packages/kmod-5.15-nvidia/build.rs b/packages/kmod-5.15-nvidia/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/kmod-5.15-nvidia/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = 
Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/kmod-5.15-nvidia/pkg.rs b/packages/kmod-5.15-nvidia/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/kmod-5.15-nvidia/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index 1df40a91..5380a1ad 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -3,10 +3,10 @@ name = "microcode" version = "0.1.0" edition = "2021" publish = false -build = "build.rs" +build = "../build.rs" [lib] -path = "pkg.rs" +path = "/dev/null" # Use latest-srpm-urls.sh to get these. diff --git a/packages/microcode/build.rs b/packages/microcode/build.rs deleted file mode 100644 index cad8999a..00000000 --- a/packages/microcode/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::process::{exit, Command}; - -fn main() -> Result<(), std::io::Error> { - let ret = Command::new("buildsys").arg("build-package").status()?; - if !ret.success() { - exit(1); - } - Ok(()) -} diff --git a/packages/microcode/pkg.rs b/packages/microcode/pkg.rs deleted file mode 100644 index d799fb2d..00000000 --- a/packages/microcode/pkg.rs +++ /dev/null @@ -1 +0,0 @@ -// not used From 797f5b0b52703c113eb0d2afdfe35d2fb14c92cd Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 1 Jun 2023 15:46:40 -0700 Subject: [PATCH 0967/1356] actions workflows: remove GOPROXY=direct from env We've been running into 502s randomly during 'go mod vendor' for our Bottlerocket builds. This backs out the change to always use upstream sources for our go modules so that we reach out to the default go module proxy first in hopes of avoiding this error. --- .github/workflows/build.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a568c67e..f9cc7d0d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,14 +22,6 @@ concurrency: group: ${{ github.ref }} cancel-in-progress: true -env: - # When Go packages are built, buildsys will vendor in dependent Go code for - # that package and bundle it up in a tarball. This env variable is consumed - # and used to configure Go to directly download code from its upstream source. - # This is a useful early signal during GitHub actions to see if there are - # upstream Go code problems. - GOPROXY: direct - jobs: list-variants: if: github.repository == 'bottlerocket-os/bottlerocket' From 5e08a5d265f83c9faae32b508ed90d745745fd78 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 17 May 2023 21:42:04 +0000 Subject: [PATCH 0968/1356] actions: Make variant list modular In order to better organize our action definitions, and to help promote the ability to reuse some common code, this moves the current "build" step of getting the list of variants into it's own action that can be called in one or more workflows. 
Signed-off-by: Sean McGinnis --- .github/actions/list-variants/action.yml | 24 ++++++++++++++++++++++++ .github/workflows/build.yml | 16 ++++++---------- 2 files changed, 30 insertions(+), 10 deletions(-) create mode 100644 .github/actions/list-variants/action.yml diff --git a/.github/actions/list-variants/action.yml b/.github/actions/list-variants/action.yml new file mode 100644 index 00000000..a6022a0d --- /dev/null +++ b/.github/actions/list-variants/action.yml @@ -0,0 +1,24 @@ +name: "List active variants" +description: "Dynamically determines current Bottlerocket variants based on repo contents." +outputs: + variants: + description: A list of all variants defined in the repo + value: ${{ steps.get-variants.outputs.variants }} + aarch-enemies: + description: Variants that should not run for aarch64 + value: ${{ steps.get-variants.outputs.aarch-enemies }} +runs: + using: "composite" + steps: + - uses: actions/checkout@v3 + - id: get-variants + name: Determine variants + shell: bash + run: | + cd variants + output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | jq -R -s -c 'split("\n")[:-1]')" + echo $output + echo $output >> $GITHUB_OUTPUT + output="aarch-enemies=$(ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]')" + echo $output + echo $output >> $GITHUB_OUTPUT diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f9cc7d0d..a252562c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,23 +24,18 @@ concurrency: jobs: list-variants: + # This needs to be its own job since the build job needs its output before + # it can initialize if: github.repository == 'bottlerocket-os/bottlerocket' + name: "Determine variants" runs-on: ubuntu-latest outputs: variants: ${{ steps.get-variants.outputs.variants }} aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} steps: - uses: actions/checkout@v3 - - id: get-variants - name: Determine variants - run: | - cd variants - output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | jq -R -s -c 'split("\n")[:-1]')" - echo $output - echo $output >> $GITHUB_OUTPUT - output="aarch-enemies=$(ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]')" - echo $output - echo $output >> $GITHUB_OUTPUT + - uses: ./.github/actions/list-variants + id: get-variants build: needs: list-variants @@ -54,6 +49,7 @@ jobs: arch: [x86_64, aarch64] exclude: ${{ fromJson(needs.list-variants.outputs.aarch-enemies) }} fail-fast: false + name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" steps: - name: Preflight step to set up the runner run: | From 3ed44f9ef293335e7643bdceec7d3f8cdeebc0b8 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 17 May 2023 22:26:12 +0000 Subject: [PATCH 0969/1356] actions: Make cache setup modular This moves some of our cache and other node setup to a modular action that can be consumed by multiple workflows. This allows the same logic to be used while keeping only one place to make any updates. 
Signed-off-by: Sean McGinnis --- .github/actions/setup-node/action.yml | 47 +++++++++++++++++++++++++++ .github/workflows/build.yml | 38 ++-------------------- .github/workflows/cache.yml | 31 ++---------------- 3 files changed, 53 insertions(+), 63 deletions(-) create mode 100644 .github/actions/setup-node/action.yml diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml new file mode 100644 index 00000000..d6db96ed --- /dev/null +++ b/.github/actions/setup-node/action.yml @@ -0,0 +1,47 @@ +name: "Node setup" +description: "Performs setup for caching and other common needs." +inputs: + perform-cache-cleanup: + description: "Whether to perform cache cleanup" + required: false + default: false + type: boolean +runs: + using: "composite" + steps: + - run: | + echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV + sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool + shell: bash + - uses: actions/cache@v3 + # Cache `cargo-make`, `cargo-cache`, `cargo-sweep` + with: + path: | + ~/.cargo + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} + - uses: actions/cache@v3 + # Cache first-party code dependencies + with: + path: | + .cargo + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} + - uses: actions/cache@v3 + # Cache 'tools/' dependencies and build artifacts + with: + path: | + tools/bin + tools/.crates.toml + tools/.crates2.json + tools/target + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} + - run: cargo install --locked --version 0.36.0 cargo-make + shell: bash + - run: cargo install --locked --version 0.6.2 cargo-sweep + shell: bash + - if: ${{ inputs.perform-cache-cleanup }} + run: cargo install --locked --version 0.8.3 --no-default-features --features ci-autoclean cargo-cache + shell: bash + - run: | + cargo sweep -i -r tools/ + cargo sweep -t 7 -r tools/ + shell: bash diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a252562c..4fbea4f4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,42 +51,9 @@ jobs: fail-fast: false name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" steps: - - name: Preflight step to set up the runner - run: | - echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV - sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool - uses: actions/checkout@v3 - # Cache `cargo-make`, `cargo-cache`, `cargo-sweep` - - uses: actions/cache@v3 - with: - path: | - ~/.cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - # Cache first-party rust code crate dependencies - - uses: actions/cache@v3 - with: - path: | - .cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }}-${{ hashFiles('.github/workflows/build.yml') }} - restore-keys: | - ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} - # Cache 'tools/' dependencies and build artifacts - - uses: actions/cache@v3 - with: - path: | - tools/bin - tools/.crates.toml - tools/.crates2.json - tools/target - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }}-${{ hashFiles('.github/workflows/build.yml') }} - restore-keys: | - ${{ 
hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} - - run: rustup component add rustfmt - - run: cargo install --version 0.36.0 cargo-make - - run: cargo install --version 0.6.2 cargo-sweep - - run: | - cargo sweep -i -r tools/ - cargo sweep -t 7 -r tools/ + - name: Preflight step to set up the runner + uses: ./.github/actions/setup-node - if: contains(matrix.variant, 'nvidia') run: | cat <<-EOF > Licenses.toml @@ -96,6 +63,7 @@ jobs: { path = "NVIDIA", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } ] EOF + - run: rustup component add rustfmt - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt # Avoid running Go lint check via `cargo make check-lints` since there's a separate golangci-lint workflow diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index e4a8f3de..2c7399cc 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -21,36 +21,11 @@ jobs: labels: bottlerocket_ubuntu-latest_16-core continue-on-error: true steps: - - run: | - echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV - uses: actions/checkout@v3 - # Cache `cargo-make`, `cargo-cache`, `cargo-sweep` - - uses: actions/cache@v3 + - name: Preflight step to set up the runner + uses: ./.github/actions/setup-node with: - path: | - ~/.cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - # Cache first-party code dependencies - - uses: actions/cache@v3 - with: - path: | - .cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} - # Cache 'tools/' dependencies and build artifacts - - uses: actions/cache@v3 - with: - path: | - tools/bin - tools/.crates.toml - tools/.crates2.json - tools/target - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} - - run: cargo install --locked --version 0.36.0 cargo-make - - run: cargo install --locked --version 0.8.3 --no-default-features --features ci-autoclean cargo-cache - - run: cargo install --locked --version 0.6.2 cargo-sweep - - run: | - cargo sweep -i -r tools/ - cargo sweep -t 7 -r tools/ + perform-cache-cleanup: true - run: cargo make publish-setup-tools - run: cargo make publish-tools - run: cargo make build-tools From 3cac1600bee9bc27342b23505c35ff2bbae66945 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 8 Jun 2023 13:29:24 +0000 Subject: [PATCH 0970/1356] tools/diff-kernel-config: Fixup example to reflect current syntax Fix the help message to reflect the current interface in the example. In 43234d3cc46b1b tools/diff-kernel-config: Adjust script to work on variants we moved from specifying kernel versions to compare to comparing kernels bound to specific variants. We forgot to adjust the example accordingly. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 24b5a4f9..7e2c200c 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -40,10 +40,11 @@ Compare kernel configurations before and after a series of commits. 
Example invocation: - This compares the config changes for kernels 5.10 and 5.15 before and - after the most recent commit has been applied: + This compares the config changes for kernels 5.10 (through metal-k8s-1.23) + and 5.15 (through metal-k8s-1.26) before and after the most recent commit + has been applied: - $0 -b HEAD^ -a HEAD -k 5.10 -k 5.15 -o configs + $0 -b HEAD^ -a HEAD -v metal-k8s-1.23 -v metal-k8s-1.26 -o configs Notes: From df7b94de49041e826a34a64faa98d6effe2ad68b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 8 Jun 2023 19:39:25 +0000 Subject: [PATCH 0971/1356] build: generate empty bootconfig in rpm2img Now that `bootconfig` is in the SDK, we don't need to rely on having the file in the repo checkout. Signed-off-by: Ben Cressey --- tools/bootconfig/empty-bootconfig.data | Bin 40 -> 0 bytes tools/rpm2img | 13 ++++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) delete mode 100644 tools/bootconfig/empty-bootconfig.data diff --git a/tools/bootconfig/empty-bootconfig.data b/tools/bootconfig/empty-bootconfig.data deleted file mode 100644 index c184eba1a6c1b48ca5c5714544edfc47768da0f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40 scmc~!Ey_#HQK+uv%FN3w0nrQ$A`A=+m23 "${PRIVATE_MOUNT}/bootconfig.in" +kernel {} +init {} +EOF +touch "${PRIVATE_MOUNT}/bootconfig.data" +bootconfig -a "${PRIVATE_MOUNT}/bootconfig.in" "${PRIVATE_MOUNT}/bootconfig.data" +rm "${PRIVATE_MOUNT}/bootconfig.in" + # Targeted toward the current API server implementation. # Relative to the ext4 defaults, we: # - adjust the inode ratio since we expect lots of small files From 60b504cd2c0724557647bde5c6217e5ae5876fed Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 12 Jun 2023 13:17:10 +0000 Subject: [PATCH 0972/1356] trivial: Various spelling typos Minor clean up to fix spelling errors or typos. Signed-off-by: Sean McGinnis --- README.md | 2 +- tools/buildsys/src/gomod.rs | 4 ++-- tools/buildsys/src/manifest.rs | 2 +- tools/testsys/src/crds.rs | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 17b6ff47..40511341 100644 --- a/README.md +++ b/README.md @@ -900,7 +900,7 @@ enable-unprivileged-ports = true ``` The following allows for custom DNS settings, which are used to generate the `/etc/resolv.conf`. -If either DNS setting is not populated, the system will use the DHCP lease of the primary interface to gather these setings. +If either DNS setting is not populated, the system will use the DHCP lease of the primary interface to gather these settings. See the `resolv.conf` [man page](https://man7.org/linux/man-pages/man5/resolv.conf.5.html) for more detail. * `settings.dns.name-servers`: An array of IP address strings that represent the desired name server(s). diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index ae5b606b..de4b89c8 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -44,9 +44,9 @@ const GO_MOD_DOCKER_SCRIPT_NAME: &str = "docker-go-script.sh"; // // This script exists as an in memory template string literal and is populated // into a temporary file in the package directory itself to enable buildsys to -// be as portable as possible and have no dependecy on runtime paths. Since +// be as portable as possible and have no dependency on runtime paths. 
Since // buildsys is executed from the context of many different package directories, -// managing a temporary file via this Rust module prevents having to aquire the +// managing a temporary file via this Rust module prevents having to acquire the // path of some static script file on the host system. const GO_MOD_SCRIPT_TMPL: &str = r###"#!/bin/bash diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 7125725d..bf4ac68f 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -54,7 +54,7 @@ omitted since the single top-level directory will authomatically be used. `bundle-output-path` is an optional argument that provides the desired path of the output archive. By default, this will use the name of the existing archive, -but pre-pended with "bundled-". For example, if "my-unique-archive-name.tar.gz" +but prepended with "bundled-". For example, if "my-unique-archive-name.tar.gz" is entered as the value for `bundle-output-path`, then the output directory will be named `my-unique-archive-name.tar.gz`. Or, by default, given the name of some upstream archive is "my-package.tar.gz", the output archive would be diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs index 29f5aa0a..069a7eef 100644 --- a/tools/testsys/src/crds.rs +++ b/tools/testsys/src/crds.rs @@ -485,7 +485,7 @@ pub(crate) trait CrdCreator: Sync { &mut self .additional_fields(&test_type.to_string()) .into_iter() - // Add the image id incase it is needed for cluster creation + // Add the image id in case it is needed for cluster creation .chain(Some(("image-id".to_string(), image_id.clone())).into_iter()) .collect::>(), )?, From e202ce21fe6247a9681b1d105b0f3d3debde7a65 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 8 Jun 2023 21:12:37 +0000 Subject: [PATCH 0973/1356] build: retry builds for createrepo_c error RPMs from package builds go into a shared host directory which is mounted into subsequent package builds. It is possible for new RPMs to be copied in just as `createrepo_c` is running, which can cause it to read partial RPMs and return an error. Since the partial RPMs can't be required dependencies - or else the build would not have started yet - it would be possible to ignore the `createrepo_c` errors and continue with whatever repo was created. However, this could mask other errors that might not be expected. `buildsys` copies RPMs into a different directory on the filesystem before renaming them into the shared directory. Calling `sync()` on one or both directories might help, but would hurt the performance of all builds for what seems to be a rare error. Instead, treat the specific `rpmReadPackageFile()` error as retryable and attempt the build again. If a genuinely malformed RPM is written to the shared directory, the builds will ultimately fail when retries are exhausted. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 01d83887..bb3d7353 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -68,6 +68,19 @@ lazy_static! { static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)^unexpected EOF$").unwrap(); } +/* +Sometimes new RPMs are not fully written to the host directory before another build starts, which +exposes `createrepo_c` to partially-written RPMs that cannot be added to the repo metadata. 
Retry +these errors by restarting the build since the alternatives are to ignore the `createrepo_c` exit +code (masking other problems) or aggressively `sync()` the host directory (hurting performance). +*/ +lazy_static! { + static ref CREATEREPO_C_READ_HEADER_ERROR: Regex = Regex::new(®ex::escape( + r#"C_CREATEREPOLIB: Warning: read_header: rpmReadPackageFile() error"# + )) + .unwrap(); +} + static DOCKER_BUILD_MAX_ATTEMPTS: NonZeroU16 = nonzero!(10u16); pub(crate) struct PackageBuilder; @@ -300,6 +313,7 @@ fn build( &*DOCKER_BUILD_FRONTEND_ERROR, &*DOCKER_BUILD_DEAD_RECORD_ERROR, &*UNEXPECTED_EOF_ERROR, + &*CREATEREPO_C_READ_HEADER_ERROR, ], }, )?; From bff5eefa41d8e1ca3a896ceb8225c5f82609b8cb Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 8 Jun 2023 23:32:11 +0000 Subject: [PATCH 0974/1356] build: fix retry for unexpected EOF Each line in stdout is prefixed by a timestamp, so "unexpected EOF" will never occur at the start of a line. Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index bb3d7353..29048412 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -65,7 +65,7 @@ We use (?m) for multi-line mode so we can match the message on a line of its own the output ourselves; we match the regexes against the whole of stdout. */ lazy_static! { - static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)^unexpected EOF$").unwrap(); + static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)unexpected EOF$").unwrap(); } /* From 286fb272050b543ee8277d939aa2189e84023b97 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 26 May 2023 17:55:23 +0000 Subject: [PATCH 0975/1356] pubsys: refactor promote_ssm::merge_parameters --- tools/pubsys/src/aws/promote_ssm/mod.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 84c240f4..fc4530a8 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -279,20 +279,14 @@ fn merge_parameters( ) -> HashMap> { let mut combined_parameters = HashMap::new(); - // Flatten parameters into tuples to simplify processing elements. - fn flatten(parameter: (SsmKey, String)) -> (Region, SsmKey, String) { - let (key, value) = parameter; - (key.region.clone(), key, value) - } - source_parameters .into_iter() - .map(flatten) // Process the `set_parameters` second so that they overwrite existing values. - .chain(set_parameters.clone().into_iter().map(flatten)) - .for_each(|(region, ssm_key, ssm_value)| { + .chain(set_parameters.clone().into_iter()) + .for_each(|(ssm_key, ssm_value)| { combined_parameters - .entry(region) + // The `entry()` API demands that we clone + .entry(ssm_key.region.clone()) .or_insert(HashMap::new()) .insert(ssm_key, ssm_value); }); From c2c15ea8b2600103a5626b0dcd6381a4fae0c734 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 13 Jun 2023 16:51:05 +0000 Subject: [PATCH 0976/1356] pubsys: Make AMI already registered message more visible Pubsys emits a message when asked to publish an AMI that is already found. This is currently a single line message that blends in with the other output when doing `cargo make ami` and makes it very easy to miss. 
Normally this is good, but when actively developing and making local code changes, this can make it more likely that the developer will not notice the message and spend time trying to understand why their code changes are not visible when running new instances using this AMI. This changes the warning message to be a little more visible so it is much more noticable when trying to publish an AMI that is already registered to try to avoid these cases. Signed-off-by: Sean McGinnis --- tools/pubsys/src/aws/ami/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 8f47c693..ea511ebb 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -33,6 +33,8 @@ use std::path::PathBuf; use structopt::{clap, StructOpt}; use wait::wait_for_ami; +const WARN_SEPARATOR: &str = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; + /// Builds Bottlerocket AMIs using latest build artifacts #[derive(Debug, StructOpt)] #[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] @@ -146,8 +148,8 @@ async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { warn!( - "Found '{}' already registered in {}: {}", - ami_args.name, base_region, found_id + "\n{}\n\nFound '{}' already registered in {}: {}\n\n{0}", + WARN_SEPARATOR, ami_args.name, base_region, found_id ); let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) .await From 0da1dd72985e0b307bb7aafae297d08551b3b142 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 8 Mar 2023 16:08:55 +0000 Subject: [PATCH 0977/1356] packages: Add kernel-6.1 sources Signed-off-by: Leonard Foerster --- ...-prepare-target-for-external-modules.patch | 51 ++++ ...de-tools-build-targets-from-external.patch | 64 +++++ ...-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch | 50 ++++ packages/kernel-6.1/Cargo.toml | 21 ++ packages/kernel-6.1/README.md | 16 ++ packages/kernel-6.1/config-bottlerocket | 197 +++++++++++++ packages/kernel-6.1/config-bottlerocket-aws | 13 + packages/kernel-6.1/config-bottlerocket-metal | 124 ++++++++ .../kernel-6.1/config-bottlerocket-vmware | 0 packages/kernel-6.1/kernel-6.1.spec | 272 ++++++++++++++++++ packages/kernel-6.1/latest-srpm-url.sh | 6 + 11 files changed, 814 insertions(+) create mode 100644 packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch create mode 100644 packages/kernel-6.1/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch create mode 100644 packages/kernel-6.1/1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch create mode 100644 packages/kernel-6.1/Cargo.toml create mode 100644 packages/kernel-6.1/README.md create mode 100644 packages/kernel-6.1/config-bottlerocket create mode 100644 packages/kernel-6.1/config-bottlerocket-aws create mode 100644 packages/kernel-6.1/config-bottlerocket-metal create mode 100644 packages/kernel-6.1/config-bottlerocket-vmware create mode 100644 packages/kernel-6.1/kernel-6.1.spec create mode 100755 packages/kernel-6.1/latest-srpm-url.sh diff --git a/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch new file mode 100644 index 00000000..4a52dd19 --- /dev/null +++ b/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch @@ -0,0 +1,51 @@ +From fc06fd8a1e59838d431c85bc8017c2520bf08695 Mon Sep 17 
00:00:00 2001 +From: Ben Cressey +Date: Mon, 19 Apr 2021 18:46:04 +0000 +Subject: [PATCH] Makefile: add prepare target for external modules + +We need to ensure that native versions of programs like `objtool` are +built before trying to build out-of-tree modules, or else the build +will fail. + +Unlike other distributions, we cannot include these programs in our +kernel-devel archive, because we rely on cross-compilation: these are +"host" programs and may not match the architecture of the target. + +Ideally, out-of-tree builds would run `make prepare` first, so that +these programs could be compiled in the normal fashion. We ship all +the files needed for this to work. However, this requirement is +specific to our use case, and DKMS does not support it. + +Adding a minimal prepare target to the dependency graph causes the +programs to be built automatically and improves compatibility with +existing solutions. + +Signed-off-by: Ben Cressey +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/Makefile b/Makefile +index 23390805e..346b898eb 100644 +--- a/Makefile ++++ b/Makefile +@@ -1874,6 +1874,15 @@ else # KBUILD_EXTMOD + KBUILD_BUILTIN := + KBUILD_MODULES := 1 + ++PHONY += modules_prepare ++modules_prepare: tools/objtool ++ $(Q)$(MAKE) $(build)=scripts/basic ++ $(Q)$(MAKE) $(build)=scripts/dtc ++ $(Q)$(MAKE) $(build)=scripts/mod ++ $(Q)$(MAKE) $(build)=scripts ++ ++prepare: modules_prepare ++ + build-dir := $(KBUILD_EXTMOD) + + compile_commands.json: $(extmod_prefix)compile_commands.json +-- +2.39.1 + diff --git a/packages/kernel-6.1/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch b/packages/kernel-6.1/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch new file mode 100644 index 00000000..ef333085 --- /dev/null +++ b/packages/kernel-6.1/1002-Revert-kbuild-hide-tools-build-targets-from-external.patch @@ -0,0 +1,64 @@ +From 05a7163507930b56804896818c80e92a2454ef4d Mon Sep 17 00:00:00 2001 +From: Arnaldo Garcia Rincon +Date: Wed, 22 Jun 2022 19:26:43 +0000 +Subject: [PATCH] Revert "kbuild: hide tools/ build targets from external + module builds" + +This reverts commit 1bb0b18a06dceee1fdc32161a72e28eab6f011c4 in which +the targets to build "tools/*" were hidden for external modules, but +they are required by the kmod kit since the 'tools/*' binaries are not +distributed as part of the archive. 
+ +Signed-off-by: Arnaldo Garcia Rincon +--- + Makefile | 27 ++++++++++++++------------- + 1 file changed, 14 insertions(+), 13 deletions(-) + +diff --git a/Makefile b/Makefile +index 346b898eb..e3d39f7c3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1421,19 +1421,6 @@ ifneq ($(wildcard $(resolve_btfids_O)),) + $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean + endif + +-# Clear a bunch of variables before executing the submake +-ifeq ($(quiet),silent_) +-tools_silent=s +-endif +- +-tools/: FORCE +- $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ +- +-tools/%: FORCE +- $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* +- + # --------------------------------------------------------------------------- + # Kernel selftest + +@@ -2124,6 +2111,20 @@ kernelversion: + image_name: + @echo $(KBUILD_IMAGE) + ++# Clear a bunch of variables before executing the submake ++ ++ifeq ($(quiet),silent_) ++tools_silent=s ++endif ++ ++tools/: FORCE ++ $(Q)mkdir -p $(objtree)/tools ++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ ++ ++tools/%: FORCE ++ $(Q)mkdir -p $(objtree)/tools ++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* ++ + quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) + cmd_rmfiles = rm -rf $(rm-files) + +-- +2.39.1 + diff --git a/packages/kernel-6.1/1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch b/packages/kernel-6.1/1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch new file mode 100644 index 00000000..ce4578b7 --- /dev/null +++ b/packages/kernel-6.1/1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch @@ -0,0 +1,50 @@ +From 002d1909e18b7ca876edd4680ffcf8b59dea6c1b Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Tue, 18 Oct 2022 22:24:52 +0000 +Subject: [PATCH] initramfs: unlink INITRAMFS_FORCE from CMDLINE_{EXTEND,FORCE} + +The motivation given in cff75e0b6fe83 for tying INITRAMFS_FORCE to +either of CMDLINE_{EXTEND,FORCE} was that these options imply an +inflexible bootloader, and that overriding the initramfs image would +also only be necessary for inflexible bootloaders. + +However, with the advent of Boot Config support, distributions that do +not normally use an initramfs may still want to allow an "initrd" to be +passed by the bootloader in order to accept boot configuration data. In +such cases, the CMDLINE_{EXTEND,FORCE} options are not desired because +the bootloader is actually expected to control the kernel command line. + +Unlinking the INITRAMFS_FORCE config option allows Boot Config data to +be passed by the bootloader while still preventing an unexpected +initramfs from overriding the built-in initramfs (if any). 
+ +Signed-off-by: Ben Cressey +--- + usr/Kconfig | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/usr/Kconfig b/usr/Kconfig +index 8bbcf699f..06aac1906 100644 +--- a/usr/Kconfig ++++ b/usr/Kconfig +@@ -24,7 +24,7 @@ config INITRAMFS_SOURCE + + config INITRAMFS_FORCE + bool "Ignore the initramfs passed by the bootloader" +- depends on CMDLINE_EXTEND || CMDLINE_FORCE ++ default n + help + This option causes the kernel to ignore the initramfs image + (or initrd image) passed to it by the bootloader. This is +@@ -32,6 +32,8 @@ config INITRAMFS_FORCE + and is useful if you cannot or don't want to change the image + your bootloader passes to the kernel. + ++ If unsure, say N. ++ + config INITRAMFS_ROOT_UID + int "User ID to map to 0 (user root)" + depends on INITRAMFS_SOURCE!="" +-- +2.39.1 + diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml new file mode 100644 index 00000000..f2dcaedd --- /dev/null +++ b/packages/kernel-6.1/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "kernel-6_1" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[package.metadata.build-package] +variant-sensitive = "platform" +package-name = "kernel-6.1" + +[lib] +path = "/dev/null" + +[[package.metadata.build-package.external-files]] +# Use latest-srpm-url.sh to get this. +url = "https://cdn.amazonlinux.com/al2023/blobstore/7da4ac2135147d0ab97afaa82b81cb7408dc5a2fa8f8dd3e785a908585977bd3/kernel-6.1.19-30.43.amzn2023.src.rpm" +sha512 = "6f2458e5707c7635c86e08de666e1beaab6b6ce133a5cdf084c72c80802d54f4d352784ce97d4832bf9e8e7a74f491a551829806df391604138489ecb9d0ac17" + +[build-dependencies] +microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/README.md b/packages/kernel-6.1/README.md new file mode 100644 index 00000000..a999af73 --- /dev/null +++ b/packages/kernel-6.1/README.md @@ -0,0 +1,16 @@ +# kernel-6.1 + +This package contains the Bottlerocket Linux kernel of the 6.1 series. + + +## Testing of Configuration Changes + +Bottlerocket kernels are built in multiple flavors (e.g. cloud, bare metal) and for multiple architectures (e.g. aarch64, x86_64). +The kernel configuration for any of those combinations might change independently of the others. +Please use `tools/diff-kernel-config` from the main Bottlerocket repository to ensure the configuration for any of the combinations does not change inadvertently. +Changes that can have an effect on the resulting kernel configuration include: + +* explicit kernel configuration changes +* package updates/kernel rebases + +Reviewers on a pull request potentially changing the kernel configuration will appreciate having the report produced by `diff-kernel-config` included in the PR description. diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket new file mode 100644 index 00000000..222e3843 --- /dev/null +++ b/packages/kernel-6.1/config-bottlerocket @@ -0,0 +1,197 @@ +# Because Bottlerocket does not have an initramfs, modules required to mount +# the root filesystem must be set to y. 
+ +# The root filesystem is ext4 +CONFIG_EXT4_FS=y + +# btrfs support for compatibility +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y + +# Support for squashfs used to provide kernel headers with zstd compression +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 + +# NVMe support +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_CORE=y + +# Xen blkfront for Xen-based EC2 platforms +CONFIG_XEN_BLKDEV_FRONTEND=y + +# virtio for local testing with QEMU +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_PCI=y + +# dm-verity and enabling it on the kernel command line +CONFIG_BLK_DEV_DM=y +CONFIG_DAX=y +CONFIG_DM_INIT=y +CONFIG_DM_VERITY=y + +# TCMU/LIO +CONFIG_TCM_USER2=m + +# EFI +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y + +# EFI video +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y + +# yama LSM for ptrace restrictions +CONFIG_SECURITY_YAMA=y + +# Do not allow SELinux to be disabled at boot. +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set + +# Do not allow SELinux to be disabled at runtime. +# CONFIG_SECURITY_SELINUX_DISABLE is not set + +# Do not allow SELinux to use `enforcing=0` behavior. +# CONFIG_SECURITY_SELINUX_DEVELOP is not set + +# Check the protection applied by the kernel for mmap and mprotect, +# rather than the protection requested by userspace. +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 + +# Enable support for the kernel lockdown security module. +CONFIG_SECURITY_LOCKDOWN_LSM=y + +# Enable lockdown early so that if the option is present on the +# kernel command line, it can be enforced. +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y + +# disable integrity measurement architecture +# CONFIG_IMA is not set + +# Disable SafeSetID LSM +# CONFIG_SECURITY_SAFESETID is not set + +# enable /proc/config.gz +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y + +# kernel headers at /sys/kernel/kheaders.tar.xz +CONFIG_IKHEADERS=y + +# BTF debug info at /sys/kernel/btf/vmlinux +CONFIG_DEBUG_INFO_BTF=y + +# We don't want to extend the kernel command line with any upstream defaults; +# Bottlerocket uses a fairly custom setup that needs tight control over it. +# CONFIG_CMDLINE_EXTEND is not set + +# We don't want to unpack the initramfs passed by the bootloader. The intent of +# this option is to ensure that the built-in initramfs is used. Since we do not +# have a built-in initramfs, in practice this means we will never unpack any +# initramfs. +# +# We rely on `CONFIG_BLK_DEV_INITRD` for boot config support, so we can't just +# disable the functionality altogether. +CONFIG_INITRAMFS_FORCE=y + +# Enable ZSTD kernel image compression +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_ZSTD=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_DECOMPRESS_ZSTD=y + +# Enable xz modules compression +# CONFIG_MODULE_COMPRESS_NONE is not set +CONFIG_MODULE_COMPRESS_XZ=y + +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. 
+CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set + +# Add virtio drivers for development setups running as guests in qemu +CONFIG_VIRTIO_CONSOLE=m +CONFIG_HW_RANDOM_VIRTIO=m + +# Add support for IPMI drivers +CONFIG_IPMI_HANDLER=m + +# Add support for bootconfig +CONFIG_BOOT_CONFIG=y + +# Enables support for checkpoint/restore +CONFIG_CHECKPOINT_RESTORE=y + +# Disable unused filesystems. +# CONFIG_AFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_USE_FOR_EXT2=y +# CONFIG_GFS2_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_NFS_V2 is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ZONEFS_FS is not set +# CONFIG_NTFS3_FS is not set + +# Disable unused network protocols. +# CONFIG_AF_RXRPC is not set +# CONFIG_ATM is not set +# CONFIG_CAN is not set +# CONFIG_HSR is not set +# CONFIG_IP_DCCP is not set +# CONFIG_L2TP is not set +# CONFIG_RDS is not set +# CONFIG_RFKILL is not set +# CONFIG_TIPC is not set + +# Disable USB-attached network interfaces, unused in the cloud and on server-grade hardware. +# CONFIG_USB_NET_DRIVERS is not set + +# Disable unused qdiscs +# - sch_cake targets home routers and residential links +# CONFIG_NET_SCH_CAKE is not set + +# Provide minimal iSCSI via TCP support for initiator and target mode +# initiator side +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_SCSI_ISCSI_ATTRS=m +# target side +CONFIG_ISCSI_TARGET=m +# CONFIG_INFINIBAND_ISERT is not set diff --git a/packages/kernel-6.1/config-bottlerocket-aws b/packages/kernel-6.1/config-bottlerocket-aws new file mode 100644 index 00000000..6b4ed404 --- /dev/null +++ b/packages/kernel-6.1/config-bottlerocket-aws @@ -0,0 +1,13 @@ +# Support boot from IDE disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal new file mode 100644 index 00000000..f2628bbb --- /dev/null +++ b/packages/kernel-6.1/config-bottlerocket-metal @@ -0,0 +1,124 @@ +# This file holds all the settings that are specific to hardware enablement +# we do for the metal variants. 
+ +# SATA support +CONFIG_BLK_DEV_SD=y +CONFIG_SATA_AHCI=y +CONFIG_ATA=y +CONFIG_ATA_PIIX=y + +# AMD network support +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set + +# Broadcom network support +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_TIGON3_HWMON=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +CONFIG_BNXT=m + +# Chelsio network support +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +# CONFIG_CHELSIO_T4_DCB is not set +# CONFIG_CHELSIO_INLINE_CRYPTO is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_ISCSI_TARGET_CXGB4 is not set + +# Cisco UCS network support +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_INFINIBAND_USNIC=m + +# Emulex network support +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_BE2NET_HWMON=y + +# Huawei network support +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m + +# Intel network support +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m + +# Intel 10G network support +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_ICE=m +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_PLDMFW=y +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_FM10K=m + +# Mellanox network support +CONFIG_MLXFW=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE_EN=y +CONFIG_NET_SWITCHDEV=y + +# Myricom network support +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y + +# Pensando network support +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m + +# Solarflare network support +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_SRIOV=y +# CONFIG_SFC_MCDI_LOGGING is not set +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_FALCON=m + +# QLogic network support +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QED=m +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +# CONFIG_INFINIBAND_QEDR is not set +# CONFIG_QEDF is not set +# CONFIG_QEDI is not set +# CONFIG_QLA3XXX is not set +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +# CONFIG_QLCNIC_DCB is not set +# CONFIG_QLCNIC_HWMON is not set +# CONFIG_NETXEN_NIC is not set + +# Cisco UCS HBA support +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m + +# LSI Logic's SAS based RAID controllers +CONFIG_SCSI_MPT3SAS=y +CONFIG_MEGARAID_SAS=y + +# Microsemi PQI controllers +CONFIG_SCSI_SMARTPQI=y + +# Support for virtio scsi boot devices for other cloud providers +CONFIG_SCSI_VIRTIO=y diff --git a/packages/kernel-6.1/config-bottlerocket-vmware b/packages/kernel-6.1/config-bottlerocket-vmware new file mode 100644 index 00000000..e69de29b diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec new file mode 100644 index 00000000..ddc80143 --- /dev/null +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -0,0 +1,272 @@ +%global debug_package %{nil} + +Name: %{_cross_os}kernel-6.1 +Version: 6.1.19 +Release: 1%{?dist} +Summary: The Linux kernel +License: GPL-2.0 WITH Linux-syscall-note +URL: https://www.kernel.org/ +# Use latest-srpm-url.sh to get this. +Source0: https://cdn.amazonlinux.com/al2023/blobstore/7da4ac2135147d0ab97afaa82b81cb7408dc5a2fa8f8dd3e785a908585977bd3/kernel-6.1.19-30.43.amzn2023.src.rpm +Source100: config-bottlerocket +Source101: config-bottlerocket-aws +Source102: config-bottlerocket-metal +Source103: config-bottlerocket-vmware + +# Help out-of-tree module builds run `make prepare` automatically. 
+Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch +# Expose tools/* targets for out-of-tree module builds. +Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch +# Enable INITRAMFS_FORCE config option for our use case. +Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch + +BuildRequires: bc +BuildRequires: elfutils-devel +BuildRequires: hostname +BuildRequires: kmod +BuildRequires: openssl-devel + +# CPU microcode updates are included as "extra firmware" so the files don't +# need to be installed on the root filesystem. However, we want the license and +# attribution files to be available in the usual place. +%if "%{_cross_arch}" == "x86_64" +BuildRequires: %{_cross_os}microcode +Requires: %{_cross_os}microcode-licenses +%endif + +# Pull in expected modules and development files. +Requires: %{name}-modules = %{version}-%{release} +Requires: %{name}-devel = %{version}-%{release} + +%global kernel_sourcedir %{_cross_usrsrc}/kernels +%global kernel_libdir %{_cross_libdir}/modules/%{version} + +%description +%{summary}. + +%package devel +Summary: Configured Linux kernel source for module building + +%description devel +%{summary}. + +%package archive +Summary: Archived Linux kernel source for module building + +%description archive +%{summary}. + +%package modules +Summary: Modules for the Linux kernel + +%description modules +%{summary}. + +%package headers +Summary: Header files for the Linux kernel for use by glibc + +%description headers +%{summary}. + +%prep +rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" +tar -xof linux-%{version}.tar; rm linux-%{version}.tar +%setup -TDn linux-%{version} +# Patches from the Source0 SRPM +for patch in ../*.patch; do + patch -p1 <"$patch" +done +# Patches listed in this spec (Patch0001...) +%autopatch -p1 + +%if "%{_cross_arch}" == "x86_64" +microcode="$(find %{_cross_libdir}/firmware -type f -path '*/*-ucode/*' -printf '%%P ')" +cat < ../config-microcode +CONFIG_EXTRA_FIRMWARE="${microcode}" +CONFIG_EXTRA_FIRMWARE_DIR="%{_cross_libdir}/firmware" +EOF +%endif + +export ARCH="%{_cross_karch}" +export CROSS_COMPILE="%{_cross_target}-" + +KCONFIG_CONFIG="arch/%{_cross_karch}/configs/%{_cross_vendor}_defconfig" \ +scripts/kconfig/merge_config.sh \ + ../config-%{_cross_arch} \ +%if "%{_cross_arch}" == "x86_64" + ../config-microcode \ +%endif + %{SOURCE100} \ + %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} + +rm -f ../config-* ../*.patch + +%global kmake \ +make -s\\\ + ARCH="%{_cross_karch}"\\\ + CROSS_COMPILE="%{_cross_target}-"\\\ + INSTALL_HDR_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_PATH="%{buildroot}%{_cross_prefix}"\\\ + INSTALL_MOD_STRIP=1\\\ +%{nil} + +%build +%kmake mrproper +%kmake %{_cross_vendor}_defconfig +%kmake %{?_smp_mflags} %{_cross_kimage} +%kmake %{?_smp_mflags} modules + +%install +%kmake %{?_smp_mflags} headers_install +%kmake %{?_smp_mflags} modules_install + +install -d %{buildroot}/boot +install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz +install -m 0644 .config %{buildroot}/boot/config + +find %{buildroot}%{_cross_prefix} \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +# For out-of-tree kmod builds, we need to support the following targets: +# make scripts -> make prepare -> make modules +# +# This requires enough of the kernel tree to build host programs under the +# "scripts" and "tools" directories. 
+ +# Any existing ELF objects will not work properly if we're cross-compiling for +# a different architecture, so get rid of them to avoid confusing errors. +find arch scripts tools -type f -executable \ + -exec sh -c "head -c4 {} | grep -q ELF && rm {}" \; + +# We don't need to include these files. +find -type f \( -name \*.cmd -o -name \*.gitignore \) -delete + +# Avoid an OpenSSL dependency by stubbing out options for module signing and +# trusted keyrings, so `sign-file` and `extract-cert` won't be built. External +# kernel modules do not have access to the keys they would need to make use of +# these tools. +sed -i \ + -e 's,$(CONFIG_MODULE_SIG_FORMAT),n,g' \ + -e 's,$(CONFIG_SYSTEM_TRUSTED_KEYRING),n,g' \ + scripts/Makefile + +# Restrict permissions on System.map. +chmod 600 System.map + +( + find * \ + -type f \ + \( -name Build\* -o -name Kbuild\* -o -name Kconfig\* -o -name Makefile\* \) \ + -print + + find arch/%{_cross_karch}/ \ + -type f \ + \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ + -print + + find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print + echo arch/%{_cross_karch}/kernel/asm-offsets.s + echo lib/vdso/gettimeofday.c + + for d in \ + arch/%{_cross_karch}/tools \ + arch/%{_cross_karch}/kernel/vdso ; do + [ -d "${d}" ] && find "${d}/" -type f -print + done + + find include -type f -print + find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print + + find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o -print + echo tools/build/fixdep.c + find tools/lib/subcmd -type f -print + find tools/lib/{ctype,hweight,rbtree,string,str_error_r}.c + + echo kernel/bounds.c + echo kernel/time/timeconst.bc + echo security/selinux/include/classmap.h + echo security/selinux/include/initial_sid_to_string.h + echo security/selinux/include/policycap.h + echo security/selinux/include/policycap_names.h + + echo .config + echo Module.symvers + echo System.map +) | sort -u > kernel_devel_files + +# Create squashfs of kernel-devel files (ie. /usr/src/kernels/). +# +# -no-exports: +# The filesystem does not need to be exported via NFS. +# +# -all-root: +# Make all files owned by root rather than the build user. +# +# -comp zstd: +# zstd offers compression ratios like xz and decompression speeds like lz4. +SQUASHFS_OPTS="-no-exports -all-root -comp zstd" +mkdir -p src_squashfs/%{version} +tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} +mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} + +# Create a tarball of the same files, for use outside the running system. +# In theory we could extract these files with `unsquashfs`, but we do not want +# to require it to be installed on the build host, and it errors out when run +# inside Docker unless the limit for open files is lowered. +tar cf kernel-devel.tar src_squashfs/%{version} --transform='s|src_squashfs/%{version}|kernel-devel|' +xz -T0 kernel-devel.tar + +install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs +install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz +install -d %{buildroot}%{kernel_sourcedir} + +# Replace the incorrect links from modules_install. These will be bound +# into a host container (and unused in the host) so they must not point +# to %{_cross_usrsrc} (eg. /x86_64-bottlerocket-linux-gnu/sys-root/...) 
rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source + +%files +%license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note +%{_cross_attribution_file} +/boot/vmlinuz +/boot/config + +%files modules +%dir %{_cross_libdir}/modules +%{_cross_libdir}/modules/* + +%files headers +%dir %{_cross_includedir}/asm +%dir %{_cross_includedir}/asm-generic +%dir %{_cross_includedir}/drm +%dir %{_cross_includedir}/linux +%dir %{_cross_includedir}/misc +%dir %{_cross_includedir}/mtd +%dir %{_cross_includedir}/rdma +%dir %{_cross_includedir}/scsi +%dir %{_cross_includedir}/sound +%dir %{_cross_includedir}/video +%dir %{_cross_includedir}/xen +%{_cross_includedir}/asm/* +%{_cross_includedir}/asm-generic/* +%{_cross_includedir}/drm/* +%{_cross_includedir}/linux/* +%{_cross_includedir}/misc/* +%{_cross_includedir}/mtd/* +%{_cross_includedir}/rdma/* +%{_cross_includedir}/scsi/* +%{_cross_includedir}/sound/* +%{_cross_includedir}/video/* +%{_cross_includedir}/xen/* + +%files devel +%dir %{kernel_sourcedir} +%{_cross_datadir}/bottlerocket/kernel-devel.squashfs + +%files archive +%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%changelog diff --git a/packages/kernel-6.1/latest-srpm-url.sh b/packages/kernel-6.1/latest-srpm-url.sh new file mode 100755 index 00000000..b13fcbf9 --- /dev/null +++ b/packages/kernel-6.1/latest-srpm-url.sh @@ -0,0 +1,6 @@ +#!/bin/sh +cmd='dnf install -q -y --releasever=latest yum-utils && yumdownloader -q --releasever=latest --source --urls kernel' +docker run --rm amazonlinux:2023 sh -c "${cmd}" \ | grep '^http' \ | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ | sed 's_:/_://_' From d45a467410f2ec01992fc79868d81f250cb04026 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 31 May 2023 09:10:58 +0000 Subject: [PATCH 0978/1356] kernel-6.1: Disable FS_ENCRYPTION support Amazon Linux enabled FS_ENCRYPTION with the introduction of kernel-6.1. With Bottlerocket, that might lock us into certain file system choices inadvertently. Disable it for now and rely on block-level encryption through EBS volume encryption or dm-crypt instead. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 222e3843..ccad1088 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -1,6 +1,11 @@ # Because Bottlerocket does not have an initramfs, modules required to mount # the root filesystem must be set to y. +# disable filesystem encryption support as it may lock users into certain +# filesystems inadvertantly. For now EBS volume encryption or dm-crypt seems +# to be the more universal choice. +# CONFIG_FS_ENCRYPTION is not set + # The root filesystem is ext4 CONFIG_EXT4_FS=y From 64a8f4b07f13d45c0a0195e37a382b8baff32166 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 6 Jun 2023 09:57:48 +0000 Subject: [PATCH 0979/1356] kernel-6.1: increase default net.unix.max_dgram_qlen to 512 [port from kernel-5.15: ddc075761b315d76f86668a9552bc524591aaf5e] Increase the kernel's default value for the net.unix.max_dgram_qlen sysctl to 512. 
This is a change to the kernel rather than a plain sysctl setting since systemd-sysctl only applies settings to the host, while the changed default value in the kernel also applies to every new network namespace. Signed-off-by: Markus Boehme Signed-off-by: Leonard Foerster --- ...crease-default-max_dgram_qlen-to-512.patch | 47 +++++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 2 + 2 files changed, 49 insertions(+) create mode 100644 packages/kernel-6.1/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch diff --git a/packages/kernel-6.1/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch b/packages/kernel-6.1/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch new file mode 100644 index 00000000..57eb6758 --- /dev/null +++ b/packages/kernel-6.1/1004-af_unix-increase-default-max_dgram_qlen-to-512.patch @@ -0,0 +1,47 @@ +From 1faecf19a86dbb29b62607b1740ef59a5c35acb2 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Tue, 23 May 2023 17:16:44 +0000 +Subject: [PATCH] af_unix: increase default max_dgram_qlen to 512 + +The net.unix.max_dgram_qlen sysctl has been defined with a default value of +10 since before the current Git history started in 2005. Systems have more +resources these days, and while the default values for other sysctls like +net.core.somaxconn have been adapted, max_dgram_qlen never was. + +Increase the default value for max_dgram_qlen to 512. A large number of +hosts effectively already run with this or a larger value, since systemd +has been making sure it is set to at least 512 since 2015. + +Signed-off-by: Markus Boehme +--- + Documentation/networking/ip-sysctl.rst | 2 +- + net/unix/af_unix.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index e7b3fa7bb..f20837a92 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -3038,5 +3038,5 @@ ecn_enable - BOOLEAN + max_dgram_qlen - INTEGER + The maximum length of dgram socket receive queue + +- Default: 10 ++ Default: 512 + +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index f0c2293f1..3962a66c5 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -3600,7 +3600,7 @@ static int __net_init unix_net_init(struct net *net) + { + int i; + +- net->unx.sysctl_max_dgram_qlen = 10; ++ net->unx.sysctl_max_dgram_qlen = 512; + if (unix_sysctl_register(net)) + goto out; + +-- +2.40.0 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index ddc80143..c770302f 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -19,6 +19,8 @@ Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch # Enable INITRAMFS_FORCE config option for our use case. Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch +# Increase default of sysctl net.unix.max_dgram_qlen to 512. +Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch BuildRequires: bc BuildRequires: elfutils-devel From a0193df1ffdb7b66f58f97722338661d77a0aec3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 7 Jun 2023 15:32:36 +0000 Subject: [PATCH 0980/1356] kernel-6.1: Disable advanced features of ICE NIC driver For most NICs we have been going with the bare driver without any additionally configurable features. 
With the 6.1 kernel we pick up additional features for the already included ICE driver. Disable these features as we have done for other NICs before. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket-metal | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal index f2628bbb..b2f4972c 100644 --- a/packages/kernel-6.1/config-bottlerocket-metal +++ b/packages/kernel-6.1/config-bottlerocket-metal @@ -60,6 +60,8 @@ CONFIG_IGBVF=m CONFIG_I40E=m # CONFIG_I40E_DCB is not set CONFIG_ICE=m +# CONFIG_ICE_HWTS is not set +# CONFIG_ICE_SWITCHDEV is not set # CONFIG_INFINIBAND_IRDMA is not set CONFIG_PLDMFW=y CONFIG_IXGB=m From cccc6d92a72a589e08775fb84b91ad85ffb644ed Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 7 Jun 2023 15:37:20 +0000 Subject: [PATCH 0981/1356] kernel-6.1: Disable DAMON subsystem We currently do not have a use case for the DAMON subsystem. Disable it for now to trim some code we build. If we come across a use case we can add it into our config. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index ccad1088..c501e5f4 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -200,3 +200,6 @@ CONFIG_SCSI_ISCSI_ATTRS=m # target side CONFIG_ISCSI_TARGET=m # CONFIG_INFINIBAND_ISERT is not set + +# Disable DAMON subsystem. We currently do not have a good use-case for DAMON. +# CONFIG_DAMON is not set From eedc8f41eeff0963f5def1751fccc45aff6407aa Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 14 Jun 2023 17:14:36 +0000 Subject: [PATCH 0982/1356] glossary: add shimpei, prairiedog and oci-add-hooks Signed-off-by: Arnaldo Garcia Rincon --- GLOSSARY.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/GLOSSARY.md b/GLOSSARY.md index 86b0cf7d..dd08bfca 100644 --- a/GLOSSARY.md +++ b/GLOSSARY.md @@ -27,6 +27,8 @@ It finds settings that need generation by way of metadata in the API, and calls helper programs specified by that metadata. * [**thar-be-settings**](sources/api/thar-be-settings): A program that writes out system configuration files, replacing template variables with settings from the API. * [**updog**](sources/updater/updog): An update client that interfaces with a specified TUF updates repository to upgrade or downgrade Bottlerocket hosts to different image versions. +* [**prairiedog**](sources/api/prairiedog): A program that handles various boot related operations. +* [**shimpei**](sources/shimpei): An OCI compatible shim wrapper around `oci-add-hooks`. Its sole purpose is to call `oci-add-hooks` with the additional `--hook-config-path` and `--runtime-path` parameters that can't be provided by containerd. ## Non-Bottlerocket terms @@ -42,3 +44,4 @@ * **TUF**: [The Update Framework](https://theupdateframework.io/). A framework that helps developers maintain the security of software update systems. * [**wicked**](https://github.com/openSUSE/wicked): A network interface framework and management system. +* [**oci-add-hooks**](https://github.com/awslabs/oci-add-hooks): An OCI runtime that injects the OCI `prestart`, `poststart`, and `poststop` hooks into a container `config.json` before passing along to an OCI compatible runtime. 
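As a rough illustration of how the `shimpei` and `oci-add-hooks` glossary entries above fit together, the sketch below shows a shim that simply forwards to `oci-add-hooks`, supplying the two parameters containerd cannot pass on its own. The `--hook-config-path` and `--runtime-path` flags come from the glossary entry; the concrete paths and the choice of `runc` are illustrative assumptions only, not the values used by Bottlerocket.

```sh
#!/bin/sh
# Minimal sketch of an oci-add-hooks wrapper shim (assumed paths, not Bottlerocket's).
# containerd invokes this in place of the real runtime; the shim adds the flags
# containerd cannot provide itself, then forwards the remaining arguments unchanged.
exec oci-add-hooks \
    --hook-config-path /etc/shimpei/hook-config.json \
    --runtime-path /usr/bin/runc \
    "$@"
```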
From 4c14a5c8653099a469ec558e5611634c63662890 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Fri, 16 Jun 2023 17:18:26 +0000 Subject: [PATCH 0983/1356] testsys: Upgrade to v0.0.8 --- tools/Cargo.lock | 16 ++++++++-------- tools/deny.toml | 4 ++-- tools/testsys-config/Cargo.toml | 4 ++-- tools/testsys/Cargo.toml | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 3d3f7ae4..6caab0d1 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -687,8 +687,8 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.0.7" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" +version = "0.0.8" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" dependencies = [ "builder-derive", "configuration-derive", @@ -720,8 +720,8 @@ dependencies = [ [[package]] name = "builder-derive" -version = "0.0.7" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" +version = "0.0.8" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" dependencies = [ "proc-macro2", "quote", @@ -916,8 +916,8 @@ dependencies = [ [[package]] name = "configuration-derive" -version = "0.0.7" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" +version = "0.0.8" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" dependencies = [ "quote", "syn 1.0.109", @@ -3254,8 +3254,8 @@ dependencies = [ [[package]] name = "testsys-model" -version = "0.0.7" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.7#241f73d798db903a4736b460f70338f8859b291f" +version = "0.0.8" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" dependencies = [ "async-recursion", "async-trait", diff --git a/tools/deny.toml b/tools/deny.toml index cf87a2f4..74392d3d 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -84,8 +84,8 @@ skip-tree = [ { name = "windows-sys", version = "=0.42.0" }, # TestSys uses a newer version of base64 and serde_yaml - { name = "testsys-model", version = "=0.0.7" }, - { name = "bottlerocket-types", version = "=0.0.7" }, + { name = "testsys-model", version = "=0.0.8" }, + { name = "bottlerocket-types", version = "=0.0.8" }, ] [sources] diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index 9722233d..2835f7a7 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -7,14 +7,14 @@ edition = "2021" publish = false [dependencies] -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } handlebars = "4" home = "0.5" lazy_static = "1" log = "0.4" maplit="1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = 
"v0.0.8"} serde = { version = "1", features = ["derive"] } serde_plain = "1" serde_yaml = "0.8" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index b8f1508a..9e8ec9bb 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -11,7 +11,7 @@ async-trait = "0.1" aws-config = "0.54.1" aws-sdk-ec2 = "0.24" base64 = "0.20" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "3", features = ["derive", "env"] } env_logger = "0.10" @@ -19,7 +19,7 @@ futures = "0.3" handlebars = "4" log = "0.4" maplit = "1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.7", tag = "v0.0.7"} +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } fastrand = "1" serde = { version = "1", features = ["derive"] } From a7627259751cb855b8398311e71155ffee74f868 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 20 Jun 2023 19:34:49 +0000 Subject: [PATCH 0984/1356] Pin GitHub Action rust version to 1.69.0 This adds a step in the node setup to install our expected version of the rust toolchain. Signed-off-by: Sean McGinnis --- .github/actions/setup-node/action.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index d6db96ed..ee036f99 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -34,6 +34,12 @@ runs: tools/.crates2.json tools/target key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} + - name: Setup pinned rust version + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.69.0 + override: true + components: rustfmt, clippy - run: cargo install --locked --version 0.36.0 cargo-make shell: bash - run: cargo install --locked --version 0.6.2 cargo-sweep From f0a50dd7c617a7e27ea5c396ae1770a9fb6a826a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 17 Feb 2023 17:27:40 +0000 Subject: [PATCH 0985/1356] build: set target CPU to target arch for RPMs This causes the RPM package architecture to match the target's rather than the host's. By default, `dnf` and `rpm` will ignore non-native RPMs, but both can be forced to use them. When the package architecture is set correctly, it's not necessary to include the architecture in the package name, and the macros in the SDK no longer do so. Fix up various places in the image build process that assumed the architecture would be present. 
Signed-off-by: Ben Cressey --- tools/rpm2img | 11 +++-------- tools/rpm2migrations | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 09d15667..3841c797 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -199,7 +199,7 @@ if [ "${PARTITION_PLAN}" == "split" ] ; then fi INSTALL_TIME="$(date -u +%Y-%m-%dT%H:%M:%SZ)" -rpm -iv --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm +rpm -iv --ignorearch --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm # inventory installed packages INVENTORY_QUERY="\{\"Name\":\"%{NAME}\"\ @@ -217,13 +217,8 @@ mapfile -t installed_rpms <<< "$(rpm -qa --root "${ROOT_MOUNT}" \ # wrap installed_rpms mapfile into json INVENTORY_DATA="$(jq --raw-output . <<< "${installed_rpms[@]}")" -# replace the package architecture with the target architecture (for cross-compiled builds) -if [[ "${BUILDER_ARCH}" != "${ARCH}" ]]; then - INVENTORY_DATA="$(jq --arg BUILDER_ARCH "${BUILDER_ARCH}" --arg TARGET_ARCH "${ARCH}" \ - '(.Architecture) |= sub($BUILDER_ARCH; $TARGET_ARCH)' <<< "${INVENTORY_DATA}")" -fi -# remove the 'bottlerocket--' prefix from package names -INVENTORY_DATA="$(jq --arg PKG_PREFIX "bottlerocket-${ARCH}-" \ +# remove the 'bottlerocket-' prefix from package names +INVENTORY_DATA="$(jq --arg PKG_PREFIX "bottlerocket-" \ '(.Name) |= sub($PKG_PREFIX; "")' <<< "${INVENTORY_DATA}")" # sort by package name and add 'Content' as top-level INVENTORY_DATA="$(jq --slurp 'sort_by(.Name)' <<< "${INVENTORY_DATA}" | jq '{"Content": .}')" diff --git a/tools/rpm2migrations b/tools/rpm2migrations index 498bc637..87d3d87e 100755 --- a/tools/rpm2migrations +++ b/tools/rpm2migrations @@ -24,7 +24,7 @@ SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" MIGRATIONS_DIR="${ROOT_TEMP}/${SYS_ROOT}/usr/share/migrations" # "Install" the migrations (just puts them in $MIGRATIONS_DIR) -rpm -iv --root "${ROOT_TEMP}" "${PACKAGE_DIR}"/*.rpm +rpm -iv --ignorearch --root "${ROOT_TEMP}" "${PACKAGE_DIR}"/*.rpm if [ ! -d "${MIGRATIONS_DIR}" ]; then echo "Migrations directory does not exist: ${MIGRATIONS_DIR}" From d566566a4d6e3738e0f533925770306dbde80a1c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 17 Feb 2023 17:20:01 +0000 Subject: [PATCH 0986/1356] build: drop "BuildArch: noarch" from all packages This was used as a quick way to disable debuginfo generation, but it creates ambiguity when using the RPM's architecture to indicate what target the package was built for. These packages capture the architecture in the sysroot-based paths used for installed files, so they are not actually arch-agnostic in the sense that "noarch" implies. Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 98513b4f..e417d414 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -69,7 +69,6 @@ BuildRequires: gettext-devel %package modules Summary: Modules for the bootloader with support for Linux and more -BuildArch: noarch %description modules %{summary}. 
From 21e5fde03a9b52d1c8599823d4eda649920cb674 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 19 Jun 2023 22:38:33 +0000 Subject: [PATCH 0987/1356] grub: switch to new arch macros Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index e417d414..c620fd9a 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -2,6 +2,7 @@ %global __strip %{_bindir}/true %global efidir /boot/efi/EFI/BOOT +%global efi_image boot%{_cross_efi_arch}.efi %global biosdir /boot/grub # This is specific to the upstream source RPM, and will likely need to be @@ -194,7 +195,7 @@ mkdir -p %{buildroot}%{efidir} -c %{S:2} \ -d ./grub-core/ \ -O "%{_cross_grub_efi_format}" \ - -o "%{buildroot}%{efidir}/%{_cross_grub_efi_image}" \ + -o "%{buildroot}%{efidir}/%{efi_image}" \ -p "/EFI/BOOT" \ efi_gop ${MODS} popd @@ -208,7 +209,7 @@ popd %{biosdir}/core.img %endif %dir %{efidir} -%{efidir}/%{_cross_grub_efi_image} +%{efidir}/%{efi_image} %{_cross_sbindir}/grub-bios-setup %exclude %{_cross_bashdir} %exclude %{_cross_infodir} From a635e9e1be5f0f501729ca472dd9554582529a5e Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 21 Jun 2023 17:19:11 +0000 Subject: [PATCH 0988/1356] build: unpin Rust version This reverts commit ee7205d92c7f8aaeb4ea8d66105f158c9731fc6c. Pinning the Rust version is no longer required, now that Rust 1.70 is in the SDK and supports the new sparse index format by default. --- .github/actions/setup-node/action.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index ee036f99..d6db96ed 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -34,12 +34,6 @@ runs: tools/.crates2.json tools/target key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} - - name: Setup pinned rust version - uses: actions-rs/toolchain@v1 - with: - toolchain: 1.69.0 - override: true - components: rustfmt, clippy - run: cargo install --locked --version 0.36.0 cargo-make shell: bash - run: cargo install --locked --version 0.6.2 cargo-sweep From e06ff4aa4ab1792755b51368f50d8dc54cb740b7 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Mon, 19 Jun 2023 22:56:01 +0000 Subject: [PATCH 0989/1356] ecs: add settings to clean up container images This adds the configurations exposed by the agent to clean up container images after a period of time. As with previous changes, time-related configurations are rendered at `/etc/ecs/ecs.config` instead of `/etc/ecs/ecs.config.json`, since the JSON parser used by the ECS agent fails to parse time-formatted strings. Signed-off-by: Arnaldo Garcia Rincon --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 40511341..c3382ea7 100644 --- a/README.md +++ b/README.md @@ -656,6 +656,13 @@ Valid time units include `s`, `m`, and `h`, e.g. `1h`, `1m1s`. * `settings.ecs.metadata-service-burst`: The burst rate limit of the throttling configurations set for the task metadata service. * `settings.ecs.reserved-memory`: The amount of memory, in MiB, reserved for critical system processes. * `settings.ecs.task-cleanup-wait`: Time to wait before the task's containers are removed after they are stopped. +Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. 
+* `settings.ecs.image-cleanup-wait`: Time to wait between image cleanup cycles. +Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. +* `settings.ecs.image-cleanup-delete-per-cycle`: Number of images to delete in a single image cleanup cycle. +* `settings.ecs.image-cleanup-enabled`: Enable automatic images clean up after the tasks have been removed. +Defaults to `false` +* `settings.ecs.image-cleanup-age`: Time since the image was pulled to be considered for clean up. Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. **Note**: `metadata-service-rps` and `metadata-service-burst` directly map to the values set by the `ECS_TASK_METADATA_RPS_LIMIT` environment variable. From c5e16b56cdb96ec0e7787238c675c62da9a7ec4f Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 27 Jun 2023 13:36:03 +0000 Subject: [PATCH 0990/1356] kernel-5.10: update to 5.10.184 Rebase to Amazon Linux upstream version based on 5.10.184. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index bf6584fc..36dc4d97 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "/dev/null" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/13be720c0258208a986213f02d549940509f5125eac626729bc5dd3612bef2f8/kernel-5.10.178-162.673.amzn2.src.rpm" -sha512 = "d1785ac9f88afbe2ee36bc4a16319c076048b89eb4488fdec884a785d1f68ad981b9499c54fa289329d5db228400175aa7cf05d4e0c7d9a75a68a18532a31957" +url = "https://cdn.amazonlinux.com/blobstore/73e966edcb947b3b7d077150dcea95b838666a21da320092f9659ddafa3409fb/kernel-5.10.184-175.731.amzn2.src.rpm" +sha512 = "5245ba11ae97b9f646ea817960e204283acd86b1b6c6e42e0b268268723d1d3e516c97ce0d868341de9e82476a9f8c5b21ef4eb5d7e11b7a38a5a8234c1b1a72" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index e3dda0c5..897a4f1f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.178 +Version: 5.10.184 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/13be720c0258208a986213f02d549940509f5125eac626729bc5dd3612bef2f8/kernel-5.10.178-162.673.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/73e966edcb947b3b7d077150dcea95b838666a21da320092f9659ddafa3409fb/kernel-5.10.184-175.731.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 5e95b270a0e716e196663c4a4ed40806cc6d2a73 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 19 Jun 2023 16:16:56 +0000 Subject: [PATCH 0991/1356] Revert "kernel-5.10: cherry-pick fix for CVE-2023-32233" This reverts commit e2083a2ae1e820300273f2264549b8d656d8a9f0. The fix was introduced upstream into the 5.10 series starting with version v5.10.180. We do not need to carry the patch downstream anymore. 
Signed-off-by: Leonard Foerster --- ...les-deactivate-anonymous-set-from-pr.patch | 125 ------------------ packages/kernel-5.10/kernel-5.10.spec | 3 - 2 files changed, 128 deletions(-) delete mode 100644 packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch diff --git a/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch b/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch deleted file mode 100644 index 21f3b178..00000000 --- a/packages/kernel-5.10/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch +++ /dev/null @@ -1,125 +0,0 @@ -From e044a24447189419c3a7ccc5fa6da7516036dc55 Mon Sep 17 00:00:00 2001 -From: Pablo Neira Ayuso -Date: Tue, 2 May 2023 10:25:24 +0200 -Subject: [PATCH] netfilter: nf_tables: deactivate anonymous set from - preparation phase - -commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream. - -Toggle deleted anonymous sets as inactive in the next generation, so -users cannot perform any update on it. Clear the generation bitmask -in case the transaction is aborted. - -The following KASAN splat shows a set element deletion for a bound -anonymous set that has been already removed in the same transaction. - -[ 64.921510] ================================================================== -[ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.924745] Write of size 8 at addr dead000000000122 by task test/890 -[ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253 -[ 64.931120] Call Trace: -[ 64.932699] -[ 64.934292] dump_stack_lvl+0x33/0x50 -[ 64.935908] ? nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.937551] kasan_report+0xda/0x120 -[ 64.939186] ? nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.940814] nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.942452] ? __kasan_slab_alloc+0x2d/0x60 -[ 64.944070] ? nf_tables_setelem_notify+0x190/0x190 [nf_tables] -[ 64.945710] ? kasan_set_track+0x21/0x30 -[ 64.947323] nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink] -[ 64.948898] ? 
nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] - -Signed-off-by: Pablo Neira Ayuso -Signed-off-by: Greg Kroah-Hartman ---- - include/net/netfilter/nf_tables.h | 1 + - net/netfilter/nf_tables_api.c | 12 ++++++++++++ - net/netfilter/nft_dynset.c | 2 +- - net/netfilter/nft_lookup.c | 2 +- - net/netfilter/nft_objref.c | 2 +- - 5 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h -index e66fee99ed3e..564fbe0c865f 100644 ---- a/include/net/netfilter/nf_tables.h -+++ b/include/net/netfilter/nf_tables.h -@@ -507,6 +507,7 @@ struct nft_set_binding { - }; - - enum nft_trans_phase; -+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); - void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding, - enum nft_trans_phase phase); -diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c -index 7bb716df7afc..fe51cedd9cc3 100644 ---- a/net/netfilter/nf_tables_api.c -+++ b/net/netfilter/nf_tables_api.c -@@ -4479,12 +4479,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, - } - } - -+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) -+{ -+ if (nft_set_is_anonymous(set)) -+ nft_clear(ctx->net, set); -+ -+ set->use++; -+} -+EXPORT_SYMBOL_GPL(nf_tables_activate_set); -+ - void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding, - enum nft_trans_phase phase) - { - switch (phase) { - case NFT_TRANS_PREPARE: -+ if (nft_set_is_anonymous(set)) -+ nft_deactivate_next(ctx->net, set); -+ - set->use--; - return; - case NFT_TRANS_ABORT: -diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c -index 8c45e01fecdd..038588d4d80e 100644 ---- a/net/netfilter/nft_dynset.c -+++ b/net/netfilter/nft_dynset.c -@@ -233,7 +233,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx, - { - struct nft_dynset *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_dynset_destroy(const struct nft_ctx *ctx, -diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c -index b0f558b4fea5..8bc008ff00cb 100644 ---- a/net/netfilter/nft_lookup.c -+++ b/net/netfilter/nft_lookup.c -@@ -132,7 +132,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx, - { - struct nft_lookup *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_lookup_destroy(const struct nft_ctx *ctx, -diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c -index bc104d36d3bb..25157d8cc250 100644 ---- a/net/netfilter/nft_objref.c -+++ b/net/netfilter/nft_objref.c -@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx, - { - struct nft_objref_map *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_objref_map_destroy(const struct nft_ctx *ctx, --- -2.25.1 - diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 897a4f1f..2c6c1fba 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -24,9 +24,6 @@ Patch1003: 1003-af_unix-increase-default-max_dgram_qlen-to-512.patch Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch -# Backport from v5.10.180 
upstream, drop when Amazon Linux base is v5.10.180 or later -Patch5001: 5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 6e1a9abe30ca50e3733d3e3a836d1c745182ab09 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 27 Jun 2023 13:36:32 +0000 Subject: [PATCH 0992/1356] kernel-5.15: update to 5.15.117 Rebase to Amazon Linux upstream version based on 5.15.117. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 8f828372..a1464a8a 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "/dev/null" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/e8de7cc956678c88e06d181df5b0dde1c39fdc2fce4a47b5b466585f1e164a35/kernel-5.15.108-65.141.amzn2.src.rpm" -sha512 = "3c5eaa6bea14f8f06a8999f05c2fe92b4b623ec4c445c0136903977a5cee02e46119585fa7afd20685156735bd6512e5400628868c759016027673c4ebb5cceb" +url = "https://cdn.amazonlinux.com/blobstore/dee03ce3e2dcaf93eec3457db4f5a6973c1837abd3c96229897cb29e5c72d348/kernel-5.15.117-73.143.amzn2.src.rpm" +sha512 = "5b846ce8b18cf155925534a26faf6ef26f47c808a7adaf089248fdce0fc2f06acad389e49595eeda390bd28ca64c9f47765ea7431b64709c6913f52266063024" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index d9d733e1..15305ce5 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.108 +Version: 5.15.117 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/e8de7cc956678c88e06d181df5b0dde1c39fdc2fce4a47b5b466585f1e164a35/kernel-5.15.108-65.141.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/dee03ce3e2dcaf93eec3457db4f5a6973c1837abd3c96229897cb29e5c72d348/kernel-5.15.117-73.143.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 8bf94bc869deca1d7c296f963ac6e78a7179bfb9 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 20 Jun 2023 08:18:24 +0000 Subject: [PATCH 0993/1356] Revert "kernel-5.15: cherry-pick fix for CVE-2023-32233" This reverts commit a3496133ae8ccabe3406e472ce931b22eeb930b2. The fix was introduced into the stable 5.15 series upstream in version v5.15.111. We do not need to carry the downstream variant anymore. 
Signed-off-by: Leonard Foerster --- ...les-deactivate-anonymous-set-from-pr.patch | 125 ------------------ packages/kernel-5.15/kernel-5.15.spec | 3 - 2 files changed, 128 deletions(-) delete mode 100644 packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch diff --git a/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch b/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch deleted file mode 100644 index 26050e2e..00000000 --- a/packages/kernel-5.15/5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 21c2a454486d5e9c1517ecca19266b3be3df73ca Mon Sep 17 00:00:00 2001 -From: Pablo Neira Ayuso -Date: Tue, 2 May 2023 10:25:24 +0200 -Subject: [PATCH] netfilter: nf_tables: deactivate anonymous set from - preparation phase - -commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream. - -Toggle deleted anonymous sets as inactive in the next generation, so -users cannot perform any update on it. Clear the generation bitmask -in case the transaction is aborted. - -The following KASAN splat shows a set element deletion for a bound -anonymous set that has been already removed in the same transaction. - -[ 64.921510] ================================================================== -[ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.924745] Write of size 8 at addr dead000000000122 by task test/890 -[ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253 -[ 64.931120] Call Trace: -[ 64.932699] -[ 64.934292] dump_stack_lvl+0x33/0x50 -[ 64.935908] ? nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.937551] kasan_report+0xda/0x120 -[ 64.939186] ? nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.940814] nf_tables_commit+0xa24/0x1490 [nf_tables] -[ 64.942452] ? __kasan_slab_alloc+0x2d/0x60 -[ 64.944070] ? nf_tables_setelem_notify+0x190/0x190 [nf_tables] -[ 64.945710] ? kasan_set_track+0x21/0x30 -[ 64.947323] nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink] -[ 64.948898] ? 
nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] - -Signed-off-by: Pablo Neira Ayuso -Signed-off-by: Greg Kroah-Hartman ---- - include/net/netfilter/nf_tables.h | 1 + - net/netfilter/nf_tables_api.c | 12 ++++++++++++ - net/netfilter/nft_dynset.c | 2 +- - net/netfilter/nft_lookup.c | 2 +- - net/netfilter/nft_objref.c | 2 +- - 5 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h -index 8def00a04541..22f67ae935e0 100644 ---- a/include/net/netfilter/nf_tables.h -+++ b/include/net/netfilter/nf_tables.h -@@ -584,6 +584,7 @@ struct nft_set_binding { - }; - - enum nft_trans_phase; -+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); - void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding, - enum nft_trans_phase phase); -diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c -index 822d13e64b32..091df8a7cb1e 100644 ---- a/net/netfilter/nf_tables_api.c -+++ b/net/netfilter/nf_tables_api.c -@@ -4839,12 +4839,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, - } - } - -+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) -+{ -+ if (nft_set_is_anonymous(set)) -+ nft_clear(ctx->net, set); -+ -+ set->use++; -+} -+EXPORT_SYMBOL_GPL(nf_tables_activate_set); -+ - void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, - struct nft_set_binding *binding, - enum nft_trans_phase phase) - { - switch (phase) { - case NFT_TRANS_PREPARE: -+ if (nft_set_is_anonymous(set)) -+ nft_deactivate_next(ctx->net, set); -+ - set->use--; - return; - case NFT_TRANS_ABORT: -diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c -index 87f3af4645d9..29c7ae8789e9 100644 ---- a/net/netfilter/nft_dynset.c -+++ b/net/netfilter/nft_dynset.c -@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx, - { - struct nft_dynset *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_dynset_destroy(const struct nft_ctx *ctx, -diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c -index bd3485dd930f..9d18c5428d53 100644 ---- a/net/netfilter/nft_lookup.c -+++ b/net/netfilter/nft_lookup.c -@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx, - { - struct nft_lookup *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_lookup_destroy(const struct nft_ctx *ctx, -diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c -index 94b2327e71dc..3ff91bcaa5f2 100644 ---- a/net/netfilter/nft_objref.c -+++ b/net/netfilter/nft_objref.c -@@ -183,7 +183,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx, - { - struct nft_objref_map *priv = nft_expr_priv(expr); - -- priv->set->use++; -+ nf_tables_activate_set(ctx, priv->set); - } - - static void nft_objref_map_destroy(const struct nft_ctx *ctx, --- -2.25.1 - diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 15305ce5..0783e99f 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -22,9 +22,6 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. 
Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch -# Backport from v5.15.111 upstream, drop when Amazon Linux base is v5.15.111 or later -Patch5001: 5001-netfilter-nf_tables-deactivate-anonymous-set-from-pr.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From fdf74aca239580219d3c8da6bdeb66f06bdc2fca Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 27 Jun 2023 13:36:49 +0000 Subject: [PATCH 0994/1356] kernel-6.1: update to 6.1.29 Rebase to Amazon Linux upstream version based on 6.1.29. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index f2dcaedd..8965c4b1 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "/dev/null" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/7da4ac2135147d0ab97afaa82b81cb7408dc5a2fa8f8dd3e785a908585977bd3/kernel-6.1.19-30.43.amzn2023.src.rpm" -sha512 = "6f2458e5707c7635c86e08de666e1beaab6b6ce133a5cdf084c72c80802d54f4d352784ce97d4832bf9e8e7a74f491a551829806df391604138489ecb9d0ac17" +url = "https://cdn.amazonlinux.com/al2023/blobstore/fc78f9cacdcb6227481fd326c05429914f6b085d7abad49c0b1fd896ec02dd4b/kernel-6.1.29-50.88.amzn2023.src.rpm" +sha512 = "0d3a40a5811d36c0ac8a731686a816ae47f66f10ce8ca945f4e727f6c188c9d0a54c504667c25a86b7c80437c9fddafa3973205ad73ed7330b8957b526eff5ed" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index c770302f..6b5df7e2 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.19 +Version: 6.1.29 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/7da4ac2135147d0ab97afaa82b81cb7408dc5a2fa8f8dd3e785a908585977bd3/kernel-6.1.19-30.43.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/fc78f9cacdcb6227481fd326c05429914f6b085d7abad49c0b1fd896ec02dd4b/kernel-6.1.29-50.88.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 7a25b2f145c8067a9a4808d0c814aac6da9f5b6c Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 20 Jun 2023 14:33:13 +0000 Subject: [PATCH 0995/1356] kernel-5.10: Choose upstream smartpqi driver over backport In the past we have built-in the upstream smartpqi driver for our metal platforms and inherited the module build on aws/vmware platforms from AL. AL recently introduced a newer version of that driver alongside the in-tree driver. Keep the status quo by only building the in-tree variant, effectively reducing the amount of code built and shipped. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index b0e7d9d2..bb3c3a51 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -152,3 +152,7 @@ CONFIG_EXT4_USE_FOR_EXT2=y # Disable unused MPI3MR driver AL carries as a backport # CONFIG_SCSI_MPI3MR is not set + +# Work with the previously used in-tree version of SMARTPQI instead of AL backport +# CONFIG_AMAZON_SCSI_SMARTPQI is not set +CONFIG_SCSI_SMARTPQI=m From 99f8ee581186604a19c2be5a926af0e9b35e415f Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 20 Jun 2023 14:43:16 +0000 Subject: [PATCH 0996/1356] kernel-5.10, -5.15: Disable edac driver for Intel 10NM memory controller We have not used and currently, do not have requests to support edac on that style of hardware. Keep with our kernels small and at the status quo. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket | 3 +++ packages/kernel-5.15/config-bottlerocket | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index bb3c3a51..b19ae074 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -156,3 +156,6 @@ CONFIG_EXT4_USE_FOR_EXT2=y # Work with the previously used in-tree version of SMARTPQI instead of AL backport # CONFIG_AMAZON_SCSI_SMARTPQI is not set CONFIG_SCSI_SMARTPQI=m + +# Disable edac driver for intel 10nm memory controllers +# CONFIG_EDAC_I10NM is not set diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index b6cf3fc1..15c84258 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -200,3 +200,6 @@ CONFIG_ISCSI_TARGET=m # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set # CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set + +# Disable edac driver for intel 10nm memory controllers +# CONFIG_EDAC_I10NM is not set From 4c8b68e9079305c93d1fd460c6a5775be96b47fd Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 20 Jun 2023 15:04:01 +0000 Subject: [PATCH 0997/1356] kernel-5.10: Remove specialized IPMI drivers AL has recently added a bunch of IPMI drivers to their kernels. In Bottlerocket those do not get used currently. Remove the unneeded drivers. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index b19ae074..f0e1a5ca 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -101,8 +101,17 @@ CONFIG_SERIO_I8042=m CONFIG_KEYBOARD_ATKBD=m CONFIG_MOUSE_PS2=m -# Add support for IPMI drivers +# Add support for the basic IPMI handler. While Bottlerocket does not ship with +# any specific IPMI interfaces, the basic IPMI handler interface is used by the +# nvidia drivers, which makes this necessary. 
CONFIG_IPMI_HANDLER=m +# Disable more specialized IPMI drivers that are not relevant for our use-cases +# CONFIG_IPMI_DEVICE_INTERFACE is not set +# CONFIG_IPMI_PANIC_EVENT is not set +# CONFIG_IPMI_POWEROFF is not set +# CONFIG_IPMI_SI is not set +# CONFIG_IPMI_WATCHDOG is not set +# CONFIG_ACPI_IPMI is not set # Add support for bootconfig CONFIG_BOOT_CONFIG=y From 828770682e611982da5e4558ad0d374f8078b3c9 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 28 Jun 2023 09:52:58 +0000 Subject: [PATCH 0998/1356] kernel-5.10: Disable Amazon Linux BBR2 port Amazon Linux introduced a port of the BBR2 congestion control algorithm. As this is a comparably large patch set that has not made it upstream yet and is marked as alpha/preview state. Disabling it for now to keep our kernel more stable. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index f0e1a5ca..7a1b1ef0 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -168,3 +168,6 @@ CONFIG_SCSI_SMARTPQI=m # Disable edac driver for intel 10nm memory controllers # CONFIG_EDAC_I10NM is not set + +# Disable AL port of BBR2 congestion algorithm +# CONFIG_TCP_CONG_BBR2 is not set From fd19cf9be23b2e43a3d8f76536710312e3edc39a Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 5 Jul 2023 11:00:41 -0500 Subject: [PATCH 0999/1356] Actions: add random delay to matrix start For our main "build" GitHub Action we build a matrix execution for each variant supported. This causes many jobs to start at the same time, and could lead to triggering some protections from the external resources that they need to pull from (GitHub, crates.io, etc). This adds a random delay up to 30 seconds to the individual variant runs so they don't all kick off the same actions at the same time, hopefully spreading some of the load a little better and avoiding some of these backoff protections. 
Signed-off-by: Sean McGinnis --- .github/workflows/build.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4fbea4f4..38f192f8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,6 +51,11 @@ jobs: fail-fast: false name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" steps: + - name: Random delay + run: | + delay=$((1 + $RANDOM % 32)) + echo "Waiting ${delay} seconds before execution" + sleep $delay - uses: actions/checkout@v3 - name: Preflight step to set up the runner uses: ./.github/actions/setup-node From 5f4b7a5052dbb233b8ba8f48c0d7b40726976692 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 22 Jun 2023 21:22:46 +0000 Subject: [PATCH 1000/1356] Bump clap to 4.3.5 in tools/infrasys Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 206 +++++++++++++++++++++++++++++++---- tools/deny.toml | 3 + tools/infrasys/Cargo.toml | 3 +- tools/infrasys/src/main.rs | 22 ++-- tools/infrasys/src/shared.rs | 4 +- 5 files changed, 203 insertions(+), 35 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 6caab0d1..eb9b7832 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -44,6 +44,55 @@ dependencies = [ "winapi", ] +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "argh" version = "0.1.10" @@ -847,8 +896,8 @@ checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags", - "clap_derive", - "clap_lex", + "clap_derive 3.2.18", + "clap_lex 0.2.4", "indexmap", "once_cell", "strsim 0.10.0", @@ -856,6 +905,30 @@ dependencies = [ "textwrap 0.16.0", ] +[[package]] +name = "clap" +version = "4.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2686c4115cb0810d9a984776e197823d08ec94f176549a89a9efded477c456dc" +dependencies = [ + "clap_builder", + "clap_derive 4.3.2", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e53afce1efce6ed1f633cf0e57612fe51db54a1ee4fd8f8503d078fe02d69ae" +dependencies = [ + "anstream", + "anstyle", + "bitflags", + "clap_lex 0.5.0", + "strsim 0.10.0", +] + [[package]] name = "clap_derive" version = "3.2.18" @@ -869,6 +942,18 @@ dependencies 
= [ "syn 1.0.109", ] +[[package]] +name = "clap_derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "clap_lex" version = "0.2.4" @@ -878,6 +963,12 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + [[package]] name = "codespan-reporting" version = "0.11.1" @@ -914,6 +1005,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "configuration-derive" version = "0.0.8" @@ -1734,7 +1831,7 @@ dependencies = [ "aws-sdk-cloudformation", "aws-sdk-s3", "aws-types", - "clap 3.2.23", + "clap 4.3.5", "hex", "log", "pubsys-config", @@ -1744,7 +1841,6 @@ dependencies = [ "shell-words", "simplelog", "snafu", - "structopt", "tokio", "toml", "url", @@ -3762,6 +3858,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "vec_map" version = "0.8.2" @@ -3938,13 +4040,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -3953,7 +4055,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -3962,13 +4073,28 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -3977,42 +4103,84 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winreg" version = "0.10.1" diff --git a/tools/deny.toml b/tools/deny.toml index 74392d3d..d6432e92 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -86,6 +86,9 @@ skip-tree = [ # TestSys uses a newer version of base64 and serde_yaml { name = "testsys-model", version = "=0.0.8" }, { name = "bottlerocket-types", 
version = "=0.0.8" }, + + # generate-readme uses an old version of clap and other dependencies + { name = "generate-readme", version = "0.1.0" } ] [sources] diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index d23cd291..a2538a09 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] async-trait = "0.1" -clap = "3" +clap = { version = "4", features = ["derive"] } hex = "0.4" log = "0.4" pubsys-config = { path = "../pubsys-config/", version = "0.1" } @@ -22,7 +22,6 @@ sha2 = "0.10" shell-words = "1" simplelog = "0.12" snafu = "0.7" -structopt = { version = "0.3", default-features = false } tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } toml = "0.5" url = "2" diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index 7c4e1eff..ef330f6d 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -5,6 +5,7 @@ mod s3; mod shared; use aws_sdk_cloudformation::Region; +use clap::Parser; use error::Result; use log::{error, info}; use pubsys_config::{InfraConfig, RepoConfig, S3Config, SigningKeyConfig}; @@ -16,35 +17,32 @@ use std::collections::HashMap; use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; use std::{fs, process}; -use structopt::{clap, StructOpt}; use tokio::runtime::Runtime; use url::Url; // =^..^= =^..^= =^..^= SUB-COMMAND STRUCTS =^..^= =^..^= =^..^= -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] struct Args { - #[structopt(global = true, long, default_value = "INFO")] + #[arg(global = true, long, default_value = "INFO")] log_level: LevelFilter, - // Path to Infra.toml (NOTE: must be specified before subcommand) - #[structopt(long, parse(from_os_str))] + // Path to Infra.toml (NOTE: must be specified before subcommand) + #[arg(long)] infra_config_path: PathBuf, - #[structopt(subcommand)] + #[command(subcommand)] subcommand: SubCommand, } -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] struct CreateInfraArgs { /// Path to the root.json file. - #[structopt(long)] + #[arg(long)] root_role_path: PathBuf, } -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] enum SubCommand { /// Creates infrastructure specified in the Infra.toml file. CreateInfra(CreateInfraArgs), @@ -61,7 +59,7 @@ fn main() { fn run() -> Result<()> { // Parse and store the args passed to the program - let args = Args::from_args(); + let args = Args::parse(); match args.log_level { // Set log level for AWS SDK to error to reduce verbosity. diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs index 0e458191..c4d04b6d 100644 --- a/tools/infrasys/src/shared.rs +++ b/tools/infrasys/src/shared.rs @@ -1,13 +1,13 @@ use aws_sdk_cloudformation::model::{Output, Parameter}; use aws_sdk_cloudformation::Client as CloudFormationClient; +use clap::Parser; use log::info; use snafu::{ensure, OptionExt, ResultExt}; use std::{env, thread, time}; -use structopt::StructOpt; use super::{error, Result}; -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] pub enum KeyRole { Root, Publication, From 6e4d8d2e30416cc7455ac7ed8ef71b65a85305bd Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 23 Jun 2023 13:47:23 +0000 Subject: [PATCH 1001/1356] Bump clap to 4.3.5 in tools/pubsys Updates clap in pubsys and removes structops. 
Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 3 +- tools/pubsys/Cargo.toml | 3 +- tools/pubsys/src/aws/ami/mod.rs | 23 +++++----- tools/pubsys/src/aws/promote_ssm/mod.rs | 19 ++++---- tools/pubsys/src/aws/publish_ami/mod.rs | 32 +++++++------ tools/pubsys/src/aws/ssm/mod.rs | 23 +++++----- tools/pubsys/src/aws/validate_ami/mod.rs | 13 +++--- tools/pubsys/src/aws/validate_ami/results.rs | 2 +- tools/pubsys/src/aws/validate_ssm/mod.rs | 15 +++---- tools/pubsys/src/aws/validate_ssm/results.rs | 2 +- tools/pubsys/src/main.rs | 45 +++++++++---------- tools/pubsys/src/repo.rs | 37 ++++++++------- .../pubsys/src/repo/check_expirations/mod.rs | 15 +++---- tools/pubsys/src/repo/refresh_repo/mod.rs | 21 +++++---- tools/pubsys/src/repo/validate_repo/mod.rs | 15 +++---- tools/pubsys/src/vmware/upload_ova/mod.rs | 15 +++---- 16 files changed, 134 insertions(+), 149 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index eb9b7832..3206a2ca 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2500,7 +2500,7 @@ dependencies = [ "aws-types", "buildsys", "chrono", - "clap 3.2.23", + "clap 4.3.5", "coldsnap", "duct", "futures", @@ -2521,7 +2521,6 @@ dependencies = [ "serde_plain", "simplelog", "snafu", - "structopt", "tabled", "tempfile", "tinytemplate", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 7c0e94fe..8a63145a 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -19,7 +19,7 @@ aws-smithy-types = "0.54" aws-types = "0.54" buildsys = { path = "../buildsys", version = "0.1" } chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } -clap = "3" +clap = { version = "4", features = ["derive"] } coldsnap = { version = "0.5", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13" futures = "0.3" @@ -41,7 +41,6 @@ serde_json = "1" serde_plain = "1" simplelog = "0.12" snafu = "0.7" -structopt = { version = "0.3", default-features = false } tabled = "0.10" tempfile = "3" tinytemplate = "1" diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index ea511ebb..425fd344 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -21,6 +21,7 @@ use aws_sdk_ec2::{Client as Ec2Client, Region}; use aws_sdk_sts::error::GetCallerIdentityError; use aws_sdk_sts::output::GetCallerIdentityOutput; use aws_sdk_sts::Client as StsClient; +use clap::Parser; use futures::future::{join, lazy, ready, FutureExt}; use futures::stream::{self, StreamExt}; use log::{error, info, trace, warn}; @@ -30,49 +31,47 @@ use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; -use structopt::{clap, StructOpt}; use wait::wait_for_ami; const WARN_SEPARATOR: &str = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; /// Builds Bottlerocket AMIs using latest build artifacts -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct AmiArgs { /// Path to the image containing the os volume - #[structopt(short = "o", long, parse(from_os_str))] + #[arg(short = 'o', long)] os_image: PathBuf, /// Path to the image containing the data volume - #[structopt(short = "d", long, parse(from_os_str))] + #[arg(short = 'd', long)] data_image: Option, /// Path to the variant manifest - #[structopt(short = "v", long, parse(from_os_str))] + #[arg(short = 'v', long)] variant_manifest: PathBuf, /// The architecture of the machine image 
- #[structopt(short = "a", long, parse(try_from_str = parse_arch))] + #[arg(short = 'a', long, value_parser = parse_arch)] arch: ArchitectureValues, /// The desired AMI name - #[structopt(short = "n", long)] + #[arg(short = 'n', long)] name: String, /// The desired AMI description - #[structopt(long)] + #[arg(long)] description: Option, /// Don't display progress bars - #[structopt(long)] + #[arg(long)] no_progress: bool, /// Regions where you want the AMI, the first will be used as the base for copying - #[structopt(long, use_delimiter = true)] + #[arg(long, value_delimiter = ',')] regions: Vec, /// If specified, save created regional AMI IDs in JSON at this path. - #[structopt(long)] + #[arg(long)] ami_output: Option, } diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index fc4530a8..f8aeaa56 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -9,44 +9,43 @@ use crate::aws::{parse_arch, region_from_string}; use crate::Args; use aws_sdk_ec2::model::ArchitectureValues; use aws_sdk_ssm::{Client as SsmClient, Region}; +use clap::Parser; use log::{info, trace}; use pubsys_config::InfraConfig; use snafu::{ensure, ResultExt}; use std::collections::HashMap; use std::path::PathBuf; -use structopt::{clap, StructOpt}; /// Copies sets of SSM parameters -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct PromoteArgs { /// The architecture of the machine image - #[structopt(long, parse(try_from_str = parse_arch))] + #[arg(long, value_parser = parse_arch)] arch: ArchitectureValues, /// The variant name for the current build - #[structopt(long)] + #[arg(long)] variant: String, /// Version number (or string) to copy from - #[structopt(long)] + #[arg(long)] source: String, /// Version number (or string) to copy to - #[structopt(long)] + #[arg(long)] target: String, /// Comma-separated list of regions to promote in, overriding Infra.toml - #[structopt(long, use_delimiter = true)] + #[arg(long, value_delimiter = ',')] regions: Vec, /// File holding the parameter templates - #[structopt(long)] + #[arg(long)] template_path: PathBuf, /// If set, contains the path to the file holding the original SSM parameters /// and where the newly promoted parameters will be written - #[structopt(long)] + #[arg(long)] ssm_parameter_output: Option, } diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 67c06b4d..e1277433 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -14,6 +14,7 @@ use aws_sdk_ec2::model::{ use aws_sdk_ec2::output::{ModifyImageAttributeOutput, ModifySnapshotAttributeOutput}; use aws_sdk_ec2::types::SdkError; use aws_sdk_ec2::{Client as Ec2Client, Region}; +use clap::{Args as ClapArgs, Parser}; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; use log::{debug, error, info, trace}; @@ -23,52 +24,49 @@ use std::collections::{HashMap, HashSet}; use std::fs::File; use std::iter::FromIterator; use std::path::PathBuf; -use structopt::{clap, StructOpt}; -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] pub(crate) struct ModifyOptions { /// User IDs to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] + #[arg(long, value_delimiter = ',', group = "who")] pub(crate) user_ids: Vec, /// Group names to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] + 
#[arg(long, value_delimiter = ',', group = "who")] pub(crate) group_names: Vec, /// Organization arns to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] + #[arg(long, value_delimiter = ',', group = "who")] pub(crate) organization_arns: Vec, /// Organizational unit arns to give/remove access - #[structopt(long, use_delimiter = true, group = "who")] + #[arg(long, value_delimiter = ',', group = "who")] pub(crate) organizational_unit_arns: Vec, } /// Grants or revokes permissions to Bottlerocket AMIs -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] -#[structopt(group = clap::ArgGroup::with_name("mode").required(true).multiple(false))] -#[structopt(group = clap::ArgGroup::with_name("who").required(true).multiple(true))] -pub(crate) struct PublishArgs { +#[derive(Debug, ClapArgs)] +#[group(required = true, multiple = true)] +pub(crate) struct Who { /// Path to the JSON file containing regional AMI IDs to modify - #[structopt(long)] + #[arg(long)] ami_input: PathBuf, /// Comma-separated list of regions to publish in, overriding Infra.toml; given regions must be /// in the --ami-input file - #[structopt(long, use_delimiter = true)] + #[arg(long, value_delimiter = ',')] regions: Vec, /// Grant access to the given users/groups - #[structopt(long, group = "mode")] + #[arg(long, group = "mode")] grant: bool, /// Revoke access from the given users/groups - #[structopt(long, group = "mode")] + #[arg(long, group = "mode")] revoke: bool, - #[structopt(flatten)] + #[command(flatten)] modify_opts: ModifyOptions, } /// Common entrypoint from main() -pub(crate) async fn run(args: &Args, publish_args: &PublishArgs) -> Result<()> { +pub(crate) async fn run(args: &Args, publish_args: &Who) -> Result<()> { let (operation, description) = if publish_args.grant { (OperationType::Add, "granting access") } else if publish_args.revoke { diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index 1dc02b03..da3e6f1f 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -15,6 +15,7 @@ use crate::Args; use aws_config::SdkConfig; use aws_sdk_ec2::{model::ArchitectureValues, Client as Ec2Client}; use aws_sdk_ssm::{Client as SsmClient, Region}; +use clap::Parser; use futures::stream::{StreamExt, TryStreamExt}; use governor::{prelude::*, Quota, RateLimiter}; use log::{error, info, trace}; @@ -28,47 +29,45 @@ use std::{ collections::{HashMap, HashSet}, fs::File, }; -use structopt::{clap, StructOpt}; /// Sets SSM parameters based on current build information -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct SsmArgs { // This is JSON output from `pubsys ami` like `{"us-west-2": "ami-123"}` /// Path to the JSON file containing regional AMI IDs to modify - #[structopt(long, parse(from_os_str))] + #[arg(long)] ami_input: PathBuf, /// The architecture of the machine image - #[structopt(long, parse(try_from_str = parse_arch))] + #[arg(long, value_parser = parse_arch)] arch: ArchitectureValues, /// The variant name for the current build - #[structopt(long)] + #[arg(long)] variant: String, /// The version of the current build - #[structopt(long)] + #[arg(long)] version: String, /// Regions where you want parameters published - #[structopt(long, use_delimiter = true)] + #[arg(long, value_delimiter = ',')] regions: Vec, /// File holding the parameter templates - #[structopt(long)] + #[arg(long)] template_path: PathBuf, /// 
Allows overwrite of existing parameters - #[structopt(long)] + #[arg(long)] allow_clobber: bool, /// Allows publishing non-public images to the `/aws/` namespace - #[structopt(long)] + #[arg(long)] allow_private_images: bool, /// If set, writes the generated SSM parameters to this path - #[structopt(long)] + #[arg(long)] ssm_parameter_output: Option, } diff --git a/tools/pubsys/src/aws/validate_ami/mod.rs b/tools/pubsys/src/aws/validate_ami/mod.rs index 02572dcd..f57bbc4b 100644 --- a/tools/pubsys/src/aws/validate_ami/mod.rs +++ b/tools/pubsys/src/aws/validate_ami/mod.rs @@ -10,34 +10,33 @@ use crate::aws::client::build_client_config; use crate::aws::validate_ami::ami::describe_images; use crate::Args; use aws_sdk_ec2::{Client as AmiClient, Region}; +use clap::Parser; use log::{error, info, trace}; use pubsys_config::InfraConfig; use snafu::ResultExt; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::PathBuf; -use structopt::{clap, StructOpt}; /// Validates EC2 images by calling `describe-images` on all images in the file given by /// `expected-amis-path` and ensuring that the returned `public`, `ena-support`, /// `sriov-net-support`, and `launch-permissions` fields have the expected values. -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct ValidateAmiArgs { /// File holding the expected amis - #[structopt(long, parse(from_os_str))] + #[arg(long)] expected_amis_path: PathBuf, /// Optional path where the validation results should be written - #[structopt(long, parse(from_os_str))] + #[arg(long)] write_results_path: Option, - #[structopt(long, requires = "write-results-path")] + #[arg(long, requires = "write_results_path")] /// Optional filter to only write validation results with these statuses to the above path /// The available statuses are: `Correct`, `Incorrect`, `Missing`. 
write_results_filter: Option>, - #[structopt(long)] + #[arg(long)] /// If this argument is given, print the validation results summary as a JSON object instead /// of a plaintext table json: bool, diff --git a/tools/pubsys/src/aws/validate_ami/results.rs b/tools/pubsys/src/aws/validate_ami/results.rs index 50add39d..93b723cf 100644 --- a/tools/pubsys/src/aws/validate_ami/results.rs +++ b/tools/pubsys/src/aws/validate_ami/results.rs @@ -10,7 +10,7 @@ use std::fmt::{self, Display}; use tabled::{Table, Tabled}; /// Represent the possible status of an EC2 image validation -#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)] pub(crate) enum AmiValidationResultStatus { /// The image was found and its monitored fields have the expected values Correct, diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs index 16df9380..4fbc9af2 100644 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -9,39 +9,38 @@ use super::ssm::{SsmKey, SsmParameters}; use crate::aws::client::build_client_config; use crate::Args; use aws_sdk_ssm::{Client as SsmClient, Region}; +use clap::Parser; use log::{error, info, trace}; use pubsys_config::InfraConfig; use snafu::ResultExt; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::PathBuf; -use structopt::{clap, StructOpt}; /// Validates SSM parameters and AMIs -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub struct ValidateSsmArgs { /// File holding the expected parameters - #[structopt(long, parse(from_os_str))] + #[arg(long)] expected_parameters_path: PathBuf, /// If this flag is set, check for unexpected parameters in the validation regions. If not, /// only the parameters present in the expected parameters file will be validated. 
- #[structopt(long)] + #[arg(long)] check_unexpected: bool, /// Optional path where the validation results should be written - #[structopt(long, parse(from_os_str))] + #[arg(long)] write_results_path: Option, /// Optional filter to only write validation results with these statuses to the above path /// Available statuses are: `Correct`, `Incorrect`, `Missing`, `Unexpected` - #[structopt(long, requires = "write-results-path")] + #[arg(long, requires = "write_results_path")] write_results_filter: Option>, /// If this flag is added, print the results summary table as JSON instead of a /// plaintext table - #[structopt(long)] + #[arg(long)] json: bool, } diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs index dc098281..d21fc9af 100644 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ b/tools/pubsys/src/aws/validate_ssm/results.rs @@ -9,7 +9,7 @@ use std::fmt::{self, Display}; use tabled::{Table, Tabled}; /// Represent the possible status of an SSM validation -#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)] pub enum SsmValidationResultStatus { /// The expected value was equal to the actual value Correct, diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs index 28b82f30..520ef4e5 100644 --- a/tools/pubsys/src/main.rs +++ b/tools/pubsys/src/main.rs @@ -26,17 +26,17 @@ mod aws; mod repo; mod vmware; +use clap::Parser; use semver::Version; use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; use snafu::ResultExt; use std::path::PathBuf; use std::process; -use structopt::{clap, StructOpt}; use tokio::runtime::Runtime; fn run() -> Result<()> { // Parse and store the args passed to the program - let args = Args::from_args(); + let args = Args::parse(); // SimpleLogger will send errors to stderr and anything less to stdout. 
// To reduce verbosity of messages related to the AWS SDK for Rust we need @@ -72,18 +72,18 @@ fn run() -> Result<()> { } match args.subcommand { - SubCommand::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), - SubCommand::ValidateRepo(ref validate_repo_args) => { + SubCommands::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), + SubCommands::ValidateRepo(ref validate_repo_args) => { repo::validate_repo::run(&args, validate_repo_args).context(error::ValidateRepoSnafu) } - SubCommand::CheckRepoExpirations(ref check_expirations_args) => { + SubCommands::CheckRepoExpirations(ref check_expirations_args) => { repo::check_expirations::run(&args, check_expirations_args) .context(error::CheckExpirationsSnafu) } - SubCommand::RefreshRepo(ref refresh_repo_args) => { + SubCommands::RefreshRepo(ref refresh_repo_args) => { repo::refresh_repo::run(&args, refresh_repo_args).context(error::RefreshRepoSnafu) } - SubCommand::Ami(ref ami_args) => { + SubCommands::Ami(ref ami_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::ami::run(&args, ami_args) @@ -91,7 +91,7 @@ fn run() -> Result<()> { .context(error::AmiSnafu) }) } - SubCommand::PublishAmi(ref publish_args) => { + SubCommands::PublishAmi(ref publish_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::publish_ami::run(&args, publish_args) @@ -99,7 +99,7 @@ fn run() -> Result<()> { .context(error::PublishAmiSnafu) }) } - SubCommand::Ssm(ref ssm_args) => { + SubCommands::Ssm(ref ssm_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::ssm::run(&args, ssm_args) @@ -107,7 +107,7 @@ fn run() -> Result<()> { .context(error::SsmSnafu) }) } - SubCommand::PromoteSsm(ref promote_args) => { + SubCommands::PromoteSsm(ref promote_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::promote_ssm::run(&args, promote_args) @@ -115,7 +115,7 @@ fn run() -> Result<()> { .context(error::PromoteSsmSnafu) }) } - SubCommand::ValidateSsm(ref validate_ssm_args) => { + SubCommands::ValidateSsm(ref validate_ssm_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::validate_ssm::run(&args, validate_ssm_args) @@ -123,7 +123,7 @@ fn run() -> Result<()> { .context(error::ValidateSsmSnafu) }) } - SubCommand::ValidateAmi(ref validate_ami_args) => { + SubCommands::ValidateAmi(ref validate_ami_args) => { let rt = Runtime::new().context(error::RuntimeSnafu)?; rt.block_on(async { aws::validate_ami::run(&args, validate_ami_args) @@ -131,7 +131,7 @@ fn run() -> Result<()> { .context(error::ValidateAmiSnafu) }) } - SubCommand::UploadOva(ref upload_args) => { + SubCommands::UploadOva(ref upload_args) => { vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) } } @@ -145,30 +145,29 @@ fn main() { } /// Automates publishing of Bottlerocket updates -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub struct Args { - #[structopt(global = true, long, default_value = "INFO")] + #[arg(global = true, long, default_value = "INFO")] /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE log_level: LevelFilter, - #[structopt(long, parse(from_os_str))] - /// Path to Infra.toml (NOTE: must be specified before subcommand) + #[arg(long)] + /// Path to Infra.toml (NOTE: must be specified before subcommand) infra_config_path: PathBuf, - 
#[structopt(subcommand)] - subcommand: SubCommand, + #[command(subcommand)] + subcommand: SubCommands, } -#[derive(Debug, StructOpt)] -enum SubCommand { +#[derive(Debug, Parser)] +enum SubCommands { Repo(repo::RepoArgs), ValidateRepo(repo::validate_repo::ValidateRepoArgs), CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs), RefreshRepo(repo::refresh_repo::RefreshRepoArgs), Ami(aws::ami::AmiArgs), - PublishAmi(aws::publish_ami::PublishArgs), + PublishAmi(aws::publish_ami::Who), ValidateAmi(aws::validate_ami::ValidateAmiArgs), Ssm(aws::ssm::SsmArgs), diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index 940f5918..dcdb4366 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -7,6 +7,7 @@ pub(crate) mod validate_repo; use crate::{friendly_version, Args}; use aws_sdk_kms::{Client as KmsClient, Region}; use chrono::{DateTime, Utc}; +use clap::Parser; use lazy_static::lazy_static; use log::{debug, info, trace, warn}; use parse_datetime::parse_datetime; @@ -19,7 +20,6 @@ use std::convert::TryInto; use std::fs::{self, File}; use std::num::NonZeroU64; use std::path::{Path, PathBuf}; -use structopt::{clap, StructOpt}; use tempfile::NamedTempFile; use tokio::runtime::Runtime; use tough::{ @@ -39,66 +39,65 @@ lazy_static! { } /// Builds Bottlerocket repos using latest build artifacts -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct RepoArgs { // Metadata about the update - #[structopt(long)] + #[arg(long)] /// Use this named repo infrastructure from Infra.toml repo: String, - #[structopt(long)] + #[arg(long)] /// The architecture of the repo and the update being added arch: String, - #[structopt(long, parse(try_from_str=friendly_version))] + #[arg(long, value_parser = friendly_version)] /// The version of the update being added version: Version, - #[structopt(long)] + #[arg(long)] /// The variant of the update being added variant: String, // The images to add in this update - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to the image containing the boot partition boot_image: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to the image containing the root partition root_image: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to the image containing the verity hashes hash_image: PathBuf, // Optionally add other files to the repo - #[structopt(long = "link-target", parse(from_os_str))] + #[arg(long = "link-target")] /// Optional paths to add as targets and symlink into repo link_targets: Vec, - #[structopt(long = "copy-target", parse(from_os_str))] + #[arg(long = "copy-target")] /// Optional paths to add as targets and copy into repo copy_targets: Vec, // Policies that pubsys interprets to set repo parameters - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to file that defines when repo metadata should expire repo_expiration_policy_path: PathBuf, // Configuration that pubsys passes on to other tools - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to Release.toml release_config_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to file that defines when this update will become available wave_policy_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to root.json for this repo root_role_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// If we generated a local key, we'll find it here; used 
if Infra.toml has no key defined default_key_path: PathBuf, - #[structopt(long, parse(try_from_str = parse_datetime))] + #[arg(long, value_parser = parse_datetime)] /// When the waves and expiration timer will start; RFC3339 date or "in X hours/days/weeks" release_start_time: Option>, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Where to store the created repo outdir: PathBuf, } diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs index 792e0232..bebbcd25 100644 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ b/tools/pubsys/src/repo/check_expirations/mod.rs @@ -4,6 +4,7 @@ use crate::repo::{error as repo_error, repo_urls}; use crate::Args; use chrono::{DateTime, Utc}; +use clap::Parser; use log::{error, info, trace, warn}; use parse_datetime::parse_datetime; use pubsys_config::InfraConfig; @@ -11,30 +12,28 @@ use snafu::{OptionExt, ResultExt}; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; -use structopt::{clap, StructOpt}; use tough::{ExpirationEnforcement, Repository, RepositoryLoader}; use url::Url; /// Checks for metadata expirations for a set of TUF repositories -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct CheckExpirationsArgs { - #[structopt(long)] + #[arg(long)] /// Use this named repo infrastructure from Infra.toml repo: String, - #[structopt(long)] + #[arg(long)] /// The architecture of the repo being checked for expirations arch: String, - #[structopt(long)] + #[arg(long)] /// The variant of the repo being checked for expirations variant: String, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to root.json for this repo root_role_path: PathBuf, - #[structopt(long, parse(try_from_str = parse_datetime))] + #[arg(long, value_parser = parse_datetime)] /// Finds metadata files expiring between now and a specified time; RFC3339 date or "in X hours/days/weeks" expiration_limit: DateTime, } diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs index 70024a23..be707876 100644 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ b/tools/pubsys/src/repo/refresh_repo/mod.rs @@ -6,6 +6,7 @@ use crate::repo::{ }; use crate::Args; use chrono::{DateTime, Utc}; +use clap::Parser; use lazy_static::lazy_static; use log::{info, trace}; use pubsys_config::{InfraConfig, RepoExpirationPolicy}; @@ -13,7 +14,6 @@ use snafu::{ensure, OptionExt, ResultExt}; use std::fs; use std::fs::File; use std::path::{Path, PathBuf}; -use structopt::{clap, StructOpt}; use tough::editor::RepositoryEditor; use tough::key_source::{KeySource, LocalKeySource}; use tough::{ExpirationEnforcement, RepositoryLoader}; @@ -24,37 +24,36 @@ lazy_static! 
{ } /// Refreshes and re-sign TUF repositories' non-root metadata files with new expiration dates -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct RefreshRepoArgs { - #[structopt(long)] + #[arg(long)] /// Use this named repo infrastructure from Infra.toml repo: String, - #[structopt(long)] + #[arg(long)] /// The architecture of the repo being refreshed and re-signed arch: String, - #[structopt(long)] + #[arg(long)] /// The variant of the repo being refreshed and re-signed variant: String, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to root.json for this repo root_role_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined default_key_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to file that defines when repo non-root metadata should expire repo_expiration_policy_path: PathBuf, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Where to store the refresh/re-signed repository (just the metadata files) outdir: PathBuf, - #[structopt(long)] + #[arg(long)] /// If this flag is set, repositories will succeed in loading and be refreshed even if they have /// expired metadata files. unsafe_refresh: bool, diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs index a2965a37..1734f6bb 100644 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ b/tools/pubsys/src/repo/validate_repo/mod.rs @@ -3,6 +3,7 @@ use crate::repo::{error as repo_error, repo_urls}; use crate::Args; +use clap::Parser; use log::{info, trace}; use pubsys_config::InfraConfig; use snafu::{OptionExt, ResultExt}; @@ -11,30 +12,28 @@ use std::fs::File; use std::io; use std::path::PathBuf; use std::sync::mpsc; -use structopt::{clap, StructOpt}; use tough::{Repository, RepositoryLoader, TargetName}; use url::Url; /// Validates a set of TUF repositories -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct ValidateRepoArgs { - #[structopt(long)] + #[arg(long)] /// Use this named repo infrastructure from Infra.toml repo: String, - #[structopt(long)] + #[arg(long)] /// The architecture of the repo being validated arch: String, - #[structopt(long)] + #[arg(long)] /// The variant of the repo being validated variant: String, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to root.json for this repo root_role_path: PathBuf, - #[structopt(long)] + #[arg(long)] /// Specifies whether to validate all listed targets by attempting to download them validate_targets: bool, } diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs index 49fc096f..3df0454d 100644 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ b/tools/pubsys/src/vmware/upload_ova/mod.rs @@ -2,6 +2,7 @@ //! the config necessary to upload an OVA bundle to VMware datacenters. 
use crate::vmware::govc::Govc; use crate::Args; +use clap::Parser; use log::{debug, info, trace}; use pubsys_config::vmware::{ Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig, @@ -12,34 +13,32 @@ use serde::Serialize; use snafu::{ensure, OptionExt, ResultExt}; use std::fs; use std::path::PathBuf; -use structopt::{clap, StructOpt}; use tempfile::NamedTempFile; use tinytemplate::TinyTemplate; const SPEC_TEMPLATE_NAME: &str = "spec_template"; /// Uploads a Bottlerocket OVA to VMware datacenters -#[derive(Debug, StructOpt)] -#[structopt(setting = clap::AppSettings::DeriveDisplayOrder)] +#[derive(Debug, Parser)] pub(crate) struct UploadArgs { /// Path to the OVA image - #[structopt(short = "o", long, parse(from_os_str))] + #[arg(short = 'o', long)] ova: PathBuf, /// Path to the import spec - #[structopt(short = "s", long, parse(from_os_str))] + #[arg(short = 's', long)] spec: PathBuf, /// The desired VM name - #[structopt(short = "n", long)] + #[arg(short = 'n', long)] name: String, /// Make the uploaded OVA a VM template - #[structopt(long)] + #[arg(long)] mark_as_template: bool, /// Datacenters to which you want to upload the OVA - #[structopt(long, use_delimiter = true)] + #[arg(long, value_delimiter = ',')] datacenters: Vec, } From 7f3bbb9f1931e7d239b8be804d688a5bb092f96e Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 23 Jun 2023 14:17:24 +0000 Subject: [PATCH 1002/1356] Bump clap to 4.3.5 in tools/testsys Tweaks for the newer version of clap. Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 59 ++-------------------- tools/testsys/Cargo.toml | 2 +- tools/testsys/src/install.rs | 4 +- tools/testsys/src/main.rs | 6 +-- tools/testsys/src/run.rs | 94 ++++++++++++++++++------------------ tools/testsys/src/secret.rs | 16 +++--- tools/testsys/src/status.rs | 16 +++--- 7 files changed, 73 insertions(+), 124 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 3206a2ca..549ad782 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -883,28 +883,11 @@ dependencies = [ "atty", "bitflags", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", ] -[[package]] -name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - "atty", - "bitflags", - "clap_derive 3.2.18", - "clap_lex 0.2.4", - "indexmap", - "once_cell", - "strsim 0.10.0", - "termcolor", - "textwrap 0.16.0", -] - [[package]] name = "clap" version = "4.3.5" @@ -912,7 +895,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2686c4115cb0810d9a984776e197823d08ec94f176549a89a9efded477c456dc" dependencies = [ "clap_builder", - "clap_derive 4.3.2", + "clap_derive", "once_cell", ] @@ -925,23 +908,10 @@ dependencies = [ "anstream", "anstyle", "bitflags", - "clap_lex 0.5.0", + "clap_lex", "strsim 0.10.0", ] -[[package]] -name = "clap_derive" -version = "3.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" -dependencies = [ - "heck 0.4.1", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "clap_derive" version = "4.3.2" @@ -954,15 +924,6 @@ dependencies = [ "syn 2.0.15", ] -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.5.0" @@ -2254,12 +2215,6 @@ dependencies = [ "windows-sys 0.45.0", ] -[[package]] -name = "os_str_bytes" -version = "6.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" - [[package]] name = "output_vt100" version = "0.1.3" @@ -3306,7 +3261,7 @@ dependencies = [ "base64 0.20.0", "bottlerocket-types", "bottlerocket-variant", - "clap 3.2.23", + "clap 4.3.5", "env_logger", "fastrand", "futures", @@ -3387,12 +3342,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.40" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 9e8ec9bb..fb91c214 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -13,7 +13,7 @@ aws-sdk-ec2 = "0.24" base64 = "0.20" bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } -clap = { version = "3", features = ["derive", "env"] } +clap = { version = "4", features = ["derive", "env"] } env_logger = "0.10" futures = "0.3" handlebars = "4" diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs index 83363520..17991869 100644 --- a/tools/testsys/src/install.rs +++ b/tools/testsys/src/install.rs @@ -11,10 +11,10 @@ use testsys_model::test_manager::{ImageConfig, TestManager}; #[derive(Debug, Parser)] pub(crate) struct Install { /// The path to `Test.toml` - #[clap(long, env = "TESTSYS_TEST_CONFIG_PATH", parse(from_os_str))] + #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")] test_config_path: PathBuf, - #[clap(flatten)] + #[command(flatten)] agent_images: TestsysImages, } diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs index b078f891..26a97d4b 100644 --- a/tools/testsys/src/main.rs +++ b/tools/testsys/src/main.rs @@ -36,16 +36,16 @@ mod vmware_k8s; #[derive(Parser, Debug)] #[clap(about, long_about = None)] struct TestsysArgs { - #[structopt(global = true, long, default_value = "INFO")] + #[arg(global = true, long, default_value = "INFO")] /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE log_level: LevelFilter, /// Path to the kubeconfig file for the testsys cluster. Can also be passed with the KUBECONFIG /// environment variable. - #[clap(long)] + #[arg(long)] kubeconfig: Option, - #[clap(subcommand)] + #[command(subcommand)] command: Command, } diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs index faa53c75..eb03de0a 100644 --- a/tools/testsys/src/run.rs +++ b/tools/testsys/src/run.rs @@ -30,89 +30,89 @@ pub(crate) struct Run { test_flavor: TestType, /// The architecture to test. Either x86_64 or aarch64. 
- #[clap(long, env = "BUILDSYS_ARCH")] + #[arg(long, env = "BUILDSYS_ARCH")] arch: String, /// The variant to test - #[clap(long, env = "BUILDSYS_VARIANT")] + #[arg(long, env = "BUILDSYS_VARIANT")] variant: String, /// The path to `Infra.toml` - #[clap(long, env = "PUBLISH_INFRA_CONFIG_PATH", parse(from_os_str))] + #[arg(long, env = "PUBLISH_INFRA_CONFIG_PATH")] infra_config_path: PathBuf, /// The path to `Test.toml` - #[clap(long, env = "TESTSYS_TEST_CONFIG_PATH", parse(from_os_str))] + #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")] test_config_path: PathBuf, /// The path to the `tests` directory - #[clap(long, env = "TESTSYS_TESTS_DIR", parse(from_os_str))] + #[arg(long, env = "TESTSYS_TESTS_DIR")] tests_directory: PathBuf, /// The path to the EKS-A management cluster kubeconfig for vSphere or metal K8s cluster creation - #[clap(long, env = "TESTSYS_MGMT_CLUSTER_KUBECONFIG", parse(from_os_str))] + #[arg(long, env = "TESTSYS_MGMT_CLUSTER_KUBECONFIG")] mgmt_cluster_kubeconfig: Option, /// Use this named repo infrastructure from Infra.toml for upgrade/downgrade testing. - #[clap(long, env = "PUBLISH_REPO")] + #[arg(long, env = "PUBLISH_REPO")] repo: Option, /// The name of the vSphere data center in `Infra.toml` that should be used for testing /// If no data center is provided, the first one in `vmware.datacenters` will be used - #[clap(long, env = "TESTSYS_DATACENTER")] + #[arg(long, env = "TESTSYS_DATACENTER")] datacenter: Option, /// The name of the VMware OVA that should be used for testing - #[clap(long, env = "BUILDSYS_OVA")] + #[arg(long, env = "BUILDSYS_OVA")] ova_name: Option, /// The name of the image that should be used for Bare Metal testing - #[clap(long, env = "BUILDSYS_NAME_FULL")] + #[arg(long, env = "BUILDSYS_NAME_FULL")] image_name: Option, /// The path to `amis.json` - #[clap(long, env = "AMI_INPUT")] + #[arg(long, env = "AMI_INPUT")] ami_input: Option, /// Override for the region the tests should be run in. If none is provided the first region in /// Infra.toml will be used. This is the region that the aws client is created with for testing /// and resource agents. - #[clap(long, env = "TESTSYS_TARGET_REGION")] + #[arg(long, env = "TESTSYS_TARGET_REGION")] target_region: Option, - #[clap(long, env = "BUILDSYS_VERSION_BUILD")] + #[arg(long, env = "BUILDSYS_VERSION_BUILD")] build_id: Option, - #[clap(flatten)] + #[command(flatten)] agent_images: TestsysImages, - #[clap(flatten)] + #[command(flatten)] config: CliConfig, // Migrations /// Override the starting image used for migrations. The image will be pulled from available /// amis in the users account if no override is provided. - #[clap(long, env = "TESTSYS_STARTING_IMAGE_ID")] + #[arg(long, env = "TESTSYS_STARTING_IMAGE_ID")] starting_image_id: Option, /// The starting version for migrations. This is required for all migrations tests. /// This is the version that will be created and migrated to `migration-target-version`. - #[clap(long, env = "TESTSYS_STARTING_VERSION")] + #[arg(long, env = "TESTSYS_STARTING_VERSION")] migration_starting_version: Option, /// The commit id of the starting version for migrations. This is required for all migrations /// tests unless `starting-image-id` is provided. This is the version that will be created and /// migrated to `migration-target-version`. - #[clap(long, env = "TESTSYS_STARTING_COMMIT")] + #[arg(long, env = "TESTSYS_STARTING_COMMIT")] migration_starting_commit: Option, /// The target version for migrations. This is required for all migration tests. 
This is the /// version that will be migrated to. - #[clap(long, env = "BUILDSYS_VERSION_IMAGE")] + #[arg(long, env = "BUILDSYS_VERSION_IMAGE")] migration_target_version: Option, /// The template file that should be used for custom testing. - #[clap(long = "template-file", short = 'f', parse(from_os_str))] + #[arg(long = "template-file", short = 'f')] custom_crd_template: Option, } @@ -121,63 +121,63 @@ pub(crate) struct Run { struct CliConfig { /// The repo containing images necessary for conformance testing. It may be omitted to use the /// default conformance image registry. - #[clap(long, env = "TESTSYS_CONFORMANCE_REGISTRY")] + #[arg(long, env = "TESTSYS_CONFORMANCE_REGISTRY")] conformance_registry: Option, /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). Note: /// This is not the name of the `testsys cluster` this is the name of the cluster that tests /// should be run on. If no cluster name is provided, the bottlerocket cluster /// naming convention `{{arch}}-{{variant}}` will be used. - #[clap(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] + #[arg(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] target_cluster_name: Option, /// The sonobuoy image that should be used for conformance testing. It may be omitted to use the default /// sonobuoy image. - #[clap(long, env = "TESTSYS_SONOBUOY_IMAGE")] + #[arg(long, env = "TESTSYS_SONOBUOY_IMAGE")] sonobuoy_image: Option, /// The image that should be used for conformance testing. It may be omitted to use the default /// testing image. - #[clap(long, env = "TESTSYS_CONFORMANCE_IMAGE")] + #[arg(long, env = "TESTSYS_CONFORMANCE_IMAGE")] conformance_image: Option, /// The role that should be assumed by the agents - #[clap(long, env = "TESTSYS_ASSUME_ROLE")] + #[arg(long, env = "TESTSYS_ASSUME_ROLE")] assume_role: Option, /// Specify the instance type that should be used. This is only applicable for aws-* variants. /// It can be omitted for non-aws variants and can be omitted to use default instance types. - #[clap(long, env = "TESTSYS_INSTANCE_TYPE")] + #[arg(long, env = "TESTSYS_INSTANCE_TYPE")] instance_type: Option, /// Add secrets to the testsys agents (`--secret awsCredentials=my-secret`) - #[clap(long, short, parse(try_from_str = parse_key_val), number_of_values = 1)] + #[arg(long, short, value_parser = parse_key_val, number_of_values = 1)] secret: Vec<(String, SecretName)>, /// The endpoint IP to reserve for the vSphere control plane VMs when creating a K8s cluster - #[clap(long, env = "TESTSYS_CONTROL_PLANE_ENDPOINT")] + #[arg(long, env = "TESTSYS_CONTROL_PLANE_ENDPOINT")] pub control_plane_endpoint: Option, /// Specify the path to the userdata that should be added for Bottlerocket launch - #[clap(long, env = "TESTSYS_USERDATA")] + #[arg(long, env = "TESTSYS_USERDATA")] pub userdata: Option, /// Specify the method that should be used to launch instances - #[clap(long, env = "TESTSYS_RESOURCE_AGENT")] + #[arg(long, env = "TESTSYS_RESOURCE_AGENT")] pub resource_agent_type: Option, /// A set of workloads that should be run for a workload test (--workload my-workload=) - #[clap(long = "workload", parse(try_from_str = parse_workloads), number_of_values = 1)] + #[arg(long = "workload", value_parser = parse_workloads, number_of_values = 1)] pub workloads: Vec<(String, String)>, /// The directory containing Bottlerocket images. For metal, this is the directory containing /// gzipped images. 
- #[clap(long)] + #[arg(long)] pub os_image_dir: Option, /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the /// hardware csv that is passed to EKS Anywhere. - #[clap(long)] + #[arg(long)] pub hardware_csv: Option, } @@ -505,95 +505,95 @@ derive_display_from_serialize!(KnownTestType); #[derive(Debug, Parser)] pub(crate) struct TestsysImages { /// EKS resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "eks-resource-agent-image", env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE" )] pub(crate) eks_resource: Option, /// ECS resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "ecs-resource-agent-image", env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE" )] pub(crate) ecs_resource: Option, /// vSphere cluster resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "vsphere-k8s-cluster-resource-agent-image", env = "TESTSYS_VSPHERE_K8S_CLUSTER_RESOURCE_AGENT_IMAGE" )] pub(crate) vsphere_k8s_cluster_resource: Option, /// Bare Metal cluster resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "metal-k8s-cluster-resource-agent-image", env = "TESTSYS_METAL_K8S_CLUSTER_RESOURCE_AGENT_IMAGE" )] pub(crate) metal_k8s_cluster_resource: Option, /// EC2 resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "ec2-resource-agent-image", env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE" )] pub(crate) ec2_resource: Option, /// EC2 Karpenter resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "ec2-resource-agent-image", env = "TESTSYS_EC2_KARPENTER_RESOURCE_AGENT_IMAGE" )] pub(crate) ec2_karpenter_resource: Option, /// vSphere VM resource agent URI. If not provided the latest released resource agent will be used. - #[clap( + #[arg( long = "vsphere-vm-resource-agent-image", env = "TESTSYS_VSPHERE_VM_RESOURCE_AGENT_IMAGE" )] pub(crate) vsphere_vm_resource: Option, /// Sonobuoy test agent URI. If not provided the latest released test agent will be used. - #[clap( + #[arg( long = "sonobuoy-test-agent-image", env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE" )] pub(crate) sonobuoy_test: Option, /// ECS test agent URI. If not provided the latest released test agent will be used. - #[clap(long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE")] + #[arg(long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE")] pub(crate) ecs_test: Option, /// Migration test agent URI. If not provided the latest released test agent will be used. - #[clap( + #[arg( long = "migration-test-agent-image", env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE" )] pub(crate) migration_test: Option, /// K8s workload agent URI. If not provided the latest released test agent will be used. - #[clap( + #[arg( long = "k8s-workload-agent-image", env = "TESTSYS_K8S_WORKLOAD_AGENT_IMAGE" )] pub(crate) k8s_workload: Option, /// ECS workload agent URI. If not provided the latest released test agent will be used. - #[clap( + #[arg( long = "ecs-workload-agent-image", env = "TESTSYS_ECS_WORKLOAD_AGENT_IMAGE" )] pub(crate) ecs_workload: Option, /// TestSys controller URI. If not provided the latest released controller will be used. 
- #[clap(long = "controller-image", env = "TESTSYS_CONTROLLER_IMAGE")] + #[arg(long = "controller-image", env = "TESTSYS_CONTROLLER_IMAGE")] pub(crate) controller_uri: Option, /// Images pull secret. This is the name of a Kubernetes secret that will be used to /// pull the container image from a private registry. For example, if you created a pull secret /// with `kubectl create secret docker-registry regcred` then you would pass /// `--images-pull-secret regcred`. - #[clap(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")] + #[arg(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")] pub(crate) secret: Option, } diff --git a/tools/testsys/src/secret.rs b/tools/testsys/src/secret.rs index c4c5260e..6343c163 100644 --- a/tools/testsys/src/secret.rs +++ b/tools/testsys/src/secret.rs @@ -7,7 +7,7 @@ use testsys_model::SecretName; /// Add a testsys object to the testsys cluster. #[derive(Debug, Parser)] pub(crate) struct Add { - #[clap(subcommand)] + #[command(subcommand)] command: AddCommand, } @@ -28,7 +28,7 @@ impl Add { /// Add a secret to the cluster. #[derive(Debug, Parser)] pub(crate) struct AddSecret { - #[clap(subcommand)] + #[command(subcommand)] command: Command, } @@ -53,11 +53,11 @@ impl AddSecret { #[derive(Debug, Parser)] pub(crate) struct AddSecretMap { /// Name of the secret - #[clap(short, long)] + #[arg(short, long)] name: SecretName, /// Key value pairs for secrets. (Key=value) - #[clap(parse(try_from_str = parse_key_val))] + #[arg(value_parser = parse_key_val)] args: Vec<(String, String)>, } @@ -84,19 +84,19 @@ fn parse_key_val(s: &str) -> Result<(String, String)> { #[derive(Debug, Parser)] pub(crate) struct AddSecretImage { /// Controller image pull username - #[clap(long, short = 'u')] + #[arg(long, short = 'u')] pull_username: String, /// Controller image pull password - #[clap(long, short = 'p')] + #[arg(long, short = 'p')] pull_password: String, /// Image uri - #[clap(long = "image-uri", short)] + #[arg(long = "image-uri", short)] image_uri: String, /// Controller image uri - #[clap(long, short = 'n')] + #[arg(long, short = 'n')] secret_name: String, } diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs index b7a88f21..2aadcd99 100644 --- a/tools/testsys/src/status.rs +++ b/tools/testsys/src/status.rs @@ -10,31 +10,31 @@ use testsys_model::test_manager::{CrdState, CrdType, SelectionParams, StatusColu #[derive(Debug, Parser)] pub(crate) struct Status { /// Configure the output of the command (json, narrow, wide). 
- #[clap(long, short = 'o')] + #[arg(long, short = 'o')] output: Option, /// Focus status on a particular arch - #[clap(long)] + #[arg(long)] arch: Option, /// Focus status on a particular variant - #[clap(long)] + #[arg(long)] variant: Option, /// Only show tests - #[clap(long)] + #[arg(long)] test: bool, /// Only show passed tests - #[clap(long, conflicts_with_all=&["failed", "running"])] + #[arg(long, conflicts_with_all=&["failed", "running"])] passed: bool, /// Only show failed tests - #[clap(long, conflicts_with_all=&["passed", "running"])] + #[arg(long, conflicts_with_all=&["passed", "running"])] failed: bool, /// Only CRD's that haven't finished - #[clap(long, conflicts_with_all=&["passed", "failed"])] + #[arg(long, conflicts_with_all=&["passed", "failed"])] running: bool, } @@ -114,7 +114,7 @@ impl Status { } } -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] #[serde(rename_all = "kebab-case")] enum StatusOutput { /// Output the status in json From 3f0dc91a97561ea502611a48e6d428e8356736dd Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 23 Jun 2023 14:21:48 +0000 Subject: [PATCH 1003/1356] Switch from structopt to clap in tools/pubsys-setup This removes the structopt dependency in favor of clap. Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 47 +++------------------------------- tools/deny.toml | 3 --- tools/pubsys-setup/Cargo.toml | 2 +- tools/pubsys-setup/src/main.rs | 19 +++++++------- 4 files changed, 15 insertions(+), 56 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 549ad782..23246a1c 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -918,7 +918,7 @@ version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", "syn 2.0.15", @@ -1545,15 +1545,6 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.1" @@ -2509,6 +2500,7 @@ dependencies = [ name = "pubsys-setup" version = "0.1.0" dependencies = [ + "clap 4.3.5", "hex", "log", "pubsys-config", @@ -2517,7 +2509,6 @@ dependencies = [ "shell-words", "simplelog", "snafu", - "structopt", "tempfile", "toml", "url", @@ -3103,7 +3094,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", "syn 1.0.109", @@ -3143,30 +3134,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap 2.34.0", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" 
-dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "subtle" version = "2.4.1" @@ -3212,7 +3179,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -3743,12 +3710,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - [[package]] name = "unicode-width" version = "0.1.10" diff --git a/tools/deny.toml b/tools/deny.toml index d6432e92..a9b911c0 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -74,9 +74,6 @@ skip = [ ] skip-tree = [ - # structopt pulls in an older version of clap - { name = "structopt", version = "0.3.26" }, - # windows-sys is not a direct dependency. mio and schannel # are using different versions of windows-sys. we skip the # dependency tree because windows-sys has many sub-crates diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index 350cb4c2..e03fa2a6 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" publish = false [dependencies] +clap = { version = "4", features = ["derive"] } hex = "0.4" log = "0.4" pubsys-config = { path = "../pubsys-config/", version = "0.1" } @@ -15,7 +16,6 @@ sha2 = "0.10" shell-words = "1" simplelog = "0.12" snafu = "0.7" -structopt = { version = "0.3", default-features = false } tempfile = "3" toml = "0.5" url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs index 423ca3b5..d07cb7a2 100644 --- a/tools/pubsys-setup/src/main.rs +++ b/tools/pubsys-setup/src/main.rs @@ -4,6 +4,7 @@ the repos you use to update them. Specifically, it can create a new key and rol existing role. */ +use clap::Parser; use log::{debug, info, trace, warn}; use pubsys_config::InfraConfig; use sha2::{Digest, Sha512}; @@ -14,33 +15,33 @@ use std::fs; use std::os::unix::fs::PermissionsExt; use std::path::PathBuf; use std::process::{self, Command}; -use structopt::StructOpt; use tempfile::NamedTempFile; use url::Url; /// Helps you get started with credentials to make Bottlerocket images and repos. -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct Args { - #[structopt(global = true, long, default_value = "INFO")] + #[arg(global = true, long, default_value = "INFO")] /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE log_level: LevelFilter, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to Infra.toml infra_config_path: PathBuf, - #[structopt(long)] + #[arg(long)] /// Use this named repo infrastructure from Infra.toml repo: String, - #[structopt(long, parse(from_os_str))] + #[arg(long)] /// Path to root.json root_role_path: PathBuf, - #[structopt(long, parse(from_os_str))] + + #[arg(long)] /// If we have to generate a local key, store it here default_key_path: PathBuf, - #[structopt(long)] + #[arg(long)] /// Allow setup to continue if we have a root role but no key for it allow_missing_key: bool, } @@ -71,7 +72,7 @@ macro_rules! tuftool { /// Main entry point for tuftool setup. 
fn run() -> Result<()> { // Parse and store the args passed to the program - let args = Args::from_args(); + let args = Args::parse(); // SimpleLogger will send errors to stderr and anything less to stdout. SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; From f4b425b83fc5693345b53c9eab1beb98199217eb Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 8 Jun 2023 13:38:44 +0000 Subject: [PATCH 1004/1356] kmod-6.1-nvidia: Copy kmod-5.15-nvidia into new package Copy the package kmod-5.15-nvidia into a new package kmod-6.1-nvidia. Adjust the references to kernel versions to build for kernel 6.1. Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/Cargo.toml | 25 ++ packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 323 ++++++++++++++++++ .../nvidia-dependencies-modules-load.conf | 2 + packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in | 1 + .../nvidia-tesla-build-config.toml.in | 18 + .../kmod-6.1-nvidia/nvidia-tesla-path.env.in | 1 + .../nvidia-tesla-tmpfiles.conf.in | 3 + .../kmod-6.1-nvidia/nvidia-tmpfiles.conf.in | 2 + 8 files changed, 375 insertions(+) create mode 100644 packages/kmod-6.1-nvidia/Cargo.toml create mode 100644 packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec create mode 100644 packages/kmod-6.1-nvidia/nvidia-dependencies-modules-load.conf create mode 100644 packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in create mode 100644 packages/kmod-6.1-nvidia/nvidia-tesla-build-config.toml.in create mode 100644 packages/kmod-6.1-nvidia/nvidia-tesla-path.env.in create mode 100644 packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in create mode 100644 packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml new file mode 100644 index 00000000..4a8fbf22 --- /dev/null +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "kmod-6_1-nvidia" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[lib] +path = "/dev/null" + +[package.metadata.build-package] +package-name = "kmod-6.1-nvidia" +releases-url = "https://docs.nvidia.com/datacenter/tesla/" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-x86_64-515.86.01.run" +sha512 = "9a31e14afc017e847f1208577f597c490adb63c256d6dff1a9eae56b65cf85374a604516b0be9da7a43e9af93b3c5aec47b2ffefd6b4050a4b7e55f348cf4e7b" + +[[package.metadata.build-package.external-files]] +url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-aarch64-515.86.01.run" +sha512 = "43161f86143b1558d1f558acf4a060f53f538ea20e6235f76be24916fe4a9c374869645c7abf39eba66f1c2ca35f5d2b04f199bd1341b7ee6c1fdc879cb3ef96" + +[build-dependencies] +glibc = { path = "../glibc" } +kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec new file mode 100644 index 00000000..e2f689b9 --- /dev/null +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -0,0 +1,323 @@ +%global tesla_515 515.86.01 +%global tesla_515_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_515} +%global tesla_515_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%global tesla_515_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_515} +%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) +%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) + +Name: %{_cross_os}kmod-6.1-nvidia +Version: 
1.0.0 +Release: 1%{?dist} +Summary: NVIDIA drivers for the 6.1 kernel +# We use these licences because we only ship our own software in the main package, +# each subpackage includes the LICENSE file provided by the Licenses.toml file +License: Apache-2.0 OR MIT +URL: http://www.nvidia.com/ + +# NVIDIA .run scripts from 0 to 199 +Source0: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-x86_64-%{tesla_515}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-aarch64-%{tesla_515}.run + +# Common NVIDIA conf files from 200 to 299 +Source200: nvidia-tmpfiles.conf.in +Source202: nvidia-dependencies-modules-load.conf + +# NVIDIA tesla conf files from 300 to 399 +Source300: nvidia-tesla-tmpfiles.conf.in +Source301: nvidia-tesla-build-config.toml.in +Source302: nvidia-tesla-path.env.in +Source303: nvidia-ld.so.conf.in + +BuildRequires: %{_cross_os}glibc-devel +BuildRequires: %{_cross_os}kernel-6.1-archive + +%description +%{summary}. + +%package tesla-515 +Summary: NVIDIA 515 Tesla driver +Version: %{tesla_515} +License: %{spdx_id} +Requires: %{name} + +%description tesla-515 +%{summary} + +%prep +# Extract nvidia sources with `-x`, otherwise the script will try to install +# the driver in the current run +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_515}.run -x + +%global kernel_sources %{_builddir}/kernel-devel +tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515}/kernel + +# This recipe was based in the NVIDIA yum/dnf specs: +# https://github.com/NVIDIA/yum-packaging-precompiled-kmod + +# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if +# we don't set this flag the compilation fails +make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld + +%{_cross_target}-strip -g --strip-unneeded nvidia/nv-interface.o +%{_cross_target}-strip -g --strip-unneeded nvidia-uvm.o +%{_cross_target}-strip -g --strip-unneeded nvidia-drm.o +%{_cross_target}-strip -g --strip-unneeded nvidia-peermem/nvidia-peermem.o +%{_cross_target}-strip -g --strip-unneeded nvidia-modeset/nv-modeset-interface.o + +# We delete these files since we just stripped the input .o files above, and +# will be build at runtime in the host +rm nvidia{,-modeset,-peermem}.o + +# Delete the .ko files created in make command, just to be safe that we +# don't include any linked module in the base image +rm nvidia{,-modeset,-peermem,-drm}.ko + +popd + +%install +install -d %{buildroot}%{_cross_libexecdir} +install -d %{buildroot}%{_cross_libdir} +install -d %{buildroot}%{_cross_tmpfilesdir} +install -d %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} + +KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) +sed \ + -e "s|__KERNEL_VERSION__|${KERNEL_VERSION}|" \ + -e "s|__PREFIX__|%{_cross_prefix}|" %{S:200} > nvidia.conf +install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} + +# Install modules-load.d drop-in to autoload required kernel modules +install -d %{buildroot}%{_cross_libdir}/modules-load.d +install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf + +# Begin NVIDIA tesla 515 +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515} +# We install bins and libs in a versioned directory to prevent collisions with future drivers versions +install -d 
%{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -d %{buildroot}%{tesla_515_libdir} +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} + +sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' %{S:300} > nvidia-tesla-%{tesla_515}.conf +install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_tmpfilesdir}/ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/|' %{S:301} > \ + nvidia-tesla-%{tesla_515}.toml +install -m 0644 nvidia-tesla-%{tesla_515}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +# Install nvidia-path environment file, will be used as a drop-in for containerd.service since +# libnvidia-container locates and mounts helper binaries into the containers from either +# `PATH` or `NVIDIA_PATH` +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} +# We need to add `_cross_libdir/tesla_515` to the paths loaded by the ldconfig service +# because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the +# libraries into the containers +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' \ + > nvidia-tesla-%{tesla_515}.conf +install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ + +# driver +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o + +# uvm +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# modeset +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# peermem +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# drm +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d + +# Binaries +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%if "%{_cross_arch}" == "x86_64" +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%endif + +# We install all the 
libraries, and filter them out in the 'files' section, so we can catch +# when new libraries are added +install -m 755 *.so* %{buildroot}/%{tesla_515_libdir}/ + +# This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while +# the symlinks are created. For now, we only symlink libEGL.so.1.1.0. +EXCLUDED_LIBS="libEGL.so.%{tesla_515}" + +for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do + [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue + soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" + [ -n "${soname}" ] || continue + [ "${lib}" == "${soname}" ] && continue + ln -s "${lib}" %{buildroot}/%{tesla_515_libdir}/"${soname}" +done + +# Include the firmware file for GSP support +install -d %{buildroot}%{tesla_515_firmwaredir} +install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_515_firmwaredir} + +popd + +%files +%{_cross_attribution_file} +%dir %{_cross_libexecdir}/nvidia +%dir %{_cross_libdir}/nvidia +%dir %{_cross_datadir}/nvidia +%dir %{_cross_libdir}/modules-load.d +%dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%{_cross_tmpfilesdir}/nvidia.conf +%{_cross_libdir}/systemd/system/ +%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf + +%files tesla-515 +%license %{license_file} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515} +%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +%dir %{tesla_515_libdir} +%dir %{tesla_515_firmwaredir} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_515} + +# Binaries +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-smi + +# Configuration files +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_515}.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_515}.conf +%{_cross_factorydir}/nvidia/tesla/%{tesla_515}/nvidia-path.env + +# driver +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o + +# uvm +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.o + +# modeset +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-modeset.mod.o + +# tmpfiles +%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_515}.conf + +# We only install the libraries required by all the DRIVER_CAPABILITIES, described here: +# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities + +# Utility libs +%{tesla_515_libdir}/libnvidia-ml.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ml.so.1 +%{tesla_515_libdir}/libnvidia-cfg.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-cfg.so.1 +%{tesla_515_libdir}/libnvidia-nvvm.so.4 +%{tesla_515_libdir}/libnvidia-nvvm.so.%{tesla_515} + +# Compute libs +%{tesla_515_libdir}/libcuda.so.%{tesla_515} +%{tesla_515_libdir}/libcuda.so.1 +%{tesla_515_libdir}/libnvidia-opencl.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-opencl.so.1 +%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.1 
+%{tesla_515_libdir}/libnvidia-allocator.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-allocator.so.1 +%{tesla_515_libdir}/libOpenCL.so.1.0.0 +%{tesla_515_libdir}/libOpenCL.so.1 +%if "%{_cross_arch}" == "x86_64" +%{tesla_515_libdir}/libnvidia-compiler.so.%{tesla_515} +%endif + +# Video libs +%{tesla_515_libdir}/libvdpau_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libvdpau_nvidia.so.1 +%{tesla_515_libdir}/libnvidia-encode.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-encode.so.1 +%{tesla_515_libdir}/libnvidia-opticalflow.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-opticalflow.so.1 +%{tesla_515_libdir}/libnvcuvid.so.%{tesla_515} +%{tesla_515_libdir}/libnvcuvid.so.1 + +# Graphics libs +%{tesla_515_libdir}/libnvidia-eglcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-glcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-tls.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-glsi.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-rtcore.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-fbc.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-fbc.so.1 +%{tesla_515_libdir}/libnvoptix.so.%{tesla_515} +%{tesla_515_libdir}/libnvoptix.so.1 +%{tesla_515_libdir}/libnvidia-vulkan-producer.so.%{tesla_515} + +# Graphics GLVND libs +%{tesla_515_libdir}/libnvidia-glvkspirv.so.%{tesla_515} +%{tesla_515_libdir}/libGLX_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLX_nvidia.so.0 +%{tesla_515_libdir}/libEGL_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libEGL_nvidia.so.0 +%{tesla_515_libdir}/libGLESv2_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLESv2_nvidia.so.2 +%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.%{tesla_515} +%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.1 + +# Graphics compat +%{tesla_515_libdir}/libEGL.so.1.1.0 +%{tesla_515_libdir}/libEGL.so.1 +%{tesla_515_libdir}/libEGL.so.%{tesla_515} +%{tesla_515_libdir}/libGL.so.1.7.0 +%{tesla_515_libdir}/libGL.so.1 +%{tesla_515_libdir}/libGLESv1_CM.so.1.2.0 +%{tesla_515_libdir}/libGLESv1_CM.so.1 +%{tesla_515_libdir}/libGLESv2.so.2.1.0 +%{tesla_515_libdir}/libGLESv2.so.2 + +# NGX +%{tesla_515_libdir}/libnvidia-ngx.so.%{tesla_515} +%{tesla_515_libdir}/libnvidia-ngx.so.1 + +# Firmware +%{tesla_515_firmwaredir}/gsp.bin + +# Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them +# for now, and we will add them if requested +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-server +%if "%{_cross_arch}" == "x86_64" +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-ngx-updater +%endif + +# None of these libraries are required by libnvidia-container, so they +# won't be used by a containerized workload +%exclude %{tesla_515_libdir}/libGLX.so.0 +%exclude %{tesla_515_libdir}/libGLdispatch.so.0 +%exclude %{tesla_515_libdir}/libOpenGL.so.0 +%exclude %{tesla_515_libdir}/libglxserver_nvidia.so.%{tesla_515} +%exclude %{tesla_515_libdir}/libnvidia-gtk2.so.%{tesla_515} +%exclude %{tesla_515_libdir}/libnvidia-gtk3.so.%{tesla_515} +%exclude %{tesla_515_libdir}/nvidia_drv.so +%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1 +%exclude 
%{tesla_515_libdir}/libnvidia-egl-gbm.so.1 +%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1.1.0 +%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1.1.9 +%exclude %{tesla_515_libdir}/libnvidia-wayland-client.so.%{tesla_515} diff --git a/packages/kmod-6.1-nvidia/nvidia-dependencies-modules-load.conf b/packages/kmod-6.1-nvidia/nvidia-dependencies-modules-load.conf new file mode 100644 index 00000000..86f884a6 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-dependencies-modules-load.conf @@ -0,0 +1,2 @@ +i2c_core +ipmi_msghandler diff --git a/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in b/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in new file mode 100644 index 00000000..a07b0ccb --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in @@ -0,0 +1 @@ +__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-build-config.toml.in b/packages/kmod-6.1-nvidia/nvidia-tesla-build-config.toml.in new file mode 100644 index 00000000..fb74dc51 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-tesla-build-config.toml.in @@ -0,0 +1,18 @@ +[nvidia-tesla] +lib-modules-path = "kernel/drivers/extra/video/nvidia/tesla" +objects-source = "__NVIDIA_MODULES__" + +[nvidia-tesla.object-files."nvidia.o"] +link-objects = ["nv-interface.o", "nv-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia.ko"] +link-objects = ["nvidia.o", "nvidia.mod.o"] + +[nvidia-tesla.object-files."nvidia-modeset.o"] +link-objects = ["nv-modeset-interface.o", "nv-modeset-kernel.o"] + +[nvidia-tesla.kernel-modules."nvidia-modeset.ko"] +link-objects = ["nvidia-modeset.o", "nvidia-modeset.mod.o"] + +[nvidia-tesla.kernel-modules."nvidia-uvm.ko"] +link-objects = ["nvidia-uvm.o", "nvidia-uvm.mod.o"] diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-path.env.in b/packages/kmod-6.1-nvidia/nvidia-tesla-path.env.in new file mode 100644 index 00000000..28f74deb --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-tesla-path.env.in @@ -0,0 +1 @@ +NVIDIA_PATH=__NVIDIA_BINDIR__ diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in new file mode 100644 index 00000000..f208e1d2 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in @@ -0,0 +1,3 @@ +C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf diff --git a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in new file mode 100644 index 00000000..d4763f28 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in @@ -0,0 +1,2 @@ +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - +d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - From d52083076ae4c44235a8f61724560d9f8d5ac71f Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 8 Jun 2023 14:00:04 +0000 Subject: [PATCH 1005/1356] kmod-6.1-nvidia: Update nvidia driver sources Pull in the new version (525.105.17) of the nvidia driver and adjust all references to a driver version to the new version. 
Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/Cargo.toml | 9 +- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 296 +++++++++--------- 2 files changed, 152 insertions(+), 153 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 4a8fbf22..a442c424 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -13,13 +13,12 @@ package-name = "kmod-6.1-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-x86_64-515.86.01.run" -sha512 = "9a31e14afc017e847f1208577f597c490adb63c256d6dff1a9eae56b65cf85374a604516b0be9da7a43e9af93b3c5aec47b2ffefd6b4050a4b7e55f348cf4e7b" +url = "https://us.download.nvidia.com/tesla/525.105.17/NVIDIA-Linux-x86_64-525.105.17.run" +sha512 = "1a44a8d92d8434d356dcd6087c8a3277136e0819ffa5e4b6895854811cf63e44ad3dc08e0d248f149f8dc2280ab0993be6ee7fdf3c676fb9a85ff3dce83fd69a" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-aarch64-515.86.01.run" -sha512 = "43161f86143b1558d1f558acf4a060f53f538ea20e6235f76be24916fe4a9c374869645c7abf39eba66f1c2ca35f5d2b04f199bd1341b7ee6c1fdc879cb3ef96" - +url = "https://us.download.nvidia.com/tesla/525.105.17/NVIDIA-Linux-aarch64-525.105.17.run" +sha512 = "75192acf8448a206a956b94b01b5e1e05cd21c7f172557bd6a2d1f1d92877583734c7e0998cedc3b54a8bdc5c7e869531ddea4161639daf40cbd0bf035252759" [build-dependencies] glibc = { path = "../glibc" } kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index e2f689b9..e7934096 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,7 +1,7 @@ -%global tesla_515 515.86.01 -%global tesla_515_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_515} -%global tesla_515_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -%global tesla_515_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_515} +%global tesla_525 525.105.17 +%global tesla_525_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_525} +%global tesla_525_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +%global tesla_525_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_525} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -15,8 +15,8 @@ License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ # NVIDIA .run scripts from 0 to 199 -Source0: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-x86_64-%{tesla_515}.run -Source1: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-aarch64-%{tesla_515}.run +Source0: https://us.download.nvidia.com/tesla/%{tesla_525}/NVIDIA-Linux-x86_64-%{tesla_525}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_525}/NVIDIA-Linux-aarch64-%{tesla_525}.run # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -34,25 +34,25 @@ BuildRequires: %{_cross_os}kernel-6.1-archive %description %{summary}. 
-%package tesla-515 -Summary: NVIDIA 515 Tesla driver -Version: %{tesla_515} +%package tesla-525 +Summary: NVIDIA 525 Tesla driver +Version: %{tesla_525} License: %{spdx_id} Requires: %{name} -%description tesla-515 +%description tesla-525 %{summary} %prep # Extract nvidia sources with `-x`, otherwise the script will try to install # the driver in the current run -sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_515}.run -x +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_525}.run -x %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz %build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515}/kernel +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525}/kernel # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod @@ -94,81 +94,81 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -# Begin NVIDIA tesla 515 -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515} +# Begin NVIDIA tesla 525 +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions -install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -d %{buildroot}%{tesla_515_libdir} -install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} - -sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' %{S:300} > nvidia-tesla-%{tesla_515}.conf -install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_tmpfilesdir}/ -sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/|' %{S:301} > \ - nvidia-tesla-%{tesla_515}.toml -install -m 0644 nvidia-tesla-%{tesla_515}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -d %{buildroot}%{tesla_525_libdir} +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_525} + +sed -e 's|__NVIDIA_VERSION__|%{tesla_525}|' %{S:300} > nvidia-tesla-%{tesla_525}.conf +install -m 0644 nvidia-tesla-%{tesla_525}.conf %{buildroot}%{_cross_tmpfilesdir}/ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/|' %{S:301} > \ + nvidia-tesla-%{tesla_525}.toml +install -m 0644 nvidia-tesla-%{tesla_525}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}|' %{S:302} > nvidia-path.env -install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} -# We need to add `_cross_libdir/tesla_515` to the paths loaded by the ldconfig service +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_525} +# We need to add `_cross_libdir/tesla_525` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` 
file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' \ - > nvidia-tesla-%{tesla_515}.conf -install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_525}|' \ + > nvidia-tesla-%{tesla_525}.conf +install -m 0644 nvidia-tesla-%{tesla_525}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # driver -install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-kernel.o # uvm -install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d # modeset -install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d # peermem -install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d # drm -install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d # Binaries -install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 
nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} %if "%{_cross_arch}" == "x86_64" -install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} %endif # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added -install -m 755 *.so* %{buildroot}/%{tesla_515_libdir}/ +install -m 755 *.so* %{buildroot}/%{tesla_525_libdir}/ # This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while # the symlinks are created. For now, we only symlink libEGL.so.1.1.0. -EXCLUDED_LIBS="libEGL.so.%{tesla_515}" +EXCLUDED_LIBS="libEGL.so.%{tesla_525}" for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" [ -n "${soname}" ] || continue [ "${lib}" == "${soname}" ] && continue - ln -s "${lib}" %{buildroot}/%{tesla_515_libdir}/"${soname}" + ln -s "${lib}" %{buildroot}/%{tesla_525_libdir}/"${soname}" done # Include the firmware file for GSP support -install -d %{buildroot}%{tesla_515_firmwaredir} -install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_515_firmwaredir} +install -d %{buildroot}%{tesla_525_firmwaredir} +install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_525_firmwaredir} popd @@ -183,141 +183,141 @@ popd %{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -%files tesla-515 +%files tesla-525 %license %{license_file} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515} -%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -%dir %{tesla_515_libdir} -%dir %{tesla_515_firmwaredir} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_515} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_525} +%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +%dir %{tesla_525_libdir} +%dir %{tesla_525_firmwaredir} +%dir %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_525} # Binaries -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-debugdump -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-smi # Configuration files -%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_515}.toml -%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_515}.conf -%{_cross_factorydir}/nvidia/tesla/%{tesla_515}/nvidia-path.env +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_525}.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_525}.conf +%{_cross_factorydir}/nvidia/tesla/%{tesla_525}/nvidia-path.env # driver 
-%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-kernel.o # uvm -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-uvm.o # modeset -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-kernel.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-modeset.mod.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-modeset.mod.o # tmpfiles -%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_515}.conf +%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_525}.conf # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities # Utility libs -%{tesla_515_libdir}/libnvidia-ml.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ml.so.1 -%{tesla_515_libdir}/libnvidia-cfg.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-cfg.so.1 -%{tesla_515_libdir}/libnvidia-nvvm.so.4 -%{tesla_515_libdir}/libnvidia-nvvm.so.%{tesla_515} +%{tesla_525_libdir}/libnvidia-ml.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-ml.so.1 +%{tesla_525_libdir}/libnvidia-cfg.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-cfg.so.1 +%{tesla_525_libdir}/libnvidia-nvvm.so.4 +%{tesla_525_libdir}/libnvidia-nvvm.so.%{tesla_525} # Compute libs -%{tesla_515_libdir}/libcuda.so.%{tesla_515} -%{tesla_515_libdir}/libcuda.so.1 -%{tesla_515_libdir}/libnvidia-opencl.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-opencl.so.1 -%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.1 -%{tesla_515_libdir}/libnvidia-allocator.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-allocator.so.1 -%{tesla_515_libdir}/libOpenCL.so.1.0.0 -%{tesla_515_libdir}/libOpenCL.so.1 +%{tesla_525_libdir}/libcuda.so.%{tesla_525} +%{tesla_525_libdir}/libcuda.so.1 +%{tesla_525_libdir}/libnvidia-opencl.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-opencl.so.1 +%{tesla_525_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-ptxjitcompiler.so.1 +%{tesla_525_libdir}/libnvidia-allocator.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-allocator.so.1 +%{tesla_525_libdir}/libOpenCL.so.1.0.0 +%{tesla_525_libdir}/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{tesla_515_libdir}/libnvidia-compiler.so.%{tesla_515} +%{tesla_525_libdir}/libnvidia-compiler.so.%{tesla_525} %endif # Video libs -%{tesla_515_libdir}/libvdpau_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libvdpau_nvidia.so.1 -%{tesla_515_libdir}/libnvidia-encode.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-encode.so.1 -%{tesla_515_libdir}/libnvidia-opticalflow.so.%{tesla_515} 
-%{tesla_515_libdir}/libnvidia-opticalflow.so.1 -%{tesla_515_libdir}/libnvcuvid.so.%{tesla_515} -%{tesla_515_libdir}/libnvcuvid.so.1 +%{tesla_525_libdir}/libvdpau_nvidia.so.%{tesla_525} +%{tesla_525_libdir}/libvdpau_nvidia.so.1 +%{tesla_525_libdir}/libnvidia-encode.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-encode.so.1 +%{tesla_525_libdir}/libnvidia-opticalflow.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-opticalflow.so.1 +%{tesla_525_libdir}/libnvcuvid.so.%{tesla_525} +%{tesla_525_libdir}/libnvcuvid.so.1 # Graphics libs -%{tesla_515_libdir}/libnvidia-eglcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-glcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-tls.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-glsi.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-rtcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-fbc.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-fbc.so.1 -%{tesla_515_libdir}/libnvoptix.so.%{tesla_515} -%{tesla_515_libdir}/libnvoptix.so.1 -%{tesla_515_libdir}/libnvidia-vulkan-producer.so.%{tesla_515} +%{tesla_525_libdir}/libnvidia-eglcore.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-glcore.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-tls.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-glsi.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-rtcore.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-fbc.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-fbc.so.1 +%{tesla_525_libdir}/libnvoptix.so.%{tesla_525} +%{tesla_525_libdir}/libnvoptix.so.1 +%{tesla_525_libdir}/libnvidia-vulkan-producer.so.%{tesla_525} # Graphics GLVND libs -%{tesla_515_libdir}/libnvidia-glvkspirv.so.%{tesla_515} -%{tesla_515_libdir}/libGLX_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLX_nvidia.so.0 -%{tesla_515_libdir}/libEGL_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libEGL_nvidia.so.0 -%{tesla_515_libdir}/libGLESv2_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLESv2_nvidia.so.2 -%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.1 +%{tesla_525_libdir}/libnvidia-glvkspirv.so.%{tesla_525} +%{tesla_525_libdir}/libGLX_nvidia.so.%{tesla_525} +%{tesla_525_libdir}/libGLX_nvidia.so.0 +%{tesla_525_libdir}/libEGL_nvidia.so.%{tesla_525} +%{tesla_525_libdir}/libEGL_nvidia.so.0 +%{tesla_525_libdir}/libGLESv2_nvidia.so.%{tesla_525} +%{tesla_525_libdir}/libGLESv2_nvidia.so.2 +%{tesla_525_libdir}/libGLESv1_CM_nvidia.so.%{tesla_525} +%{tesla_525_libdir}/libGLESv1_CM_nvidia.so.1 # Graphics compat -%{tesla_515_libdir}/libEGL.so.1.1.0 -%{tesla_515_libdir}/libEGL.so.1 -%{tesla_515_libdir}/libEGL.so.%{tesla_515} -%{tesla_515_libdir}/libGL.so.1.7.0 -%{tesla_515_libdir}/libGL.so.1 -%{tesla_515_libdir}/libGLESv1_CM.so.1.2.0 -%{tesla_515_libdir}/libGLESv1_CM.so.1 -%{tesla_515_libdir}/libGLESv2.so.2.1.0 -%{tesla_515_libdir}/libGLESv2.so.2 +%{tesla_525_libdir}/libEGL.so.1.1.0 +%{tesla_525_libdir}/libEGL.so.1 +%{tesla_525_libdir}/libEGL.so.%{tesla_525} +%{tesla_525_libdir}/libGL.so.1.7.0 +%{tesla_525_libdir}/libGL.so.1 +%{tesla_525_libdir}/libGLESv1_CM.so.1.2.0 +%{tesla_525_libdir}/libGLESv1_CM.so.1 +%{tesla_525_libdir}/libGLESv2.so.2.1.0 +%{tesla_525_libdir}/libGLESv2.so.2 # NGX -%{tesla_515_libdir}/libnvidia-ngx.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ngx.so.1 +%{tesla_525_libdir}/libnvidia-ngx.so.%{tesla_525} +%{tesla_525_libdir}/libnvidia-ngx.so.1 # Firmware -%{tesla_515_firmwaredir}/gsp.bin +%{tesla_525_firmwaredir}/gsp.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested 
-%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.o -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-control -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-server +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-cuda-mps-server %if "%{_cross_arch}" == "x86_64" -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-ngx-updater +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-ngx-updater %endif # None of these libraries are required by libnvidia-container, so they # won't be used by a containerized workload -%exclude %{tesla_515_libdir}/libGLX.so.0 -%exclude %{tesla_515_libdir}/libGLdispatch.so.0 -%exclude %{tesla_515_libdir}/libOpenGL.so.0 -%exclude %{tesla_515_libdir}/libglxserver_nvidia.so.%{tesla_515} -%exclude %{tesla_515_libdir}/libnvidia-gtk2.so.%{tesla_515} -%exclude %{tesla_515_libdir}/libnvidia-gtk3.so.%{tesla_515} -%exclude %{tesla_515_libdir}/nvidia_drv.so -%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1 -%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1 -%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1.1.0 -%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1.1.9 -%exclude %{tesla_515_libdir}/libnvidia-wayland-client.so.%{tesla_515} +%exclude %{tesla_525_libdir}/libGLX.so.0 +%exclude %{tesla_525_libdir}/libGLdispatch.so.0 +%exclude %{tesla_525_libdir}/libOpenGL.so.0 +%exclude %{tesla_525_libdir}/libglxserver_nvidia.so.%{tesla_525} +%exclude %{tesla_525_libdir}/libnvidia-gtk2.so.%{tesla_525} +%exclude %{tesla_525_libdir}/libnvidia-gtk3.so.%{tesla_525} +%exclude %{tesla_525_libdir}/nvidia_drv.so +%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1 +%exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1 +%exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1.1.0 +%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1.1.9 +%exclude %{tesla_525_libdir}/libnvidia-wayland-client.so.%{tesla_525} From 860f0c04cff949129558ff8b3b9bd5a7239a2255 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 15 Jun 2023 08:39:52 +0000 Subject: [PATCH 1006/1356] kmod-6.1-nvidia: Remove versioned directory layer We have never made use of having multiple versioned nvidia drivers built and installed in Bottlerocket. Remove the additional complexity of adding an extra directory layer for versioned drivers in the installation paths. However, the firmware needs to be placed in a versioned directory. 
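As a rough sketch of the resulting layout (macro spellings taken from the diff below, not resolved to on-image paths), only the firmware keeps a per-version directory:

  # before: every driver artifact sits under a versioned subdirectory
  %{_cross_libdir}/nvidia/tesla/%{tesla_525}/libcuda.so.1
  %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-smi
  # after: flat tesla directories; only the GSP firmware stays versioned
  %{_cross_libdir}/nvidia/tesla/libcuda.so.1
  %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi
  %{_cross_libdir}/firmware/nvidia/%{tesla_525}/gsp.bin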
Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 100 +++++++++--------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index e7934096..641dfa81 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_525 525.105.17 -%global tesla_525_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_525} -%global tesla_525_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +%global tesla_525_libdir %{_cross_libdir}/nvidia/tesla +%global tesla_525_bindir %{_cross_libexecdir}/nvidia/tesla/bin %global tesla_525_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_525} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -97,21 +97,21 @@ install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-d # Begin NVIDIA tesla 525 pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions -install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{tesla_525_libdir} -install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_525} +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla sed -e 's|__NVIDIA_VERSION__|%{tesla_525}|' %{S:300} > nvidia-tesla-%{tesla_525}.conf install -m 0644 nvidia-tesla-%{tesla_525}.conf %{buildroot}%{_cross_tmpfilesdir}/ -sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/|' %{S:301} > \ +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ nvidia-tesla-%{tesla_525}.toml install -m 0644 nvidia-tesla-%{tesla_525}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}|' %{S:302} > nvidia-path.env -install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_525} +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla # We need to add `_cross_libdir/tesla_525` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers @@ -120,34 +120,34 @@ sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__| install -m 0644 nvidia-tesla-%{tesla_525}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # driver -install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia/nv-kernel.o_binary 
%{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-kernel.o +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # modeset -install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # peermem -install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # drm -install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d # Binaries -install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} -install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} -install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} -install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %if "%{_cross_arch}" == "x86_64" -install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif # We install all the libraries, and filter them out in the 'files' section, so we can catch @@ -185,38 +185,38 @@ popd %files tesla-525 %license %{license_file} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_525} -%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525} 
+%dir %{_cross_datadir}/nvidia/tesla +%dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{tesla_525_libdir} %dir %{tesla_525_firmwaredir} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d -%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_525} +%dir %{_cross_datadir}/nvidia/tesla/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla # Binaries -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-debugdump -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi # Configuration files -%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_525}.toml -%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_525}.conf -%{_cross_factorydir}/nvidia/tesla/%{tesla_525}/nvidia-path.env +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf +%{_cross_factorydir}/nvidia/tesla/nvidia-path.env # driver -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-uvm.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-uvm.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.o # modeset -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-modeset-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nv-modeset-kernel.o -%{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-modeset.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-modeset.mod.o # tmpfiles -%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_525}.conf +%{_cross_tmpfilesdir}/nvidia-tesla.conf # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities @@ -297,14 +297,14 @@ popd # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-peermem.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-peermem.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-drm.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_525}/module-objects.d/nvidia-drm.o -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-cuda-mps-control -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-cuda-mps-server +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.o +%exclude 
%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-server %if "%{_cross_arch}" == "x86_64" -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_525}/nvidia-ngx-updater +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-ngx-updater %endif # None of these libraries are required by libnvidia-container, so they From 6431aaed45c5d263b8cf0deafcf6d54dd1c712c0 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 15 Jun 2023 08:51:09 +0000 Subject: [PATCH 1007/1356] kmod-6.1-nvidia: Simplify handling of config files Removing the option to have multiple versioned variants of the driver installed, enables us to simplify the build more by getting rid of some text replacement steps and removing version numbers in file names. Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 16 +++++++--------- packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in | 2 +- .../kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf | 3 +++ .../nvidia-tesla-tmpfiles.conf.in | 3 --- 4 files changed, 11 insertions(+), 13 deletions(-) create mode 100644 packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf delete mode 100644 packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 641dfa81..37348e51 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -23,7 +23,7 @@ Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf # NVIDIA tesla conf files from 300 to 399 -Source300: nvidia-tesla-tmpfiles.conf.in +Source300: nvidia-tesla-tmpfiles.conf Source301: nvidia-tesla-build-config.toml.in Source302: nvidia-tesla-path.env.in Source303: nvidia-ld.so.conf.in @@ -102,22 +102,20 @@ install -d %{buildroot}%{tesla_525_libdir} install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla -sed -e 's|__NVIDIA_VERSION__|%{tesla_525}|' %{S:300} > nvidia-tesla-%{tesla_525}.conf -install -m 0644 nvidia-tesla-%{tesla_525}.conf %{buildroot}%{_cross_tmpfilesdir}/ +install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ - nvidia-tesla-%{tesla_525}.toml -install -m 0644 nvidia-tesla-%{tesla_525}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers + nvidia-tesla.toml +install -m 0644 nvidia-tesla.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:302} > nvidia-path.env install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla -# We need to add `_cross_libdir/tesla_525` to the paths loaded by the ldconfig service +# We need to add `_cross_libdir` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_525}|' \ - > nvidia-tesla-%{tesla_525}.conf -install -m 0644 nvidia-tesla-%{tesla_525}.conf 
%{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf +install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # driver install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d diff --git a/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in b/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in index a07b0ccb..f992bf22 100644 --- a/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in +++ b/packages/kmod-6.1-nvidia/nvidia-ld.so.conf.in @@ -1 +1 @@ -__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ +__LIBDIR__/nvidia/tesla/ diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf new file mode 100644 index 00000000..ddcac3e4 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf @@ -0,0 +1,3 @@ +C /etc/drivers/nvidia-tesla.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla.conf diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in deleted file mode 100644 index f208e1d2..00000000 --- a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf.in +++ /dev/null @@ -1,3 +0,0 @@ -C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml -C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env -C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf From efee62b9a1dd0512a1ece32fd783f2e5f0d15037 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 9 Jun 2023 15:34:46 +0000 Subject: [PATCH 1008/1356] kmod-6.1.nvidia: Accommodate for newly split firmware Starting with nvidia driver 525 the firmware file gsp.bin has been split into gsp_tu10x.bin and gsp_ad10x.bin. Adjust the spec file to ship both files. The new format of the bin files is not compatible with the strip command used during rpm-build. We work around the automatic stripping of all binary files by overwriting the __strip macro used in __spec_install_post macro to skip any stripping of binary files. Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 37348e51..78233138 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -5,6 +5,13 @@ %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) +# With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ad10x.bin +# and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. +# The __spec_install_post macro will by default try to strip all binary files. +# Unfortunately the strip used is not compatible with the new file format. +# Redefine strip, so that these firmware binaries do not derail the build. 
+%global __strip /usr/bin/true + Name: %{_cross_os}kmod-6.1-nvidia Version: 1.0.0 Release: 1%{?dist} @@ -166,7 +173,8 @@ done # Include the firmware file for GSP support install -d %{buildroot}%{tesla_525_firmwaredir} -install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_525_firmwaredir} +install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{tesla_525_firmwaredir} +install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{tesla_525_firmwaredir} popd @@ -291,7 +299,8 @@ popd %{tesla_525_libdir}/libnvidia-ngx.so.1 # Firmware -%{tesla_525_firmwaredir}/gsp.bin +%{tesla_525_firmwaredir}/gsp_ad10x.bin +%{tesla_525_firmwaredir}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested From a9e9fd8f593566928d3fb5bc46dbf12475c7d144 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 9 Jun 2023 15:38:03 +0000 Subject: [PATCH 1009/1356] kmod-6.1-nvidia: Add the new libraries to the package The new driver comes with three additional libraries (libnvidia-api.so.1 and libcudadebugger.so.*) as well as a new version of libnvidia-egl-wayland. Adjust the spec file accordingly to include these files. Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 78233138..4d9b04ed 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -228,6 +228,7 @@ popd # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities # Utility libs +%{tesla_525_libdir}/libnvidia-api.so.1 %{tesla_525_libdir}/libnvidia-ml.so.%{tesla_525} %{tesla_525_libdir}/libnvidia-ml.so.1 %{tesla_525_libdir}/libnvidia-cfg.so.%{tesla_525} @@ -238,6 +239,8 @@ popd # Compute libs %{tesla_525_libdir}/libcuda.so.%{tesla_525} %{tesla_525_libdir}/libcuda.so.1 +%{tesla_525_libdir}/libcudadebugger.so.%{tesla_525} +%{tesla_525_libdir}/libcudadebugger.so.1 %{tesla_525_libdir}/libnvidia-opencl.so.%{tesla_525} %{tesla_525_libdir}/libnvidia-opencl.so.1 %{tesla_525_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_525} @@ -326,5 +329,5 @@ popd %exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1 %exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1 %exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1.1.0 -%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1.1.9 +%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1.1.10 %exclude %{tesla_525_libdir}/libnvidia-wayland-client.so.%{tesla_525} From 9728cdabcdbddbbabe4e80ef5084e2f97edb04ba Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 15 Jun 2023 09:18:58 +0000 Subject: [PATCH 1010/1356] kmod-6.1-nvidia: Cleanup unnecessary macros In the past we built drivers and installed them into versioned directory trees. Remove the unnecessary macros now that we do not do that anymore. 
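For reference, a short sketch of the substitution this cleanup performs, taken from the macro definitions removed in the diff below; each helper macro is replaced by the literal path it already expanded to after the earlier directory flattening:

  # %{tesla_525_libdir}      -> %{_cross_libdir}/nvidia/tesla
  # %{tesla_525_bindir}      -> %{_cross_libexecdir}/nvidia/tesla/bin
  # %{tesla_525_firmwaredir} -> %{_cross_libdir}/firmware/nvidia/%{tesla_525}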
Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 163 +++++++++--------- 1 file changed, 80 insertions(+), 83 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 4d9b04ed..ad5c332a 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,7 +1,4 @@ %global tesla_525 525.105.17 -%global tesla_525_libdir %{_cross_libdir}/nvidia/tesla -%global tesla_525_bindir %{_cross_libexecdir}/nvidia/tesla/bin -%global tesla_525_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_525} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -105,7 +102,7 @@ install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-d pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin -install -d %{buildroot}%{tesla_525_libdir} +install -d %{buildroot}%{_cross_libdir}/nvidia/tesla install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla @@ -157,7 +154,7 @@ install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/ # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added -install -m 755 *.so* %{buildroot}/%{tesla_525_libdir}/ +install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ # This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while # the symlinks are created. For now, we only symlink libEGL.so.1.1.0. @@ -168,13 +165,13 @@ for lib in $(find . 
-maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" [ -n "${soname}" ] || continue [ "${lib}" == "${soname}" ] && continue - ln -s "${lib}" %{buildroot}/%{tesla_525_libdir}/"${soname}" + ln -s "${lib}" %{buildroot}/%{_cross_libdir}/nvidia/tesla/"${soname}" done # Include the firmware file for GSP support -install -d %{buildroot}%{tesla_525_firmwaredir} -install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{tesla_525_firmwaredir} -install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{tesla_525_firmwaredir} +install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} +install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} +install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} popd @@ -193,8 +190,8 @@ popd %license %{license_file} %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin -%dir %{tesla_525_libdir} -%dir %{tesla_525_firmwaredir} +%dir %{_cross_libdir}/nvidia/tesla +%dir %{_cross_libdir}/firmware/nvidia/%{tesla_525} %dir %{_cross_datadir}/nvidia/tesla/module-objects.d %dir %{_cross_factorydir}/nvidia/tesla @@ -228,82 +225,82 @@ popd # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities # Utility libs -%{tesla_525_libdir}/libnvidia-api.so.1 -%{tesla_525_libdir}/libnvidia-ml.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-ml.so.1 -%{tesla_525_libdir}/libnvidia-cfg.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-cfg.so.1 -%{tesla_525_libdir}/libnvidia-nvvm.so.4 -%{tesla_525_libdir}/libnvidia-nvvm.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-api.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.4 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.%{tesla_525} # Compute libs -%{tesla_525_libdir}/libcuda.so.%{tesla_525} -%{tesla_525_libdir}/libcuda.so.1 -%{tesla_525_libdir}/libcudadebugger.so.%{tesla_525} -%{tesla_525_libdir}/libcudadebugger.so.1 -%{tesla_525_libdir}/libnvidia-opencl.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-opencl.so.1 -%{tesla_525_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-ptxjitcompiler.so.1 -%{tesla_525_libdir}/libnvidia-allocator.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-allocator.so.1 -%{tesla_525_libdir}/libOpenCL.so.1.0.0 -%{tesla_525_libdir}/libOpenCL.so.1 +%{_cross_libdir}/nvidia/tesla/libcuda.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libcuda.so.1 +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.1 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1.0.0 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{tesla_525_libdir}/libnvidia-compiler.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-compiler.so.%{tesla_525} %endif # Video libs 
-%{tesla_525_libdir}/libvdpau_nvidia.so.%{tesla_525} -%{tesla_525_libdir}/libvdpau_nvidia.so.1 -%{tesla_525_libdir}/libnvidia-encode.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-encode.so.1 -%{tesla_525_libdir}/libnvidia-opticalflow.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-opticalflow.so.1 -%{tesla_525_libdir}/libnvcuvid.so.%{tesla_525} -%{tesla_525_libdir}/libnvcuvid.so.1 +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.1 +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.1 # Graphics libs -%{tesla_525_libdir}/libnvidia-eglcore.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-glcore.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-tls.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-glsi.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-rtcore.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-fbc.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-fbc.so.1 -%{tesla_525_libdir}/libnvoptix.so.%{tesla_525} -%{tesla_525_libdir}/libnvoptix.so.1 -%{tesla_525_libdir}/libnvidia-vulkan-producer.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-eglcore.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-glcore.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-tls.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-glsi.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-rtcore.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.1 +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-vulkan-producer.so.%{tesla_525} # Graphics GLVND libs -%{tesla_525_libdir}/libnvidia-glvkspirv.so.%{tesla_525} -%{tesla_525_libdir}/libGLX_nvidia.so.%{tesla_525} -%{tesla_525_libdir}/libGLX_nvidia.so.0 -%{tesla_525_libdir}/libEGL_nvidia.so.%{tesla_525} -%{tesla_525_libdir}/libEGL_nvidia.so.0 -%{tesla_525_libdir}/libGLESv2_nvidia.so.%{tesla_525} -%{tesla_525_libdir}/libGLESv2_nvidia.so.2 -%{tesla_525_libdir}/libGLESv1_CM_nvidia.so.%{tesla_525} -%{tesla_525_libdir}/libGLESv1_CM_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-glvkspirv.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.2 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.1 # Graphics compat -%{tesla_525_libdir}/libEGL.so.1.1.0 -%{tesla_525_libdir}/libEGL.so.1 -%{tesla_525_libdir}/libEGL.so.%{tesla_525} -%{tesla_525_libdir}/libGL.so.1.7.0 -%{tesla_525_libdir}/libGL.so.1 -%{tesla_525_libdir}/libGLESv1_CM.so.1.2.0 -%{tesla_525_libdir}/libGLESv1_CM.so.1 -%{tesla_525_libdir}/libGLESv2.so.2.1.0 -%{tesla_525_libdir}/libGLESv2.so.2 +%{_cross_libdir}/nvidia/tesla/libEGL.so.1.1.0 +%{_cross_libdir}/nvidia/tesla/libEGL.so.1 +%{_cross_libdir}/nvidia/tesla/libEGL.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGL.so.1.7.0 +%{_cross_libdir}/nvidia/tesla/libGL.so.1 
+%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1.2.0 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2.1.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2 # NGX -%{tesla_525_libdir}/libnvidia-ngx.so.%{tesla_525} -%{tesla_525_libdir}/libnvidia-ngx.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.1 # Firmware -%{tesla_525_firmwaredir}/gsp_ad10x.bin -%{tesla_525_firmwaredir}/gsp_tu10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_525}/gsp_ad10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_525}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested @@ -319,15 +316,15 @@ popd # None of these libraries are required by libnvidia-container, so they # won't be used by a containerized workload -%exclude %{tesla_525_libdir}/libGLX.so.0 -%exclude %{tesla_525_libdir}/libGLdispatch.so.0 -%exclude %{tesla_525_libdir}/libOpenGL.so.0 -%exclude %{tesla_525_libdir}/libglxserver_nvidia.so.%{tesla_525} -%exclude %{tesla_525_libdir}/libnvidia-gtk2.so.%{tesla_525} -%exclude %{tesla_525_libdir}/libnvidia-gtk3.so.%{tesla_525} -%exclude %{tesla_525_libdir}/nvidia_drv.so -%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1 -%exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1 -%exclude %{tesla_525_libdir}/libnvidia-egl-gbm.so.1.1.0 -%exclude %{tesla_525_libdir}/libnvidia-egl-wayland.so.1.1.10 -%exclude %{tesla_525_libdir}/libnvidia-wayland-client.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/libGLX.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libGLdispatch.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libOpenGL.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libglxserver_nvidia.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk2.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk3.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/nvidia_drv.so +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.10 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_525} From 05cd9af47a845b5ba9c6e71fa4b86f0ecf2ad340 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 15 Jun 2023 09:25:14 +0000 Subject: [PATCH 1011/1356] kmod-6.1-nvidia: Simplify version number handling in spec Simplify the usage of version number in the spec file. Having part of the version numbers in the variable name makes changing versions a lot harder than it should be. 
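As a sketch only (not the actual later diff), a subsequent driver bump such as the 535.54.03 update later in this series would then only need to touch the three component macros, since every path and file name is derived from %{tesla_ver}:

  %global tesla_major 535
  %global tesla_minor 54
  %global tesla_patch 03
  %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch}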
Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 105 +++++++++--------- 1 file changed, 54 insertions(+), 51 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index ad5c332a..49607d46 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,4 +1,7 @@ -%global tesla_525 525.105.17 +%global tesla_major 525 +%global tesla_minor 105 +%global tesla_patch 17 +%global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -19,8 +22,8 @@ License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ # NVIDIA .run scripts from 0 to 199 -Source0: https://us.download.nvidia.com/tesla/%{tesla_525}/NVIDIA-Linux-x86_64-%{tesla_525}.run -Source1: https://us.download.nvidia.com/tesla/%{tesla_525}/NVIDIA-Linux-aarch64-%{tesla_525}.run +Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -38,25 +41,25 @@ BuildRequires: %{_cross_os}kernel-6.1-archive %description %{summary}. -%package tesla-525 -Summary: NVIDIA 525 Tesla driver -Version: %{tesla_525} +%package tesla-%{tesla_major} +Summary: NVIDIA %{tesla_major} Tesla driver +Version: %{tesla_ver} License: %{spdx_id} Requires: %{name} -%description tesla-525 +%description tesla-%{tesla_major} %{summary} %prep # Extract nvidia sources with `-x`, otherwise the script will try to install # the driver in the current run -sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_525}.run -x +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz %build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525}/kernel +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod @@ -98,8 +101,8 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -# Begin NVIDIA tesla 525 -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_525} +# Begin NVIDIA tesla driver +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{_cross_libdir}/nvidia/tesla @@ -158,7 +161,7 @@ install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ # This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while # the symlinks are created. For now, we only symlink libEGL.so.1.1.0. -EXCLUDED_LIBS="libEGL.so.%{tesla_525}" +EXCLUDED_LIBS="libEGL.so.%{tesla_ver}" for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue @@ -169,9 +172,9 @@ for lib in $(find . 
-maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do done # Include the firmware file for GSP support -install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} -install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} -install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_525} +install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} popd @@ -186,12 +189,12 @@ popd %{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -%files tesla-525 +%files tesla-%{tesla_major} %license %{license_file} %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{_cross_libdir}/nvidia/tesla -%dir %{_cross_libdir}/firmware/nvidia/%{tesla_525} +%dir %{_cross_libdir}/firmware/nvidia/%{tesla_ver} %dir %{_cross_datadir}/nvidia/tesla/module-objects.d %dir %{_cross_factorydir}/nvidia/tesla @@ -226,67 +229,67 @@ popd # Utility libs %{_cross_libdir}/nvidia/tesla/libnvidia-api.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.1 %{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.4 -%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.%{tesla_ver} # Compute libs -%{_cross_libdir}/nvidia/tesla/libcuda.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libcuda.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libcuda.so.1 -%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libcudadebugger.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.1 %{_cross_libdir}/nvidia/tesla/libOpenCL.so.1.0.0 %{_cross_libdir}/nvidia/tesla/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{_cross_libdir}/nvidia/tesla/libnvidia-compiler.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-compiler.so.%{tesla_ver} %endif # Video libs -%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.1 
-%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvcuvid.so.1 # Graphics libs -%{_cross_libdir}/nvidia/tesla/libnvidia-eglcore.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libnvidia-glcore.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libnvidia-tls.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libnvidia-glsi.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libnvidia-rtcore.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-eglcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-tls.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glsi.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-rtcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.1 -%{_cross_libdir}/nvidia/tesla/libnvoptix.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvoptix.so.1 -%{_cross_libdir}/nvidia/tesla/libnvidia-vulkan-producer.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-vulkan-producer.so.%{tesla_ver} # Graphics GLVND libs -%{_cross_libdir}/nvidia/tesla/libnvidia-glvkspirv.so.%{tesla_525} -%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-glvkspirv.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.0 -%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.0 -%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.2 -%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.1 # Graphics compat %{_cross_libdir}/nvidia/tesla/libEGL.so.1.1.0 %{_cross_libdir}/nvidia/tesla/libEGL.so.1 -%{_cross_libdir}/nvidia/tesla/libEGL.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libEGL.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libGL.so.1.7.0 %{_cross_libdir}/nvidia/tesla/libGL.so.1 %{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1.2.0 @@ -295,12 +298,12 @@ popd %{_cross_libdir}/nvidia/tesla/libGLESv2.so.2 # NGX -%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.%{tesla_525} +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.%{tesla_ver} %{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.1 # Firmware -%{_cross_libdir}/firmware/nvidia/%{tesla_525}/gsp_ad10x.bin -%{_cross_libdir}/firmware/nvidia/%{tesla_525}/gsp_tu10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_ad10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested @@ -319,12 +322,12 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libGLX.so.0 %exclude %{_cross_libdir}/nvidia/tesla/libGLdispatch.so.0 %exclude %{_cross_libdir}/nvidia/tesla/libOpenGL.so.0 -%exclude %{_cross_libdir}/nvidia/tesla/libglxserver_nvidia.so.%{tesla_525} -%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk2.so.%{tesla_525} -%exclude 
%{_cross_libdir}/nvidia/tesla/libnvidia-gtk3.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/libglxserver_nvidia.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk2.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk3.so.%{tesla_ver} %exclude %{_cross_libdir}/nvidia/tesla/nvidia_drv.so %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.10 -%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_525} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} From d0352b581541c159242bea5fb6d2bdb064dda5eb Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 29 Jun 2023 12:23:33 +0000 Subject: [PATCH 1012/1356] kernel-6.1: Add simpledrm driver for nvidia driver dependencies In order for the nvidia driver to have all the necessary symbols and interfaces built into the kernel we need to enable DRM_KMS_HELPERS, however, that option can not be enabled individually. Choose the simpledrm driver that selects these options as a minimal driver to pull these dependencies in. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket-aws | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket-aws b/packages/kernel-6.1/config-bottlerocket-aws index 6b4ed404..27237b59 100644 --- a/packages/kernel-6.1/config-bottlerocket-aws +++ b/packages/kernel-6.1/config-bottlerocket-aws @@ -11,3 +11,10 @@ CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_INFINIBAND=m CONFIG_MLXFW=m + +# With 6.1 some of the functionalities used by the nvidia driver have moved behind +# some extra config options CONFIG_DRM_KMS_HELPER and CONFIG_DRM_DISPLAY_HELPER. +# These config options can not be selected individually, but are selected by certain +# drivers. Enable the SIMPLEDRM driver, which is a minimal drm driver enabling +# those helpers for platform provided framebuffers. +CONFIG_DRM_SIMPLEDRM=m From 0b3da3b99e468b7f5ea6fc5c731134fc92460a0b Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 29 Jun 2023 12:25:36 +0000 Subject: [PATCH 1013/1356] kernel-6.1: Make objtool build dependent on CONFIG_OBJTOOL In the past we have added objtool as a dependency to build external kmods. However, objtool is dependent on architectural features and other config settings and is thus not available on all architectures. For Bottlerocket this means that objtool is not available on arm64. Since 5.15 a config option CONFIG_OBJTOOL has been introduced which we can use decide if objtool is supported in the current build scenario. 
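For illustration only (the kernel tree path and module directory are placeholders, not part of this change): with the prepare target added below, an out-of-tree module build roughly follows this shape, and CONFIG_OBJTOOL in the generated .config indicates whether objtool is expected for the current architecture:

    # On arm64 this is expected to be unset.
    grep '^CONFIG_OBJTOOL=y' /usr/src/kernel-devel/.config || echo "objtool not used here"

    # Prepare the tree for external modules, then build one against it.
    make -C /usr/src/kernel-devel M="$PWD" modules_prepare
    make -C /usr/src/kernel-devel M="$PWD" modules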
Signed-off-by: Leonard Foerster --- ...d-prepare-target-for-external-modules.patch | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch b/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch index 4a52dd19..84f46d63 100644 --- a/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch +++ b/packages/kernel-6.1/1001-Makefile-add-prepare-target-for-external-modules.patch @@ -1,4 +1,4 @@ -From fc06fd8a1e59838d431c85bc8017c2520bf08695 Mon Sep 17 00:00:00 2001 +From e6e9b5adc830c0924d81c348c8d5b12e3dc4242e Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 19 Apr 2021 18:46:04 +0000 Subject: [PATCH] Makefile: add prepare target for external modules @@ -23,29 +23,33 @@ existing solutions. Signed-off-by: Ben Cressey Signed-off-by: Arnaldo Garcia Rincon --- - Makefile | 9 +++++++++ - 1 file changed, 9 insertions(+) + Makefile | 13 +++++++++++++ + 1 file changed, 13 insertions(+) diff --git a/Makefile b/Makefile -index 23390805e..346b898eb 100644 +index 23390805e..2f78ac123 100644 --- a/Makefile +++ b/Makefile -@@ -1874,6 +1874,15 @@ else # KBUILD_EXTMOD +@@ -1874,6 +1874,19 @@ else # KBUILD_EXTMOD KBUILD_BUILTIN := KBUILD_MODULES := 1 +PHONY += modules_prepare -+modules_prepare: tools/objtool ++modules_prepare: + $(Q)$(MAKE) $(build)=scripts/basic + $(Q)$(MAKE) $(build)=scripts/dtc + $(Q)$(MAKE) $(build)=scripts/mod + $(Q)$(MAKE) $(build)=scripts + ++ifdef CONFIG_OBJTOOL ++prepare: tools/objtool ++endif ++ +prepare: modules_prepare + build-dir := $(KBUILD_EXTMOD) compile_commands.json: $(extmod_prefix)compile_commands.json -- -2.39.1 +2.40.0 From ab77544d5a9dc4c26761932b8ca2d9c41b5a8105 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 29 Jun 2023 08:19:07 +0000 Subject: [PATCH 1014/1356] kmod-6.1-nvidia: Update to latest version 535.54.03 Pull in new version (535.54.03) of the nvidia driver. 
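For reference only (not part of the change): the updated sha512 digests in Cargo.toml can be reproduced from the upstream URLs listed below with something like:

    for arch in x86_64 aarch64; do
      curl -fsSLO "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-${arch}-535.54.03.run"
      sha512sum "NVIDIA-Linux-${arch}-535.54.03.run"
    done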
Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/Cargo.toml | 9 +++++---- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 6 +++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index a442c424..d29e18f2 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -13,12 +13,13 @@ package-name = "kmod-6.1-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/525.105.17/NVIDIA-Linux-x86_64-525.105.17.run" -sha512 = "1a44a8d92d8434d356dcd6087c8a3277136e0819ffa5e4b6895854811cf63e44ad3dc08e0d248f149f8dc2280ab0993be6ee7fdf3c676fb9a85ff3dce83fd69a" +url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-x86_64-535.54.03.run" +sha512 = "45b72b34272d3df14b56136bb61537d00145d55734b72d58390af4694d96f03b2b49433beb4a5bede4d978442b707b08e05f2f31b2fcfd9453091e7f0b945cff" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/525.105.17/NVIDIA-Linux-aarch64-525.105.17.run" -sha512 = "75192acf8448a206a956b94b01b5e1e05cd21c7f172557bd6a2d1f1d92877583734c7e0998cedc3b54a8bdc5c7e869531ddea4161639daf40cbd0bf035252759" +url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-aarch64-535.54.03.run" +sha512 = "57b06a6fa16838176866c364a8722c546084529ad91c57e979aca7750692127cab1485b5a44aee398c5494782ed987e82f66061aa39e802bc6eefa2b40a33bc3" + [build-dependencies] glibc = { path = "../glibc" } kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 49607d46..2ae8b82d 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ -%global tesla_major 525 -%global tesla_minor 105 -%global tesla_patch 17 +%global tesla_major 535 +%global tesla_minor 54 +%global tesla_patch 03 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) From ace079775592c658bd50006a20c740428ad4b9eb Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 29 Jun 2023 10:00:34 +0000 Subject: [PATCH 1015/1356] kmod-6.1-nvidia: Adjust names of gsp firmware blobs With the update to nvidia driver 535.54.03 the firmware blob gsp_ad10x.bin has been renamed to gsp_ga10x.bin. Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 2ae8b82d..d94655cc 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -5,7 +5,7 @@ %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) -# With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ad10x.bin +# With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ga10x.bin # and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. # The __spec_install_post macro will by default try to strip all binary files. 
# Unfortunately the strip used is not compatible with the new file format. @@ -173,7 +173,7 @@ done # Include the firmware file for GSP support install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} -install -p -m 0644 firmware/gsp_ad10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_ga10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} popd @@ -302,7 +302,7 @@ popd %{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.1 # Firmware -%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_ad10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_ga10x.bin %{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them From c30f6749db2f19614e8b6e3edb4c613cfdd67d0c Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 29 Jun 2023 10:02:08 +0000 Subject: [PATCH 1016/1356] kmod-6.1-nvidia: Add/Remove libraries Multiple changes are needed to accommodate for changes in the library structure: * libnvidia-compiler: functionality has been moved to other libraries * libnvidia-pkcs11*: pkcs11 functionality has been factored out into separate library * libnvidia-egl-wayland: has been updated Signed-off-by: Leonard Foerster --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index d94655cc..e208bf93 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -250,7 +250,8 @@ popd %{_cross_libdir}/nvidia/tesla/libOpenCL.so.1.0.0 %{_cross_libdir}/nvidia/tesla/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{_cross_libdir}/nvidia/tesla/libnvidia-compiler.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11-openssl3.so.%{tesla_ver} %endif # Video libs @@ -329,5 +330,5 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 -%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.10 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} From 78785140beeed1a95bdb5dcfb3cfeb489c302b60 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 9 May 2023 22:49:37 +0000 Subject: [PATCH 1017/1356] Actions: Only use GOPROXY=direct on nightly build This adds a new workflow to run nightly that will use the existing GOPROXY=direct setting to verify that the caching of Go modules is not hiding an issue with upstream module availability changes. It trims down the normal PR workflow to skip the `-dev` variants and not perform the additional checks like unit-tests and shellchecks. With the addition of the nightly workflow to identify these issues, this removes the GOPROXY setting from the `build` workflow that runs on each PR. This should reduce the number of failed jobs that need to be rerun due to throttling of go module fetches that seems to happen very often during normal working hours. 
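Roughly speaking, the difference between the nightly job and a regular PR build comes down to how Go resolves modules (a sketch; the second value is Go's standard default):

    # Nightly: bypass the module proxy and fetch straight from upstream sources,
    # surfacing yanked or unavailable modules immediately.
    GOPROXY=direct go mod download

    # Regular builds: Go's default, which serves cached copies via the proxy and
    # can mask upstream availability changes for a while.
    GOPROXY="https://proxy.golang.org,direct" go mod download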
Signed-off-by: Sean McGinnis --- .github/workflows/nightly.yml | 64 +++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/nightly.yml diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 00000000..fb748663 --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,64 @@ +# This is basically a duplicate of the main "build" workflow, but uses GOPROXY=direct +# to try to catch errors close to their introduction due to yanked Go modules. These +# could otherwise be covered up by caching and not discovered until much later when +# bypassing the main cache. +name: Nightly +on: + schedule: + # Run once a day at 02:15 UTC. Randomly chosen as a "quiet" time for this to run. + # See syntax for format details: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule + - cron: '15 2 * * *' + +env: + # When Go packages are built, buildsys will vendor in dependent Go code for + # that package and bundle it up in a tarball. This env variable is consumed + # and used to configure Go to directly download code from its upstream source. + # This is a useful early signal during GitHub actions to see if there are + # upstream Go code problems. + GOPROXY: direct + +jobs: + list-variants: + # This needs to be its own job since the build job needs its output before + # it can initialize + if: github.repository == 'bottlerocket-os/bottlerocket' + name: "Determine variants" + runs-on: ubuntu-latest + outputs: + variants: ${{ steps.get-variants.outputs.variants }} + aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} + steps: + - uses: actions/checkout@v3 + - uses: ./.github/actions/list-variants + id: get-variants + + build: + needs: list-variants + runs-on: ubuntu-latest + continue-on-error: true + strategy: + matrix: + variant: ${{ fromJson(needs.list-variants.outputs.variants) }} + arch: [x86_64, aarch64] + exclude: ${{ fromJson(needs.list-variants.outputs.aarch-enemies) }} + fail-fast: false + name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" + steps: + - uses: actions/checkout@v3 + - name: Preflight step to set up the runner + uses: ./.github/actions/setup-node + - if: contains(matrix.variant, 'nvidia') + run: | + cat <<-EOF > Licenses.toml + [nvidia] + spdx-id = "LICENSE-LicenseRef-NVIDIA-Customer" + licenses = [ + { path = "NVIDIA", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } + ] + EOF + - run: | + cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ + -e BUILDSYS_ARCH=${{ matrix.arch }} \ + -e BUILDSYS_JOBS=12 \ + -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="${{ contains(matrix.variant, 'nvidia') }}" \ + -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" From 28974f241f5af782f5c431832304136aafd52bef Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Tue, 11 Jul 2023 23:21:20 +0000 Subject: [PATCH 1018/1356] build: Add support for XFS DATA Partition Add the ability to specify XFS for a variant's DATA partition. The variants now allow xfs-data-partition to be set to enable this feature. If not specified, the DATA partition defaults to ext4. 
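As the rpm2img change below shows, the chosen filesystem is recorded in image-features.env; a minimal sketch of consuming it at runtime, assuming the file ends up at /usr/share/bottlerocket/image-features.env on the booted image:

    . /usr/share/bottlerocket/image-features.env
    echo "DATA partition filesystem: ${DATA_PARTITION_FILESYSTEM:-ext4}"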
Signed-off-by: Matthew Yeazel --- tools/buildsys/src/manifest.rs | 10 ++++++++++ tools/rpm2img | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index bf4ac68f..1ee01d8b 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -214,6 +214,13 @@ line arguments set in the boot configuration. [package.metadata.build-variant.image-features] unified-cgroup-hierarchy = true ``` + +`xfs-data-partition` changes the filesystem for the data partition from ext4 to xfs. The +default will remain ext4 and xfs is opt-in. + +```ignore +[package.metadata.build-variant.image-features] +xfs-data-partition = true */ mod error; @@ -505,6 +512,7 @@ pub enum ImageFeature { GrubSetPrivateVar, SystemdNetworkd, UnifiedCgroupHierarchy, + XfsDataPartition, } impl TryFrom for ImageFeature { @@ -514,6 +522,7 @@ impl TryFrom for ImageFeature { "grub-set-private-var" => Ok(ImageFeature::GrubSetPrivateVar), "systemd-networkd" => Ok(ImageFeature::SystemdNetworkd), "unified-cgroup-hierarchy" => Ok(ImageFeature::UnifiedCgroupHierarchy), + "xfs-data-partition" => Ok(ImageFeature::XfsDataPartition), _ => error::ParseImageFeatureSnafu { what: s }.fail()?, } } @@ -525,6 +534,7 @@ impl fmt::Display for ImageFeature { ImageFeature::GrubSetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"), ImageFeature::SystemdNetworkd => write!(f, "SYSTEMD_NETWORKD"), ImageFeature::UnifiedCgroupHierarchy => write!(f, "UNIFIED_CGROUP_HIERARCHY"), + ImageFeature::XfsDataPartition => write!(f, "XFS_DATA_PARTITION"), } } } diff --git a/tools/rpm2img b/tools/rpm2img index 3841c797..f0915d78 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -13,6 +13,7 @@ BUILDER_ARCH="$(uname -m)" OVF_TEMPLATE="" GRUB_SET_PRIVATE_VAR="no" +XFS_DATA_PARTITION="no" for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" @@ -27,6 +28,7 @@ for opt in "$@"; do --partition-plan=*) PARTITION_PLAN="${optarg}" ;; --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; --with-grub-set-private-var=*) GRUB_SET_PRIVATE_VAR="${optarg}" ;; + --xfs-data-partition=*) XFS_DATA_PARTITION="${optarg}" ;; esac done @@ -277,6 +279,13 @@ SUPPORT_URL="https://github.com/bottlerocket-os/bottlerocket/discussions" BUG_REPORT_URL="https://github.com/bottlerocket-os/bottlerocket/issues" EOF +# Set the BOTTLEROCKET-DATA Filesystem for creating/mounting +if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then + printf "%s\n" "DATA_PARTITION_FILESYSTEM=xfs" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" +else + printf "%s\n" "DATA_PARTITION_FILESYSTEM=ext4" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" +fi + # BOTTLEROCKET-ROOT-A mkdir -p "${ROOT_MOUNT}/lost+found" ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ @@ -408,8 +417,28 @@ mkfs_data() { target="${1:?}" size="${2:?}" offset="${3:?}" - mkfs.ext4 -m 0 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${size}" - echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" + # Create an XFS filesystem if requested + if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then + echo "writing XFS filesystem for DATA" + # Create a file to write the filesystem to first + dd if=/dev/zero of="${BOTTLEROCKET_DATA}" bs=1M count=${size%?} + # block size of 4096, directory block size of 16384 + # enable inotbtcount, bigtime, and reflink + # use an internal log with starting size of 64m + # use the minimal 2 Allocation groups, this still overprovisions when expanded + # set strip units of 
512k and sectsize to make EBS volumes align + mkfs.xfs \ + -b size=4096 -n size=16384 \ + -m inobtcount=1,bigtime=1,reflink=1 \ + -l internal,size=64m \ + -d agcount=2,su=512k,sw=1,sectsize=4096 \ + -f "${BOTTLEROCKET_DATA}" + else + # default to ext4 + echo "writing ext4 filesystem for DATA" + mkfs.ext4 -m 0 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${size}" + echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" + fi dd if="${BOTTLEROCKET_DATA}" of="${target}" conv=notrunc bs=1M seek="${offset}" } From dd92a07ca0774c0a3468ce48163bbb2f0a68f343 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1019/1356] packages: update grub Signed-off-by: Ben Cressey --- packages/grub/Cargo.toml | 4 ++-- packages/grub/grub.spec | 2 +- packages/grub/latest-srpm-url.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 630025a9..7c9e4b19 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -9,5 +9,5 @@ build = "../build.rs" path = "/dev/null" [[package.metadata.build-package.external-files]] -url = "https://al2022-repos-us-west-2-9761ab97.s3.dualstack.us-west-2.amazonaws.com/blobstore/aa41fdf9982b65a4c4dad5df5b49ba143b1710d60f82688221966f3c790c6c63/grub2-2.06-42.amzn2022.0.1.src.rpm" -sha512 = "3dbfc0cc48dc7125dca445ca9b6538ecb2c548cadc77714b930eb9992697e6eaef6c5eaece6a367b232d20a2d693a4fbd93b537d79596de4791c576f3b8ecc18" +url = "https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm" +sha512 = "aac3fbee3ec5e5a28176d338eab85c660c9525ef3b34ccf84f7c837c724c72b089bc2b57207e36b12c09a7cdd2c7d6e658288c98b9a66cb98e8edd650f302ba5" diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index c620fd9a..74931a4e 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -15,7 +15,7 @@ Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ -Source0: https://al2022-repos-us-west-2-9761ab97.s3.dualstack.us-west-2.amazonaws.com/blobstore/aa41fdf9982b65a4c4dad5df5b49ba143b1710d60f82688221966f3c790c6c63/grub2-2.06-42.amzn2022.0.1.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm Source1: bios.cfg Source2: efi.cfg Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch diff --git a/packages/grub/latest-srpm-url.sh b/packages/grub/latest-srpm-url.sh index bc1e0125..2c5e313e 100755 --- a/packages/grub/latest-srpm-url.sh +++ b/packages/grub/latest-srpm-url.sh @@ -1,6 +1,6 @@ #!/bin/sh cmd='dnf install -q -y --releasever=latest yum-utils && yumdownloader -q --releasever=latest --source --urls grub2' -docker run --rm amazonlinux:2022 sh -c "${cmd}" \ +docker run --rm amazonlinux:2023 sh -c "${cmd}" \ | grep '^http' \ | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ | sed 's_:/_://_' From 61fea587adf9309f196c23bee3aeb2a4c5bd6f03 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1020/1356] grub: move embedded public key into dedicated section For the EFI build, the Secure Boot config signing key is embedded into the GRUB image so that GRUB can verify the signature of any files it loads, e.g. its configuration. 
By default, the public key is embedded into a section alongside various executable GRUB modules. This set of patches instead moves the key into its own dedicated section in the image, where it can easily be replaced with tools such as objcopy or dd to reuse a set of binary artifacts with different Secure Boot keys. Signed-off-by: Markus Boehme --- ...p-EFI-PE-header-size-to-accommodate-.patch | 46 +++ ...id-adding-section-table-entry-outsid.patch | 98 +++++++ ...al-size-of-section-found-by-grub_efi.patch | 93 +++++++ ...-single-public-key-into-its-own-sect.patch | 261 ++++++++++++++++++ packages/grub/grub.spec | 4 + 5 files changed, 502 insertions(+) create mode 100644 packages/grub/0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch create mode 100644 packages/grub/0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch create mode 100644 packages/grub/0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch create mode 100644 packages/grub/0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch diff --git a/packages/grub/0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch b/packages/grub/0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch new file mode 100644 index 00000000..2a61a130 --- /dev/null +++ b/packages/grub/0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch @@ -0,0 +1,46 @@ +From 2dd65e321c9786dbec249e0826c58f530bcb5883 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Fri, 28 Oct 2022 13:39:03 +0000 +Subject: [PATCH] util/mkimage: Bump EFI PE header size to accommodate .sbat + section + +With the --sbat option mkimage can embed SBAT metadata into a dedicated +.sbat section of the EFI image. However, no space was explicitly +reserved for this section in the section table of the PE header. + +The miss has no adverse effects since there was enough padding in the +header anyway due to alignment constraints. An earlier commit, +a51f953f4ee8 ("mkimage: Align efi sections on 4k boundary"), increased +alignment to 4 KiB, so that the extra section table entry fit +comfortably inside the space reserved for the entire header anyway. 
+ +Fixes: b11547137703 ("util/mkimage: Add an option to import SBAT metadata into a .sbat section") +Signed-off-by: Markus Boehme +--- + util/mkimage.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/util/mkimage.c b/util/mkimage.c +index c3d33aa..2db1045 100644 +--- a/util/mkimage.c ++++ b/util/mkimage.c +@@ -65,14 +65,14 @@ + + GRUB_PE32_SIGNATURE_SIZE \ + + sizeof (struct grub_pe32_coff_header) \ + + sizeof (struct grub_pe32_optional_header) \ +- + 4 * sizeof (struct grub_pe32_section_table), \ ++ + 5 * sizeof (struct grub_pe32_section_table), \ + GRUB_PE32_FILE_ALIGNMENT) + + #define EFI64_HEADER_SIZE ALIGN_UP (GRUB_PE32_MSDOS_STUB_SIZE \ + + GRUB_PE32_SIGNATURE_SIZE \ + + sizeof (struct grub_pe32_coff_header) \ + + sizeof (struct grub_pe64_optional_header) \ +- + 4 * sizeof (struct grub_pe32_section_table), \ ++ + 5 * sizeof (struct grub_pe32_section_table), \ + GRUB_PE32_FILE_ALIGNMENT) + + static const struct grub_install_image_target_desc image_targets[] = +-- +2.39.0 + diff --git a/packages/grub/0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch b/packages/grub/0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch new file mode 100644 index 00000000..820313f4 --- /dev/null +++ b/packages/grub/0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch @@ -0,0 +1,98 @@ +From 45468642affc3d641b5d73c5d6e63e1578bf9150 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Fri, 28 Oct 2022 16:09:05 +0000 +Subject: [PATCH] util/mkimage: avoid adding section table entry outside PE EFI + header + +The number of sections in a PE EFI image can vary depending on the +options passed to mkimage, but their maximum number must be known at +compile time. Potentially adding a new section to a PE EFI image +therefore requires changes in at least two places of the code. + +The prior commit fixed a situation where the maximum number of sections +was not bumped while implementing support for an new section. Catch +these situations at runtime rather than silently relying on sufficient +padding being available for the new section table entry or overwriting +part of the image. + +Signed-off-by: Markus Boehme +--- + util/mkimage.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/util/mkimage.c b/util/mkimage.c +index 2db1045..1455c94 100644 +--- a/util/mkimage.c ++++ b/util/mkimage.c +@@ -821,6 +821,7 @@ grub_install_get_image_targets_string (void) + */ + static struct grub_pe32_section_table * + init_pe_section(const struct grub_install_image_target_desc *image_target, ++ const char *pe_img, + struct grub_pe32_section_table *section, + const char * const name, + grub_uint32_t *vma, grub_uint32_t vsz, grub_uint32_t valign, +@@ -828,6 +829,15 @@ init_pe_section(const struct grub_install_image_target_desc *image_target, + grub_uint32_t characteristics) + { + size_t len = strlen (name); ++ const char *pe_header_end; ++ ++ if (image_target->voidp_sizeof == 4) ++ pe_header_end = pe_img + EFI32_HEADER_SIZE; ++ else ++ pe_header_end = pe_img + EFI64_HEADER_SIZE; ++ ++ if ((char *) section >= pe_header_end) ++ grub_util_error (_("section table space exhausted trying to add %s"), name); + + if (len > sizeof (section->name)) + grub_util_error (_("section name %s length is bigger than %lu"), +@@ -1438,7 +1448,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + /* The sections. 
*/ + PE_OHDR (o32, o64, code_base) = grub_host_to_target32 (vma); + PE_OHDR (o32, o64, code_size) = grub_host_to_target32 (layout.exec_size); +- section = init_pe_section (image_target, section, ".text", ++ section = init_pe_section (image_target, pe_img, section, ".text", + &vma, layout.exec_size, + image_target->section_align, + &raw_data, layout.exec_size, +@@ -1452,7 +1462,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + ALIGN_UP (total_module_size, + GRUB_PE32_FILE_ALIGNMENT)); + +- section = init_pe_section (image_target, section, ".data", ++ section = init_pe_section (image_target, pe_img, section, ".data", + &vma, scn_size, image_target->section_align, + &raw_data, scn_size, + GRUB_PE32_SCN_CNT_INITIALIZED_DATA | +@@ -1460,7 +1470,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + GRUB_PE32_SCN_MEM_WRITE); + + scn_size = pe_size - layout.reloc_size - sbat_size - raw_data; +- section = init_pe_section (image_target, section, "mods", ++ section = init_pe_section (image_target, pe_img, section, "mods", + &vma, scn_size, image_target->section_align, + &raw_data, scn_size, + GRUB_PE32_SCN_CNT_INITIALIZED_DATA | +@@ -1472,7 +1482,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + pe_sbat = pe_img + raw_data; + grub_util_load_image (sbat_path, pe_sbat); + +- section = init_pe_section (image_target, section, ".sbat", ++ section = init_pe_section (image_target, pe_img, section, ".sbat", + &vma, sbat_size, + image_target->section_align, + &raw_data, sbat_size, +@@ -1484,7 +1494,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + PE_OHDR (o32, o64, base_relocation_table.rva) = grub_host_to_target32 (vma); + PE_OHDR (o32, o64, base_relocation_table.size) = grub_host_to_target32 (scn_size); + memcpy (pe_img + raw_data, layout.reloc_section, scn_size); +- init_pe_section (image_target, section, ".reloc", ++ init_pe_section (image_target, pe_img, section, ".reloc", + &vma, scn_size, image_target->section_align, + &raw_data, scn_size, + GRUB_PE32_SCN_CNT_INITIALIZED_DATA | +-- +2.39.0 + diff --git a/packages/grub/0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch b/packages/grub/0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch new file mode 100644 index 00000000..404baa3f --- /dev/null +++ b/packages/grub/0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch @@ -0,0 +1,93 @@ +From 076f520ec85198e82ddc0615e596df2a1ce4264b Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Wed, 2 Nov 2022 11:43:32 +0000 +Subject: [PATCH] efi: return virtual size of section found by + grub_efi_section_addr + +Return the virtual size of a section found by grub_efi_section_addr if +the caller asked for it. Modify the few existing callers who don't care +about the section size to fit the extended interface. + +This is close to the point where the implementation could do with a +refactoring, e.g. have a separate grub_efi_find_section that returns the +entire section table entry, and more focused convenience functions to +access those. However, grub_efi_find_section itself is already the +result of a Fedora refactoring, so I'm keeping the changes minimal. 
+ +Signed-off-by: Markus Boehme +--- + grub-core/kern/efi/efi.c | 8 ++++++-- + grub-core/kern/efi/init.c | 6 +++--- + include/grub/efi/efi.h | 2 +- + 3 files changed, 10 insertions(+), 6 deletions(-) + +diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c +index 4ac2b27..3a4475c 100644 +--- a/grub-core/kern/efi/efi.c ++++ b/grub-core/kern/efi/efi.c +@@ -310,9 +310,11 @@ grub_efi_get_variable (const char *var, const grub_efi_guid_t *guid, + #pragma GCC diagnostic ignored "-Wcast-align" + + /* Search the mods section from the PE32/PE32+ image. This code uses +- a PE32 header, but should work with PE32+ as well. */ ++ a PE32 header, but should work with PE32+ as well. If vsz is not ++ NULL and the section is found, the virtual size of the section ++ is written to *vsz. */ + grub_addr_t +-grub_efi_section_addr (const char *section_name) ++grub_efi_section_addr (const char *section_name, grub_uint32_t *vsz) + { + grub_efi_loaded_image_t *image; + struct grub_pe32_header *header; +@@ -359,6 +361,8 @@ grub_efi_section_addr (const char *section_name) + + grub_dprintf("sections", "returning section info for section %d: \"%s\"\n", + i, section->name); ++ if (vsz) ++ *vsz = section->virtual_size; + return (grub_addr_t) info; + } + +diff --git a/grub-core/kern/efi/init.c b/grub-core/kern/efi/init.c +index 0574d8d..533de93 100644 +--- a/grub-core/kern/efi/init.c ++++ b/grub-core/kern/efi/init.c +@@ -116,11 +116,11 @@ grub_efi_print_gdb_info (void) + grub_addr_t text; + grub_addr_t data; + +- text = grub_efi_section_addr (".text"); ++ text = grub_efi_section_addr (".text", NULL); + if (!text) + return; + +- data = grub_efi_section_addr (".data"); ++ data = grub_efi_section_addr (".data", NULL); + if (data) + grub_qdprintf ("gdb", + "add-symbol-file /usr/lib/debug/usr/lib/grub/%s-%s/" +@@ -136,7 +136,7 @@ grub_efi_print_gdb_info (void) + void + grub_efi_init (void) + { +- grub_modbase = grub_efi_section_addr ("mods"); ++ grub_modbase = grub_efi_section_addr ("mods", NULL); + /* First of all, initialize the console so that GRUB can display + messages. */ + grub_console_init (); +diff --git a/include/grub/efi/efi.h b/include/grub/efi/efi.h +index 449e552..5841a2e 100644 +--- a/include/grub/efi/efi.h ++++ b/include/grub/efi/efi.h +@@ -152,7 +152,7 @@ grub_err_t grub_arch_efi_linux_boot_image(grub_addr_t addr, grub_size_t size, + char *args, int nx_enabled); + #endif + +-grub_addr_t grub_efi_section_addr (const char *section); ++grub_addr_t grub_efi_section_addr (const char *section, grub_uint32_t *vsz); + + void grub_efi_mm_init (void); + void grub_efi_mm_fini (void); +-- +2.39.0 + diff --git a/packages/grub/0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch b/packages/grub/0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch new file mode 100644 index 00000000..537f53bc --- /dev/null +++ b/packages/grub/0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch @@ -0,0 +1,261 @@ +From eaa371d26be8f76f9d7ce7e9c4b5d30684a48f11 Mon Sep 17 00:00:00 2001 +From: Markus Boehme +Date: Mon, 24 Oct 2022 15:20:23 +0000 +Subject: [PATCH] mkimage, pgp: move single public key into its own section of + EFI image + +If mkimage is asked to embed a single public key for signature checking +into an EFI image, move that key into a new .pubkey section of the PE +file. Moving the key into its dedicated section allows for easily +swapping the public key, e.g. using objcopy, without having to rebuild +the image. 
+ +If more than one key is to be embedded, no new section is created and +all keys are embedded as modules just as before. The PGP signature +verification will check both of these sources for valid keys. + +Signed-off-by: Markus Boehme +--- + grub-core/commands/pgp.c | 28 ++++++++++++++ + include/grub/efi/efi.h | 2 +- + util/mkimage.c | 84 +++++++++++++++++++++++++++------------- + 3 files changed, 87 insertions(+), 27 deletions(-) + +diff --git a/grub-core/commands/pgp.c b/grub-core/commands/pgp.c +index b81ac0a..5fa1e8e 100644 +--- a/grub-core/commands/pgp.c ++++ b/grub-core/commands/pgp.c +@@ -32,6 +32,7 @@ + #include + #include + #include ++#include + + GRUB_MOD_LICENSE ("GPLv3+"); + +@@ -959,6 +960,33 @@ GRUB_MOD_INIT(pgp) + grub_pk_trusted = pk; + } + ++#ifdef GRUB_MACHINE_EFI ++ { ++ grub_addr_t pubkey_section; ++ grub_uint32_t pubkey_vsz; ++ ++ pubkey_section = grub_efi_section_addr (".pubkey", &pubkey_vsz); ++ if (pubkey_section) ++ { ++ struct grub_file pseudo_file; ++ struct grub_public_key *pk; ++ ++ grub_memset (&pseudo_file, 0, sizeof (pseudo_file)); ++ ++ pseudo_file.fs = &pseudo_fs; ++ pseudo_file.size = pubkey_vsz; ++ pseudo_file.data = (char *) pubkey_section; ++ ++ pk = grub_load_public_key (&pseudo_file); ++ if (!pk) ++ grub_fatal ("error loading key from .pubkey section: %s\n", grub_errmsg); ++ ++ pk->next = grub_pk_trusted; ++ grub_pk_trusted = pk; ++ } ++ } ++#endif /* GRUB_MACHINE_EFI */ ++ + if (!val) + grub_env_set ("check_signatures", grub_pk_trusted ? "enforce" : "no"); + +diff --git a/include/grub/efi/efi.h b/include/grub/efi/efi.h +index 5841a2e..d580b6b 100644 +--- a/include/grub/efi/efi.h ++++ b/include/grub/efi/efi.h +@@ -152,7 +152,7 @@ grub_err_t grub_arch_efi_linux_boot_image(grub_addr_t addr, grub_size_t size, + char *args, int nx_enabled); + #endif + +-grub_addr_t grub_efi_section_addr (const char *section, grub_uint32_t *vsz); ++grub_addr_t EXPORT_FUNC(grub_efi_section_addr) (const char *section, grub_uint32_t *vsz); + + void grub_efi_mm_init (void); + void grub_efi_mm_fini (void); +diff --git a/util/mkimage.c b/util/mkimage.c +index 1455c94..01362d1 100644 +--- a/util/mkimage.c ++++ b/util/mkimage.c +@@ -65,14 +65,14 @@ + + GRUB_PE32_SIGNATURE_SIZE \ + + sizeof (struct grub_pe32_coff_header) \ + + sizeof (struct grub_pe32_optional_header) \ +- + 5 * sizeof (struct grub_pe32_section_table), \ ++ + 6 * sizeof (struct grub_pe32_section_table), \ + GRUB_PE32_FILE_ALIGNMENT) + + #define EFI64_HEADER_SIZE ALIGN_UP (GRUB_PE32_MSDOS_STUB_SIZE \ + + GRUB_PE32_SIGNATURE_SIZE \ + + sizeof (struct grub_pe32_coff_header) \ + + sizeof (struct grub_pe64_optional_header) \ +- + 5 * sizeof (struct grub_pe32_section_table), \ ++ + 6 * sizeof (struct grub_pe32_section_table), \ + GRUB_PE32_FILE_ALIGNMENT) + + static const struct grub_install_image_target_desc image_targets[] = +@@ -887,12 +887,13 @@ grub_install_generate_image (const char *dir, const char *prefix, + char *kernel_img, *core_img; + size_t total_module_size, core_size; + size_t memdisk_size = 0, config_size = 0; +- size_t prefix_size = 0, dtb_size = 0, sbat_size = 0; ++ size_t prefix_size = 0, dtb_size = 0, pubkey_size = 0, sbat_size = 0; + char *kernel_path; + size_t offset; + struct grub_util_path_list *path_list, *p; + size_t decompress_size = 0; + struct grub_mkimage_layout layout; ++ int pubkey_section = 0; + + if (comp == GRUB_COMPRESSION_AUTO) + comp = image_target->default_compression; +@@ -911,7 +912,10 @@ grub_install_generate_image (const char *dir, const char *prefix, + else + 
total_module_size = sizeof (struct grub_module_info32); + +- { ++ if (npubkeys == 1 && image_target->id == IMAGE_EFI) ++ pubkey_section = 1; ++ ++ if (!pubkey_section) { + size_t i; + for (i = 0; i < npubkeys; i++) + { +@@ -1048,24 +1052,25 @@ grub_install_generate_image (const char *dir, const char *prefix, + offset += mod_size; + } + +- { +- size_t i; +- for (i = 0; i < npubkeys; i++) +- { +- size_t curs; +- struct grub_module_header *header; +- +- curs = grub_util_get_image_size (pubkey_paths[i]); +- +- header = (struct grub_module_header *) (kernel_img + offset); +- header->type = grub_host_to_target32 (OBJ_TYPE_GPG_PUBKEY); +- header->size = grub_host_to_target32 (curs + sizeof (*header)); +- offset += sizeof (*header); +- +- grub_util_load_image (pubkey_paths[i], kernel_img + offset); +- offset += ALIGN_ADDR (curs); +- } +- } ++ if (!pubkey_section) ++ { ++ size_t i; ++ for (i = 0; i < npubkeys; i++) ++ { ++ size_t curs; ++ struct grub_module_header *header; ++ ++ curs = grub_util_get_image_size (pubkey_paths[i]); ++ ++ header = (struct grub_module_header *) (kernel_img + offset); ++ header->type = grub_host_to_target32 (OBJ_TYPE_GPG_PUBKEY); ++ header->size = grub_host_to_target32 (curs + sizeof (*header)); ++ offset += sizeof (*header); ++ ++ grub_util_load_image (pubkey_paths[i], kernel_img + offset); ++ offset += ALIGN_ADDR (curs); ++ } ++ } + + { + size_t i; +@@ -1351,7 +1356,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + break; + case IMAGE_EFI: + { +- char *pe_img, *pe_sbat, *header; ++ char *pe_img, *pe_pubkey, *pe_sbat, *header; + struct grub_pe32_section_table *section; + size_t n_sections = 4; + size_t scn_size; +@@ -1369,6 +1374,16 @@ grub_install_generate_image (const char *dir, const char *prefix, + + vma = raw_data = header_size; + ++ if (pubkey_section) ++ { ++ pubkey_size = ALIGN_ADDR (grub_util_get_image_size (pubkey_paths[0])); ++ pubkey_size = ALIGN_UP (pubkey_size, GRUB_PE32_FILE_ALIGNMENT); ++ if (pubkey_size == 0) ++ grub_util_error ( ++ _("embedding public key '%s' would result in invalid empty section"), ++ pubkey_paths[0]); ++ } ++ + if (sbat_path != NULL) + { + sbat_size = ALIGN_ADDR (grub_util_get_image_size (sbat_path)); +@@ -1376,7 +1391,8 @@ grub_install_generate_image (const char *dir, const char *prefix, + } + + pe_size = ALIGN_UP (header_size + core_size, GRUB_PE32_FILE_ALIGNMENT) + +- ALIGN_UP (layout.reloc_size, GRUB_PE32_FILE_ALIGNMENT) + sbat_size; ++ ALIGN_UP (layout.reloc_size, GRUB_PE32_FILE_ALIGNMENT) + ++ pubkey_size + sbat_size; + header = pe_img = xcalloc (1, pe_size); + + memcpy (pe_img + raw_data, core_img, core_size); +@@ -1391,6 +1407,9 @@ grub_install_generate_image (const char *dir, const char *prefix, + + GRUB_PE32_SIGNATURE_SIZE); + c->machine = grub_host_to_target16 (image_target->pe_target); + ++ if (pubkey_section) ++ n_sections++; ++ + if (sbat_path != NULL) + n_sections++; + +@@ -1458,7 +1477,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + + scn_size = ALIGN_UP (layout.kernel_size - layout.exec_size, GRUB_PE32_FILE_ALIGNMENT); + /* ALIGN_UP (sbat_size, GRUB_PE32_FILE_ALIGNMENT) is done earlier. 
*/ +- PE_OHDR (o32, o64, data_size) = grub_host_to_target32 (scn_size + sbat_size + ++ PE_OHDR (o32, o64, data_size) = grub_host_to_target32 (scn_size + pubkey_size + sbat_size + + ALIGN_UP (total_module_size, + GRUB_PE32_FILE_ALIGNMENT)); + +@@ -1469,7 +1488,7 @@ grub_install_generate_image (const char *dir, const char *prefix, + GRUB_PE32_SCN_MEM_READ | + GRUB_PE32_SCN_MEM_WRITE); + +- scn_size = pe_size - layout.reloc_size - sbat_size - raw_data; ++ scn_size = pe_size - layout.reloc_size - pubkey_size - sbat_size - raw_data; + section = init_pe_section (image_target, pe_img, section, "mods", + &vma, scn_size, image_target->section_align, + &raw_data, scn_size, +@@ -1477,6 +1496,19 @@ grub_install_generate_image (const char *dir, const char *prefix, + GRUB_PE32_SCN_MEM_READ | + GRUB_PE32_SCN_MEM_WRITE); + ++ if (pubkey_section) ++ { ++ pe_pubkey = pe_img + raw_data; ++ grub_util_load_image (pubkey_paths[0], pe_pubkey); ++ ++ section = init_pe_section (image_target, pe_img, section, ".pubkey", ++ &vma, pubkey_size, ++ image_target->section_align, ++ &raw_data, pubkey_size, ++ GRUB_PE32_SCN_CNT_INITIALIZED_DATA | ++ GRUB_PE32_SCN_MEM_READ); ++ } ++ + if (sbat_path != NULL) + { + pe_sbat = pe_img + raw_data; +-- +2.39.0 + diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 74931a4e..18040d14 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -59,6 +59,10 @@ Patch0038: 0038-gpt-report-all-revalidation-errors.patch Patch0039: 0039-gpt-rename-and-update-documentation-for-grub_gpt_upd.patch Patch0040: 0040-gpt-write-backup-GPT-first-skip-if-inaccessible.patch Patch0041: 0041-gptprio-Use-Bottlerocket-boot-partition-type-GUID.patch +Patch0042: 0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch +Patch0043: 0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch +Patch0044: 0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch +Patch0045: 0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch BuildRequires: automake BuildRequires: bison From 70251db299ea6a914067a436e0f1dfeda10b3ce4 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1021/1356] grub: add SBAT info shim expects to find an SBAT section in GRUB, and will not continue booting if it is missing. Bottlerocket's build of GRUB is downstream from Amazon Linux, which is downstream from Fedora and RHEL. However, most of GRUB's modules are not included in the BIOS and EFI images, and the configuration file built into the image disables loading any other modules. Hence it's not a given that vulnerabilities in either upstream should lead to SBAT-based revocations. Bottlerocket carries quite a few out-of-tree patches which add the `gpt` and `gptprio` modules, and consequently has an unenviable, but useful, claim to its own vendor entry. Clearly the vast majority of GRUB development happens elsewhere, and the use of a distro-specific vendor entry is not meant to imply otherwise. There are no current plans for Bottlerocket to participate in the wider ecosystem of Secure Boot for Linux distributions, by way of a Microsoft-signed shim, so the choice of SBAT metadata is not relevant elsewhere. 
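For illustration (assuming a binutils build with PE support; the image name is a placeholder), the embedded metadata can be inspected in the finished image and should match sbat.csv.in below with __VERSION__ substituted:

    objcopy -O binary --only-section=.sbat "$GRUB_EFI_IMAGE" sbat.csv
    cat sbat.csv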
Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 4 ++++ packages/grub/sbat.csv.in | 3 +++ 2 files changed, 7 insertions(+) create mode 100644 packages/grub/sbat.csv.in diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 18040d14..cffae7ad 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -18,6 +18,7 @@ URL: https://www.gnu.org/software/grub/ Source0: https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm Source1: bios.cfg Source2: efi.cfg +Source3: sbat.csv.in Patch0001: 0001-setup-Add-root-device-argument-to-grub-setup.patch Patch0002: 0002-gpt-start-new-GPT-module.patch Patch0003: 0003-gpt-rename-misnamed-header-location-fields.patch @@ -156,6 +157,8 @@ popd mkdir efi-build pushd efi-build +sed -e "s,__VERSION__,%{version},g" %{S:3} > sbat.csv + %cross_configure \ CFLAGS="" \ LDFLAGS="" \ @@ -201,6 +204,7 @@ mkdir -p %{buildroot}%{efidir} -O "%{_cross_grub_efi_format}" \ -o "%{buildroot}%{efidir}/%{efi_image}" \ -p "/EFI/BOOT" \ + --sbat sbat.csv \ efi_gop ${MODS} popd diff --git a/packages/grub/sbat.csv.in b/packages/grub/sbat.csv.in new file mode 100644 index 00000000..78e44b9d --- /dev/null +++ b/packages/grub/sbat.csv.in @@ -0,0 +1,3 @@ +sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md +grub,3,Free Software Foundation,grub,__VERSION__,https://www.gnu.org/software/grub/ +grub.bottlerocket,1,Bottlerocket,grub,__VERSION__,https://github.com/bottlerocket-os/bottlerocket/blob/develop/SECURITY.md From f0bd6d4ff1bc31ba7f887030da6d87037b05a556 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1022/1356] grub: add modules for GPG signature verification Build the GRUB EFI image with the modules needed for GPG signature verification, and embed a large placeholder public key so it can be replaced in the final stage of the build. Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index cffae7ad..e13c066b 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -177,7 +177,11 @@ sed -e "s,__VERSION__,%{version},g" %{S:3} > sbat.csv popd %install -MODS="configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd search" +MODS=(configfile echo ext2 gptprio linux normal part_gpt reboot sleep zstd search) + +# These modules are needed for signature verification, which is currently only +# done for the EFI build of GRUB. +VERIFY_MODS=(pgp crypto gcry_sha256 gcry_sha512 gcry_dsa gcry_rsa) %if "%{_cross_arch}" == "x86_64" pushd bios-build @@ -189,7 +193,7 @@ mkdir -p %{buildroot}%{biosdir} -O "i386-pc" \ -o "%{buildroot}%{biosdir}/core.img" \ -p "(hd0,gpt2)/boot/grub" \ - biosdisk serial ${MODS} + biosdisk serial ${MODS[@]} install -m 0644 ./grub-core/boot.img \ %{buildroot}%{biosdir}/boot.img popd @@ -198,14 +202,20 @@ popd pushd efi-build %make_install mkdir -p %{buildroot}%{efidir} + +# Make sure the `.pubkey` section is large enough to cover a replacement +# certificate, or `objcopy` may silently retain the existing section. 
+truncate -s 4096 empty.pubkey + %{buildroot}%{_cross_bindir}/grub-mkimage \ -c %{S:2} \ -d ./grub-core/ \ -O "%{_cross_grub_efi_format}" \ -o "%{buildroot}%{efidir}/%{efi_image}" \ -p "/EFI/BOOT" \ + --pubkey empty.pubkey \ --sbat sbat.csv \ - efi_gop ${MODS} + efi_gop ${MODS[@]} ${VERIFY_MODS[@]} popd %files From 5ffa27b61239b2d34d518f3d982055b37021ef6e Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1023/1356] grub: add script to replace the public key embedded in GRUB The newly added replace-grub-pubkey script allows to replace the public key embedded in a GRUB image. After replacing the key, the image will be properly signed again. Example invocation: ./replace-grub-pubkey grubx64.efi new-grub.pubkey ~/bottlerocket/sbkeys/local/ The script assumes that new-grub.pubkey is a public key file in GPG format containing a single public key. Signed-off-by: Markus Boehme --- packages/grub/replace-grub-pubkey | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 packages/grub/replace-grub-pubkey diff --git a/packages/grub/replace-grub-pubkey b/packages/grub/replace-grub-pubkey new file mode 100755 index 00000000..7d8a69b5 --- /dev/null +++ b/packages/grub/replace-grub-pubkey @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +set -e + +readonly grub_image="${1:?expecting GRUB image as first argument}" +readonly public_key="${2:?expecting GPG public key file as second argument}" +readonly local_keys="${3:?expecting directory containing local signing keys as third argument}" + + +# +# Create unsigned image with embedded key replaced +# + +rm -f "${grub_image}.unsigned" +pesign -r -u 0 -i "${grub_image}" -o "${grub_image}.unsigned" +objcopy --update-section .pubkey="${public_key}" "${grub_image}.unsigned" + + +# +# Re-sign resulting image (steps copied from rpm2img) +# + +# Generate the PKCS12 archive for import. +openssl pkcs12 \ + -export \ + -passout pass: \ + -inkey "${local_keys}/code-sign.key" \ + -in "${local_keys}/code-sign.crt" \ + -certfile "${local_keys}/CA.crt" \ + -out "${local_keys}/code-sign.p12" + +# Import certificates and private key archive. +PEDB="/etc/pki/pesign" +certutil -d "${PEDB}" -A -n CA -i "${local_keys}/CA.crt" -t "CT,C,C" +certutil -d "${PEDB}" -A -n code-sign-key -i "${local_keys}/code-sign.crt" -t ",,P" +pk12util -d "${PEDB}" -i "${local_keys}/code-sign.p12" -W "" +certutil -d "${PEDB}" -L +PESIGN_KEY="-c code-sign-key" + +openssl x509 \ + -inform PEM -in "${local_keys}/CA.crt" \ + -outform DER -out "${local_keys}/CA.der" + +pesign -i "${grub_image}.unsigned" -o "${grub_image}" -f -s ${PESIGN_KEY} +pesigcheck -i "${grub_image}" -n 0 -c "${local_keys}/CA.der" From 93ae8747d192ca2153cf9763d1081427b03cca10 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1024/1356] packages: build shim shim will now be used as the first-stage bootloader on EFI platforms, and will handle part of the Secure Boot verification chain when that feature is enabled. Adjust the macros used by the grub package so that shim runs first, and finds GRUB with the expected filename. 
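With those macros (shown for x86_64; the aarch64 names use the aa64 suffix), the EFI system partition ends up laid out like this:

    ls /boot/efi/EFI/BOOT/
    # bootx64.efi   shim, picked up first by firmware via the removable-media path
    # grubx64.efi   GRUB, loaded by shim as DEFAULT_LOADER
    # mmx64.efi     MokManager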
Signed-off-by: Ben Cressey --- packages/grub/grub.spec | 2 +- packages/shim/Cargo.toml | 18 ++++++++++++ packages/shim/shim.spec | 61 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 packages/shim/Cargo.toml create mode 100644 packages/shim/shim.spec diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index e13c066b..c99a4f40 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -2,7 +2,7 @@ %global __strip %{_bindir}/true %global efidir /boot/efi/EFI/BOOT -%global efi_image boot%{_cross_efi_arch}.efi +%global efi_image grub%{_cross_efi_arch}.efi %global biosdir /boot/grub # This is specific to the upstream source RPM, and will likely need to be diff --git a/packages/shim/Cargo.toml b/packages/shim/Cargo.toml new file mode 100644 index 00000000..feb02fb6 --- /dev/null +++ b/packages/shim/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "shim" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[lib] +path = "/dev/null" + +[[package.metadata.build-package.external-files]] +url = "https://github.com/rhboot/shim/archive/15.7/shim-15.7.tar.gz" +sha512 = "95ef9c0125269cfa0263a32e4f343d8ccc8813d71fa918a2f54850781e3a2d6a06a719249be355fdb24c935899e0e11370815501ecde1800bdd974a9a79c5612" + +[[package.metadata.build-package.external-files]] +url = "https://github.com/rhboot/gnu-efi/archive/refs/heads/shim-15.6.tar.gz" +path = "gnu-efi-shim-15.6.tar.gz" +sha512 = "d09dbb9e461d60e23294326ed4178301a6ab5959ade912bf559dbeb050362d994c8e63c8e062c19569055a269e5dbb65f0572317da4725177e19aae82e3c6978" diff --git a/packages/shim/shim.spec b/packages/shim/shim.spec new file mode 100644 index 00000000..36454b70 --- /dev/null +++ b/packages/shim/shim.spec @@ -0,0 +1,61 @@ +%global debug_package %{nil} +%global __strip %{_bindir}/true + +%global efidir /boot/efi/EFI/BOOT +%global boot_efi_image boot%{_cross_efi_arch}.efi +%global grub_efi_image grub%{_cross_efi_arch}.efi +%global shim_efi_image shim%{_cross_efi_arch}.efi +%global mokm_efi_image mm%{_cross_efi_arch}.efi + +%global shimver 15.7 +%global gnuefiver 15.6 +%global commit 11491619f4336fef41c3519877ba242161763580 + +Name: %{_cross_os}shim +Version: %{shimver} +Release: 1%{?dist} +Summary: UEFI shim loader +License: BSD-3-Clause +URL: https://github.com/rhboot/shim/ +Source0: https://github.com/rhboot/shim/archive/%{shimver}/shim-%{shimver}.tar.gz +Source1: https://github.com/rhboot/gnu-efi/archive/refs/heads/shim-%{gnuefiver}.tar.gz#/gnu-efi-shim-%{gnuefiver}.tar.gz + +%description +%{summary}. 
+ +%prep +%autosetup -n shim-%{shimver} -p1 +%setup -T -D -n shim-%{shimver} -a 1 +rmdir gnu-efi +mv gnu-efi-shim-%{gnuefiver} gnu-efi + +%global shim_make \ +%make_build\\\ + ARCH="%{_cross_arch}"\\\ + CROSS_COMPILE="%{_cross_target}-"\\\ + COMMIT_ID="%{commit}"\\\ + RELEASE="%{release}"\\\ + DEFAULT_LOADER="%{grub_efi_image}"\\\ + DISABLE_REMOVABLE_LOAD_OPTIONS=y\\\ + DESTDIR="%{buildroot}"\\\ + EFIDIR="BOOT"\\\ +%{nil} + +%build +%shim_make + +%install +%shim_make install-as-data +install -d %{buildroot}%{efidir} +find %{buildroot}%{_datadir} -name '%{shim_efi_image}' -exec \ + mv {} "%{buildroot}%{efidir}/%{boot_efi_image}" \; +find %{buildroot}%{_datadir} -name '%{mokm_efi_image}' -exec \ + mv {} "%{buildroot}%{efidir}/%{mokm_efi_image}" \; +rm -rf %{buildroot}%{_datadir} + +%files +%license COPYRIGHT +%{_cross_attribution_file} +%dir %{efidir} +%{efidir}/%{boot_efi_image} +%{efidir}/%{mokm_efi_image} From c1512047109a2a93f51db030e34c3b06bb22ed54 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1025/1356] shim: embed placeholder for vendor cert Add a large placeholder certificate to the shim binary to facilitate replacement in the final stage of the build. Note that shim is used as the first-stage UEFI bootloader for all variants, but only enforces Secure Boot if the relevant EFI variables are set, and will not use the vendor certificate otherwise. Signed-off-by: Ben Cressey --- packages/shim/shim.spec | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/shim/shim.spec b/packages/shim/shim.spec index 36454b70..f887537c 100644 --- a/packages/shim/shim.spec +++ b/packages/shim/shim.spec @@ -29,6 +29,11 @@ Source1: https://github.com/rhboot/gnu-efi/archive/refs/heads/shim-%{gnuefiver}. rmdir gnu-efi mv gnu-efi-shim-%{gnuefiver} gnu-efi +# Make sure the `.vendor_cert` section is large enough to cover a replacement +# certificate, or `objcopy` may silently retain the existing section. +# 4096 - 16 (for cert_table structure) = 4080 bytes. +truncate -s 4080 empty.cer + %global shim_make \ %make_build\\\ ARCH="%{_cross_arch}"\\\ @@ -39,6 +44,7 @@ mv gnu-efi-shim-%{gnuefiver} gnu-efi DISABLE_REMOVABLE_LOAD_OPTIONS=y\\\ DESTDIR="%{buildroot}"\\\ EFIDIR="BOOT"\\\ + VENDOR_CERT_FILE="empty.cer"\\\ %{nil} %build From a08ff5f2cf49e4f4bf5e41ac269806d88e104265 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1026/1356] build: add scripts to create Secure Boot profiles For Secure Boot, various certificates, keys, and configuration files are needed to sign binaries and register images. Provide two scripts to simplify the process of generating the correct artifacts. The "local" version of the script is only meant for non-production use, for example by individual developers or by automated CI testing, where the variant builds must support the feature but do not need to be maintained indefinitely. The "aws" version of the script expects various AWS resources such as private CAs and managed keys to be available. This can be costly and only makes sense for official builds of supported variants. 
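For illustration only (the image name is a placeholder): the section can be examined after the build to confirm what is embedded; the first 16 bytes are shim's cert_table structure, with the DER certificate following.

    objcopy -O binary --only-section=.vendor_cert "$SHIM_EFI_IMAGE" vendor_cert.bin
    xxd vendor_cert.bin | head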
Signed-off-by: Ben Cressey --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index e995a802..81553e84 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ /*.pem /keys /roles +/sbkeys/**/ /Licenses.toml /licenses *.run From 8416f53e52c482d5ea2c300995767865525cba7a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1027/1356] build: auto-create a local Secure Boot profile Ensure that at least one Secure Boot profile is always available, for cases where the variant has Secure Boot enabled. This uses the "local" version of the script, since locally generated keys cost nothing and are guaranteed to be available. This is similar to the locally generated keys used by default for TUF repositories, in that neither is suitable for long-term production use. The individual Secure Boot profile and the directory where profiles are stored can both be overridden, so that profiles can be stored in a different location, such as another Git repository. Profiles are checked for completeness, to allow the expected files to evolve over time, for example to add support for a new platform that expects EFI variables in a different format. Signed-off-by: Ben Cressey --- .dockerignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index fadd933b..d02d740d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,5 @@ /build/rpms/*-debuginfo-*.rpm /build/rpms/*-debugsource-*.rpm **/target/* -/tests \ No newline at end of file +/sbkeys +/tests From b9b44d4f2cf9fef0d10c9177b049db4b060abbb5 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1028/1356] build: pass Secure Boot profile secrets to builds `buildsys` needs to pass through files from the Secure Boot keys profile in order to sign artifacts during the variant build step. Those files might include an `aws-kms-pkcs11` configuration that uses KMS for signing, which requires network access to be enabled for variant builds. On an EC2 instance, credentials from IMDS will be used automatically by `aws-kms-pkcs11`, but otherwise they need to come from environment variables or an AWS CLI configuration file. Accordingly, `buildsys` now passes the most important of these AWS environment variables as additional secrets. 
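Purely as an illustration of the sort of non-production artifacts a "local" profile holds (file names follow the replace-grub-pubkey script earlier; the actual generation steps live in the scripts added here):

    # Self-signed CA plus a code-signing certificate issued from it.
    openssl req -x509 -newkey rsa:2048 -nodes -days 365 -subj '/CN=local CA' \
      -keyout CA.key -out CA.crt
    openssl req -newkey rsa:2048 -nodes -subj '/CN=code-sign' \
      -keyout code-sign.key -out code-sign.csr
    openssl x509 -req -in code-sign.csr -CA CA.crt -CAkey CA.key -CAcreateserial \
      -days 365 -out code-sign.crt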
Signed-off-by: Ben Cressey --- tools/buildsys/src/builder.rs | 63 +++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 29048412..8faf4bc1 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -16,7 +16,7 @@ use sha2::{Digest, Sha512}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::HashSet; use std::env; -use std::fs::{self, File}; +use std::fs::{self, read_dir, File}; use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::process::Output; @@ -98,6 +98,8 @@ impl PackageBuilder { .goarch(); let mut args = Vec::new(); + args.push("--network".into()); + args.push("none".into()); args.build_arg("PACKAGE", package); args.build_arg("ARCH", &arch); args.build_arg("GOARCH", goarch); @@ -168,6 +170,8 @@ impl VariantBuilder { image_layout.publish_image_sizes_gib(); let mut args = Vec::new(); + args.push("--network".into()); + args.push("host".into()); args.build_arg("PACKAGES", packages.join(" ")); args.build_arg("ARCH", &arch); args.build_arg("GOARCH", goarch); @@ -214,6 +218,9 @@ impl VariantBuilder { } } + // Add known secrets to the build argments. + add_secrets(&mut args)?; + // Always rebuild variants since they are located in a different workspace, // and don't directly track changes in the underlying packages. getenv("BUILDSYS_TIMESTAMP")?; @@ -277,7 +284,6 @@ fn build( let mut build = format!( "build . \ - --network none \ --target {target} \ --tag {tag}", target = target, @@ -383,6 +389,37 @@ enum Retry<'a> { // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= +/// Add secrets that might be needed for builds. Since most builds won't use +/// them, they are not automatically tracked for changes. If necessary, builds +/// can emit the relevant cargo directives for tracking in their build script. +fn add_secrets(args: &mut Vec) -> Result<()> { + let sbkeys_var = "BUILDSYS_SBKEYS_PROFILE_DIR"; + let sbkeys_dir = env::var(sbkeys_var).context(error::EnvironmentSnafu { var: sbkeys_var })?; + + let sbkeys = read_dir(&sbkeys_dir).context(error::DirectoryReadSnafu { path: &sbkeys_dir })?; + for s in sbkeys { + let s = s.context(error::DirectoryReadSnafu { path: &sbkeys_dir })?; + args.build_secret( + "file", + &s.file_name().to_string_lossy(), + &s.path().to_string_lossy(), + ); + } + + for var in &[ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + ] { + let id = format!("{}.env", var.to_lowercase().replace('_', "-")); + args.build_secret("env", &id, var); + } + + Ok(()) +} + +// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + /// Create a directory for build artifacts. fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result { let prefix = match kind { @@ -575,6 +612,28 @@ impl BuildArg for Vec { } } +/// Helper trait for constructing buildkit --secret arguments. +trait BuildSecret { + fn build_secret(&mut self, typ: S, id: S, src: S) + where + S: AsRef; +} + +impl BuildSecret for Vec { + fn build_secret(&mut self, typ: S, id: S, src: S) + where + S: AsRef, + { + self.push("--secret".to_string()); + self.push(format!( + "type={},id={},src={}", + typ.as_ref(), + id.as_ref(), + src.as_ref() + )); + } +} + /// Helper trait for splitting a string on spaces into owned Strings. 
/// /// If you need an element with internal spaces, you should handle that separately, for example From f9574e302b21255514265a3deb9b9519d6aa401c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1029/1356] build: add image feature flag for Secure Boot Secure Boot is incompatible with existing variants, so a feature flag is required to conditionally enable it. Signed-off-by: Ben Cressey --- tools/buildsys/src/manifest.rs | 15 +++++++++++++++ tools/rpm2img | 2 ++ 2 files changed, 17 insertions(+) diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 1ee01d8b..73fee6ce 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -221,6 +221,18 @@ default will remain ext4 and xfs is opt-in. ```ignore [package.metadata.build-variant.image-features] xfs-data-partition = true +``` + +`uefi-secure-boot` means that the bootloader and kernel are signed. The grub image for the current +variant will have a public GPG baked in, and will expect the grub config file to have a valid +detached signature. Published artifacts such as AMIs and OVAs will enforce the signature checks +when the platform supports it. + +```ignore +[package.metadata.build-variant.image-features] +uefi-secure-boot = true +``` + */ mod error; @@ -513,6 +525,7 @@ pub enum ImageFeature { SystemdNetworkd, UnifiedCgroupHierarchy, XfsDataPartition, + UefiSecureBoot, } impl TryFrom for ImageFeature { @@ -523,6 +536,7 @@ impl TryFrom for ImageFeature { "systemd-networkd" => Ok(ImageFeature::SystemdNetworkd), "unified-cgroup-hierarchy" => Ok(ImageFeature::UnifiedCgroupHierarchy), "xfs-data-partition" => Ok(ImageFeature::XfsDataPartition), + "uefi-secure-boot" => Ok(ImageFeature::UefiSecureBoot), _ => error::ParseImageFeatureSnafu { what: s }.fail()?, } } @@ -535,6 +549,7 @@ impl fmt::Display for ImageFeature { ImageFeature::SystemdNetworkd => write!(f, "SYSTEMD_NETWORKD"), ImageFeature::UnifiedCgroupHierarchy => write!(f, "UNIFIED_CGROUP_HIERARCHY"), ImageFeature::XfsDataPartition => write!(f, "XFS_DATA_PARTITION"), + ImageFeature::UefiSecureBoot => write!(f, "UEFI_SECURE_BOOT"), } } } diff --git a/tools/rpm2img b/tools/rpm2img index f0915d78..e071ef23 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -14,6 +14,7 @@ OVF_TEMPLATE="" GRUB_SET_PRIVATE_VAR="no" XFS_DATA_PARTITION="no" +UEFI_SECURE_BOOT="no" for opt in "$@"; do optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" @@ -29,6 +30,7 @@ for opt in "$@"; do --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; --with-grub-set-private-var=*) GRUB_SET_PRIVATE_VAR="${optarg}" ;; --xfs-data-partition=*) XFS_DATA_PARTITION="${optarg}" ;; + --with-uefi-secure-boot=*) UEFI_SECURE_BOOT="${optarg}" ;; esac done From 50b0dabefdeedee3093c8acada1e8a65aa0cd90d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1030/1356] build: sign artifacts for Secure Boot When Secure Boot is enabled for the variant, ensure that EFI binaries and the GRUB configuration are signed with the expected keys, and replace the placeholder certificates and keys with the right ones. When building an OVA, populate the template with the EFI variable data. Because GRUB will have an embedded GPG public key in this mode, it will automatically verify the detached signature for every file read. This is the desired behavior for `grub.cfg`, which contains sensitive parameters like the dm-verity root hash. However, it's redundant with the EFI binary verification performed by the shim verifier. 
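As an aside, the check in question is GnuPG's ordinary detached-signature verification; an illustrative sketch with placeholder file names:

    gpg --detach-sign grub.cfg           # writes grub.cfg.sig next to the config
    gpg --verify grub.cfg.sig grub.cfg   # fails if grub.cfg changes after signing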
It would also prevent reading the "initrd" that contains the Boot Config data for kernel command line parameters, because this is generated on the local system and cannot be signed by a trusted key. Since this is the only remaining file to read that is not an EFI binary, it's OK to disable signature checking inside the verified `grub.cfg`. Have GRUB reboot if the kernel cannot be loaded, and backstop this by blocking edits to menu entries with an empty superusers group. This prevents runtime modifications to the expected configuration, which could otherwise be used to alter the dm-verity root hash. Signed-off-by: Ben Cressey Signed-off-by: Markus Boehme --- tools/rpm2img | 234 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 232 insertions(+), 2 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index e071ef23..26a09462 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -70,6 +70,13 @@ if [ "${OUTPUT_FMT}" == "vmdk" ] ; then exit 1 fi fi + + if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + if ! grep -Fq '{{DB_CERT_DER_HEX}}' "${OVF_TEMPLATE}" ; then + echo "Missing CA certificate field in OVF template, which is required for Secure Boot support." >&2 + exit 1 + fi + fi fi # Store output artifacts in a versioned directory. @@ -121,6 +128,8 @@ DATA_MOUNT="$(mktemp -d)" EFI_MOUNT="$(mktemp -d)" PRIVATE_MOUNT="$(mktemp -d)" +SBKEYS="${HOME}/sbkeys" + SELINUX_ROOT="/etc/selinux" SELINUX_POLICY="fortified" SELINUX_FILE_CONTEXTS="${ROOT_MOUNT}/${SELINUX_ROOT}/${SELINUX_POLICY}/contexts/files/file_contexts" @@ -254,11 +263,184 @@ fi # package has placed the image in /boot/efi/EFI/BOOT. mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" +# Do the setup required for `pesign` and `gpg` signing and +# verification to "just work" later on, regardless of which +# type of signing profile we have. +if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + declare -a SHIM_SIGN_KEY + declare -a CODE_SIGN_KEY + + # For an AWS profile, we expect a config file for the PKCS11 + # helper. Otherwise, there should be a local key and cert. + if [ -s "${HOME}/.config/aws-kms-pkcs11/config.json" ] ; then + # Set AWS environment variables from build secrets, if present. + for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN ; do + val="${var,,}" + val="${HOME}/.aws/${val//_/-}.env" + [ -s "${val}" ] || continue + declare -x "${var}=$(cat "${val}")" + done + # Verify that AWS credentials are functional. + aws sts get-caller-identity + # Log all PKCS11 helper activity, to simplify debugging. + export AWS_KMS_PKCS11_DEBUG=1 + SB_KEY_SOURCE="aws" + SHIM_SIGN_KEY=(-c shim-sign-key -t shim-sign-key) + CODE_SIGN_KEY=(-c code-sign-key -t code-sign-key) + else + # Disable the PKCS11 helper. + rm /etc/pkcs11/modules/aws-kms-pkcs11.module + + # Generate the PKCS12 archives for import. + openssl pkcs12 \ + -export \ + -passout pass: \ + -inkey "${SBKEYS}/shim-sign.key" \ + -in "${SBKEYS}/shim-sign.crt" \ + -certfile "${SBKEYS}/db.crt" \ + -out "${SBKEYS}/shim-sign.p12" + + openssl pkcs12 \ + -export \ + -passout pass: \ + -inkey "${SBKEYS}/code-sign.key" \ + -in "${SBKEYS}/code-sign.crt" \ + -certfile "${SBKEYS}/vendor.crt" \ + -out "${SBKEYS}/code-sign.p12" + + # Import certificates and private key archive. 
+ PEDB="/etc/pki/pesign" + + certutil -d "${PEDB}" -A -n db -i "${SBKEYS}/db.crt" -t ",,C" + certutil -d "${PEDB}" -A -n shim-sign-key -i "${SBKEYS}/shim-sign.crt" -t ",,P" + pk12util -d "${PEDB}" -i "${SBKEYS}/shim-sign.p12" -W "" + + certutil -d "${PEDB}" -A -n vendor -i "${SBKEYS}/vendor.crt" -t ",,C" + certutil -d "${PEDB}" -A -n code-sign-key -i "${SBKEYS}/code-sign.crt" -t ",,P" + pk12util -d "${PEDB}" -i "${SBKEYS}/code-sign.p12" -W "" + + certutil -d "${PEDB}" -L + SB_KEY_SOURCE="local" + SHIM_SIGN_KEY=(-c shim-sign-key) + CODE_SIGN_KEY=(-c code-sign-key) + fi + + # Convert certificates from PEM format (ASCII) to DER (binary). This could be + # done when the certificates are created, but the resulting binary files are + # not as nice to store in source control. + for cert in PK KEK db vendor ; do + openssl x509 \ + -inform PEM -in "${SBKEYS}/${cert}.crt" \ + -outform DER -out "${SBKEYS}/${cert}.cer" + done + + # For signing the grub config, we need to embed the GPG public key in binary + # form, which is similarly awkward to store in source control. + gpg --import "${SBKEYS}/config-sign.key" + if [ "${SB_KEY_SOURCE}" == "aws" ] ; then + gpg --card-status + fi + gpg --export > "${SBKEYS}/config-sign.pubkey" + gpg --list-keys +fi + +# shim expects the following data structure in `.vendor_cert`: +# +# struct { +# uint32_t vendor_authorized_size; +# uint32_t vendor_deauthorized_size; +# uint32_t vendor_authorized_offset; +# uint32_t vendor_deauthorized_offset; +# } cert_table; +# +cert_table() { + local input output size offset uint32_t + input="${1:?}" + output="${2:?}" + size="$(stat -c %s "${input}")" + rm -f "${output}" + # The cert payload is offset by four 4-byte uint32_t values in the header. + offset="$((4 * 4))" + for n in "${size}" 0 "${offset}" "$(( size + offset ))" ; do + printf -v uint32_t '\\x%02x\\x%02x\\x%02x\\x%02x' \ + $((n & 255)) $((n >> 8 & 255)) $((n >> 16 & 255)) $((n >> 24 & 255)) + printf "${uint32_t}" >> "${output}" + done + cat "${input}" >> "${output}" + # Zero-pad the output to the expected section size. Otherwise a subsequent + # `objcopy` operation on the same section might fail to replace it, if the + # new vendor certificate is larger than this one. + truncate -s 4096 "${output}" +} + +# Helper function to log the object layout before and after changes. +objdumpcopy() { + local obj objdump objcopy + obj="${1:?}" + shift + objdump="${ARCH}-bottlerocket-linux-gnu-objdump" + objcopy="${ARCH}-bottlerocket-linux-gnu-objcopy" + "${objdump}" -h "${obj}" + "${objcopy}" "${@}" "${obj}" + "${objdump}" -h "${obj}" +} + +pushd "${EFI_MOUNT}/EFI/BOOT" >/dev/null +shims=(boot*.efi) +shim="${shims[0]}" +grubs=(grub*.efi) +grub="${grubs[0]}" +mokms=(mm*.efi) +mokm="${mokms[0]}" +if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + # Convert the vendor certificate to the expected format. + cert_table "${SBKEYS}/vendor.cer" "${SBKEYS}/vendor.obj" + + # Replace the embedded vendor certificate, then sign shim with the db key. + objdumpcopy "${shim}" \ + --update-section ".vendor_cert=${SBKEYS}/vendor.obj" + pesign -i "${shim}" -o "${shim}.signed" -s "${SHIM_SIGN_KEY[@]}" + mv "${shim}.signed" "${shim}" + pesigcheck -i "${shim}" -n 0 -c "${SBKEYS}/db.cer" + + # Sign the MOK manager as well. + pesign -i "${mokm}" -o "${mokm}.signed" -s "${CODE_SIGN_KEY[@]}" + mv "${mokm}.signed" "${mokm}" + pesigcheck -i "${mokm}" -n 0 -c "${SBKEYS}/vendor.cer" + + # Replace the embedded gpg public key, then sign grub with the vendor key. 
+ objdumpcopy "${grub}" \ + --file-alignment 4096 \ + --update-section ".pubkey=${SBKEYS}/config-sign.pubkey" + pesign -i "${grub}" -o "${grub}.signed" -s "${CODE_SIGN_KEY[@]}" + mv "${grub}.signed" "${grub}" + pesigcheck -i "${grub}" -n 0 -c "${SBKEYS}/vendor.cer" +else + # Generate a zero-sized certificate in the expected format. + cert_table /dev/null "${SBKEYS}/vendor.obj" + + # Replace the embedded vendor certificate with the zero-sized one, which shim + # will ignore when Secure Boot is disabled. + objdumpcopy "${shim}" \ + --update-section ".vendor_cert=${SBKEYS}/vendor.obj" + + # Remove the embedded gpg public key to disable GRUB's signature checks. + objdumpcopy "${grub}" \ + --file-alignment 4096 \ + --remove-section ".pubkey" +fi +popd >/dev/null + dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count="${partsize[EFI-A]}" mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 1024)) mmd -i "${EFI_IMAGE}" ::/EFI mmd -i "${EFI_IMAGE}" ::/EFI/BOOT mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT +if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + # Make the signing certificate available on the EFI system partition so it + # can be imported through the firmware setup UI on bare metal systems. + mcopy -i "${EFI_IMAGE}" "${SBKEYS}"/db.{crt,cer} ::/EFI/BOOT +fi dd if="${EFI_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" # Ensure that the grub directory exists. @@ -267,6 +449,15 @@ mkdir -p "${ROOT_MOUNT}/boot/grub" # Now that we're done messing with /, move /boot out of it mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" +if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + pushd "${BOOT_MOUNT}" >/dev/null + vmlinuz="vmlinuz" + pesign -i "${vmlinuz}" -o "${vmlinuz}.signed" -s "${CODE_SIGN_KEY[@]}" + mv "${vmlinuz}.signed" "${vmlinuz}" + pesigcheck -i "${vmlinuz}" -n 0 -c "${SBKEYS}/vendor.cer" + popd >/dev/null +fi + # Set the Bottlerocket variant, version, and build-id SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" VERSION="${VERSION_ID} (${VARIANT})" @@ -352,12 +543,29 @@ else INITRD="" fi -cat < "${BOOT_MOUNT}/grub/grub.cfg" +# If UEFI_SECURE_BOOT is set, disable interactive edits. Otherwise the intended +# kernel command line parameters could be changed if the boot fails. Disable +# signature checking as well, since grub.cfg will have already been verified +# before we reach this point. bootconfig.data is generated at runtime and can't +# be signed with a trusted key, so continuing to check signatures would prevent +# it from being read. If boot fails, trigger an automatic reboot, since nothing +# can be changed for troubleshooting purposes. 
+if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + echo 'set superusers=""' > "${BOOT_MOUNT}/grub/grub.cfg" + echo 'set check_signatures="no"' >> "${BOOT_MOUNT}/grub/grub.cfg" + FALLBACK=$' echo "rebooting in 30 seconds..."\n' + FALLBACK+=$' sleep 30\n' + FALLBACK+=$' reboot\n' +else + FALLBACK="" +fi + +cat <> "${BOOT_MOUNT}/grub/grub.cfg" set default="0" set timeout="0" set dm_verity_root="${DM_VERITY_ROOT[@]}" -menuentry "${PRETTY_NAME} ${VERSION_ID}" { +menuentry "${PRETTY_NAME} ${VERSION_ID}" --unrestricted { linux (\$root)/vmlinuz \\ ${KERNEL_PARAMETERS} \\ ${BOOTCONFIG} \\ @@ -371,9 +579,16 @@ menuentry "${PRETTY_NAME} ${VERSION_ID}" { systemd.log_color=0 \\ systemd.show_status=true ${INITRD} + boot + ${FALLBACK} } EOF +if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + gpg --detach-sign "${BOOT_MOUNT}/grub/grub.cfg" + gpg --verify "${BOOT_MOUNT}/grub/grub.cfg.sig" +fi + # BOTTLEROCKET-BOOT-A mkdir -p "${BOOT_MOUNT}/lost+found" chmod -R go-rwx "${BOOT_MOUNT}" @@ -521,6 +736,21 @@ if [ "${OUTPUT_FMT}" == "vmdk" ] ; then -e "s/{{DATA_DISK_BYTES}}/${data_disk_bytes}/g" \ > "${ova_dir}/${ovf}" + # The manifest templates for Secure Boot expect the cert data for + # PK, KEK, db, and dbx. + if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then + pk_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/PK.cer")" + kek_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/KEK.cer")" + db_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/db.cer")" + dbx_empty_hash_hex="$(sha256sum /dev/null | awk '{ print $1 }')" + sed -i \ + -e "s/{{PK_CERT_DER_HEX}}/${pk_cert_der_hex}/g" \ + -e "s/{{KEK_CERT_DER_HEX}}/${kek_cert_der_hex}/g" \ + -e "s/{{DB_CERT_DER_HEX}}/${db_cert_der_hex}/g" \ + -e "s/{{DBX_EMPTY_HASH_HEX}}/${dbx_empty_hash_hex}/g" \ + "${ova_dir}/${ovf}" + fi + # Make sure we replaced all the '{{...}}' fields with real values. if grep -F -e '{{' -e '}}' "${ova_dir}/${ovf}" ; then echo "Failed to fully render the OVF template" >&2 From bc9d461ad4f5639c690a9194787246751e7e7488 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1031/1356] pubsys: register AMIs with Secure Boot support If Secure Boot is enabled for the variant, AMIs should be registered with the UEFI boot mode along with the relevant EFI variable payload. 
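For reference, an equivalent registration sketched with the AWS CLI; the IDs, names, and variable-store file below are placeholders rather than values produced by this change:

    # Sketch only: register an image with UEFI preferred and seeded Secure Boot variables.
    aws ec2 register-image \
      --name "secure-boot-example" \
      --architecture x86_64 \
      --virtualization-type hvm \
      --ena-support \
      --boot-mode uefi-preferred \
      --uefi-data "$(cat uefi-data.txt)" \
      --root-device-name /dev/xvda \
      --block-device-mappings 'DeviceName=/dev/xvda,Ebs={SnapshotId=snap-0123456789abcdef0,VolumeType=gp3}'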
Signed-off-by: Ben Cressey --- tools/pubsys/src/aws/ami/mod.rs | 4 ++++ tools/pubsys/src/aws/ami/register.rs | 27 ++++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 425fd344..24f75a76 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -50,6 +50,10 @@ pub(crate) struct AmiArgs { #[arg(short = 'v', long)] variant_manifest: PathBuf, + /// Path to the UEFI data + #[arg(short = 'e', long)] + uefi_data: PathBuf, + /// The architecture of the machine image #[arg(short = 'a', long, value_parser = parse_arch)] arch: ArchitectureValues, diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index e125a63c..9601409d 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -4,7 +4,7 @@ use aws_sdk_ec2::model::{ ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType, }; use aws_sdk_ec2::{Client as Ec2Client, Region}; -use buildsys::manifest; +use buildsys::manifest::{self, ImageFeature}; use coldsnap::{SnapshotUploader, SnapshotWaiter}; use log::{debug, info, warn}; use snafu::{ensure, OptionExt, ResultExt}; @@ -47,6 +47,11 @@ async fn _register_image( let (os_volume_size, data_volume_size) = image_layout.publish_image_sizes_gib(); + let uefi_data = + std::fs::read_to_string(&ami_args.uefi_data).context(error::LoadUefiDataSnafu { + path: &ami_args.uefi_data, + })?; + debug!("Uploading images into EBS snapshots in {}", region); let uploader = SnapshotUploader::new(ebs_client); let os_snapshot = @@ -117,11 +122,25 @@ async fn _register_image( block_device_mappings.push(data_bdm); } + let uefi_secure_boot_enabled = variant_manifest + .image_features() + .iter() + .flatten() + .any(|f| **f == ImageFeature::UefiSecureBoot); + + let (boot_mode, uefi_data) = if uefi_secure_boot_enabled { + (Some("uefi-preferred".into()), Some(uefi_data)) + } else { + (None, None) + }; + info!("Making register image call in {}", region); let register_response = ec2_client .register_image() .set_architecture(Some(ami_args.arch.clone())) .set_block_device_mappings(Some(block_device_mappings)) + .set_boot_mode(boot_mode) + .set_uefi_data(uefi_data) .set_description(ami_args.description.clone()) .set_ena_support(Some(ENA)) .set_name(Some(ami_args.name.clone())) @@ -271,6 +290,12 @@ mod error { source: buildsys::manifest::Error, }, + #[snafu(display("Failed to load UEFI data from {}: {}", path.display(), source))] + LoadUefiData { + path: PathBuf, + source: std::io::Error, + }, + #[snafu(display("Could not find image layout for {}", path.display()))] MissingImageLayout { path: PathBuf }, From 53d3ce7f1b45126e2d4b8cc0b30e334a443e4abc Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 9 May 2023 23:53:05 +0000 Subject: [PATCH 1032/1356] start-local-vm: add options to override firmware Both new options are meant to help with Secure Boot testing. The `firmware-code` option allows a custom edk2 build to be used, for example with aarch64 where the AAVMF build packaged by Fedora doesn't have the Secure Boot feature available. The `firmware-vars` option populates the initial firmware variables from a different file, and sets it to the correct size for aarch64. 
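To illustrate the mechanics, the override amounts to handing QEMU a different pair of pflash drives, with a private writable copy of the variable store; the firmware paths are distribution-specific assumptions and the image name is a placeholder:

    # Sketch only: boot a local image with explicit firmware code and variables.
    cp /usr/share/edk2/ovmf/OVMF_VARS.fd vars.fd   # per-VM writable variable store
    qemu-system-x86_64 \
      -enable-kvm -nographic -machine q35,smm=on \
      -drive if=pflash,format=raw,unit=0,file=/usr/share/edk2/ovmf/OVMF_CODE.fd,readonly=on \
      -drive if=pflash,format=raw,unit=1,file=vars.fd \
      -drive index=0,if=virtio,format=raw,file=bottlerocket.img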
Signed-off-by: Ben Cressey --- tools/start-local-vm | 70 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 5 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index c316488e..1045905e 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -1,6 +1,29 @@ #!/usr/bin/env bash # shellcheck disable=SC2054 # Arrays are formatted for passing args to other tools +# +# Common error handling +# +# +exit_trap_cmds=() + +on_exit() { + exit_trap_cmds+=( "$1" ) +} + +run_exit_trap_cmds() { + for cmd in "${exit_trap_cmds[@]}"; do + eval "${cmd}" + done +} + +trap run_exit_trap_cmds exit + +bail() { + >&2 echo "$@" + exit 1 +} + shopt -s nullglob arch=${BUILDSYS_ARCH} @@ -15,10 +38,6 @@ declare -A extra_files=() boot_image= data_image= -bail() { - >&2 echo "$@" - exit 1 -} if ! git_toplevel=$(git rev-parse --show-toplevel); then bail "Failed to get the root of the repo." @@ -59,6 +78,8 @@ Options: Bottlerocket image before launching the virtual machine (may be given multiple times); existing data on the private partition will be lost + --firmware-code override the default firmware executable file + --firmware-vars override the initial firmware variable storage file --help shows this usage text By default, the virtual machine's port 22 (SSH) will be exposed via the local @@ -118,6 +139,12 @@ parse_args() { fi extra_files[${local_file}]=${image_file} ;; + --firmware-code) + shift; firmware_code=$1 + ;; + --firmware-vars) + shift; firmware_vars=$1 + ;; *) usage_error "unknown option '$1'" ;; esac @@ -167,6 +194,35 @@ prepare_raw_images() { fi } +prepare_firmware() { + # Create local copies of the edk2 firmware variable storage, to help with + # faciliate Secure Boot testing where custom variables are needed for both + # architectures, but can't safely be reused across QEMU invocations. Also + # set reasonable defaults for both firmware files, if nothing more specific + # was requested. + local original_vars + + if [[ ${arch} = x86_64 ]]; then + firmware_code=${firmware_code:-/usr/share/edk2/ovmf/OVMF_CODE.fd} + original_vars=${firmware_vars:-/usr/share/edk2/ovmf/OVMF_VARS.fd} + firmware_vars="$(mktemp)" + on_exit "rm '${firmware_vars}'" + cp "${original_vars}" "${firmware_vars}" + fi + + if [[ ${arch} = aarch64 ]]; then + original_code=${firmware_code:-/usr/share/edk2/aarch64/QEMU_EFI.silent.fd} + original_vars=${firmware_vars:-/usr/share/edk2/aarch64/QEMU_VARS.fd} + firmware_code="$(mktemp)" + firmware_vars="$(mktemp)" + on_exit "rm '${firmware_code}' '${firmware_vars}'" + cat "${original_code}" /dev/zero \ + | head -c 64m > "${firmware_code}" + cat "${original_vars}" /dev/zero \ + | head -c 64m > "${firmware_vars}" + fi +} + create_extra_files() { # Explicitly instruct the kernel to send its output to the serial port on # x86 via a bootconfig initrd. Passing in settings via user-data would be @@ -213,6 +269,7 @@ inject_files() { local private_mount private_image private_mount=$(mktemp -d) private_image=$(mktemp) + on_exit "rm -rf '${private_mount}' '${private_image}'" for local_file in "${!extra_files[@]}"; do local image_file=${extra_files[${local_file}]} @@ -235,6 +292,8 @@ launch_vm() { -cpu host -smp "${vm_cpus}" -m "${vm_mem}" + -drive if=pflash,format=raw,unit=0,file="${firmware_code}",readonly=on + -drive if=pflash,format=raw,unit=1,file="${firmware_vars}" -drive index=0,if=virtio,format=raw,file="${boot_image}" ) @@ -251,11 +310,11 @@ launch_vm() { # emulated x86_64 chipset only to achieve parity. 
if [[ ${arch} = x86_64 ]]; then qemu_args+=( -global PIIX4_PM.acpi-root-pci-hotplug=off ) + qemu_args+=( -machine q35,smm=on ) fi if [[ ${arch} = aarch64 ]]; then qemu_args+=( -machine virt ) - qemu_args+=( -bios /usr/share/edk2/aarch64/QEMU_EFI.silent.fd ) fi if [[ -n ${data_image} ]]; then @@ -267,6 +326,7 @@ launch_vm() { parse_args "$@" prepare_raw_images +prepare_firmware create_extra_files inject_files launch_vm \ No newline at end of file From e4d72d6fb2368667693a133167fc09e8a98270e0 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 21 Jun 2023 20:32:32 +0000 Subject: [PATCH 1033/1356] tools/diff-kernel-config: adapt to new RPM naming scheme With the recent cut-over to the RPM macros being sourced from the SDK (commit 6aed1b97f4c5) the naming scheme for RPMs changed. The target architecture is no longer custom encoded in the prefix of the name (with the architecture suffix indicating which architecture the package has been built on), but properly reflected in the suffix. Adapt the `diff-kernel-config` to deal with that by adding another glob pattern. Resolve ambiguity if a change is diffed before and after that name change by picking the latest build. For determining the kernel version, go straight to the RPM meta data instead of trying to pick apart the name--due to `rpm2cpio` we already have a dependency on the `rpm` package anyway. Signed-off-by: Markus Boehme --- tools/diff-kernel-config | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 7e2c200c..350b978a 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -18,6 +18,10 @@ run_exit_trap_cmds() { trap run_exit_trap_cmds EXIT +warn() { + >&2 echo "Warning: $*" +} + bail() { if [[ $# -gt 0 ]]; then >&2 echo "Error: $*" @@ -187,6 +191,7 @@ for state in after before; do shopt -s nullglob kernel_rpms=( + ./build/rpms/bottlerocket-*kernel-"${kver}"-"${kver}".*."${arch}".rpm ./build/rpms/bottlerocket-"${arch}"-*kernel-"${kver}"-"${kver}".*.rpm ) shopt -u nullglob @@ -194,10 +199,15 @@ for state in after before; do case ${#kernel_rpms[@]} in 0) bail "No kernel RPM found for ${debug_id}" ;; 1) kernel_rpm=${kernel_rpms[0]} ;; - *) bail "More than one kernel RPM found for ${debug_id}" ;; + *) + # shellcheck disable=SC2012 # find(1) cannot sort by mtime + kernel_rpm=$(ls -1t "${kernel_rpms[@]}" | head -n 1) + warn "More than one kernel RPM found for ${debug_id}. Choosing '${kernel_rpm}' as the latest build." + ;; esac - kver_full=$(echo "${kernel_rpm}" | cut -d '-' -f 5) + kver_full=$(rpm --query --queryformat '%{VERSION}' "${kernel_rpm}") + # # Extract kernel config # @@ -209,9 +219,9 @@ for state in after before; do echo "config-${arch}-${variant}-${state} -> ${kver_full}" >> "${output_dir}"/kver_mapping - done #arch + done # arch - done #variant + done # variant done # state @@ -224,7 +234,7 @@ done # state # in the kernel-archive RPM from where it can be extracted. Here we extract the # latest version of the script, but any kernel version and arch will do. 
latest_kver=$(printf '%s\n' "${kernel_versions[@]}" | sort -V | tail -n1) -latest_archive_rpms=( ./build/rpms/bottlerocket-aarch64-kernel-"${latest_kver}"-archive-*.rpm ) +latest_archive_rpms=( ./build/rpms/bottlerocket-*kernel-"${latest_kver}"-archive-*.rpm ) diffconfig=$(mktemp --suffix -bottlerocket-diffconfig) on_exit "rm '${diffconfig}'" rpm2cpio "${latest_archive_rpms[0]}" \ From 039b41ee683db3222d792e034693efecf3448021 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 14 Jul 2023 14:51:33 +0000 Subject: [PATCH 1034/1356] actions: Use larger runners for nightly checks The nightly checks were recently added for performing extra validation by bypassing the GOPROXY cache. These have been failing due to using the default runners for execution. This updates the job definition to use the same runners we use for the per-PR build workflow to make sure there is enough disk space and other resources to actually perform the build. Signed-off-by: Sean McGinnis --- .github/workflows/nightly.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index fb748663..7282e2be 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -34,7 +34,9 @@ jobs: build: needs: list-variants - runs-on: ubuntu-latest + runs-on: + group: bottlerocket + labels: bottlerocket_ubuntu-latest_32-core continue-on-error: true strategy: matrix: From 4630f28979c5d687844d611ccad4f4eafbe9f483 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 14 Jul 2023 16:40:54 +0000 Subject: [PATCH 1035/1356] actions: Set nightly continue-on-failure to false With this set to true, even though steps were failing in the nightly runs, the overall workflow status was showing green. There are no other steps that need to run, so no reason to continue on failure for this job. If any of the jobs fail, we want that failure status to reflect in the overall run status. Signed-off-by: Sean McGinnis --- .github/workflows/nightly.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 7282e2be..279a61c1 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -37,7 +37,7 @@ jobs: runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_32-core - continue-on-error: true + continue-on-error: false strategy: matrix: variant: ${{ fromJson(needs.list-variants.outputs.variants) }} From b4d43e82debde407c8ff7572cbfe8890573529ec Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 21 Jul 2023 18:06:04 +0000 Subject: [PATCH 1036/1356] pubsys: Fix publish-ami argument groups With the upgrade to a newer version of the clap library in 91ec2a69baa636ab3a9d7bdf768ca16909fa4d3b, there was an error in translating the new derive settings for ArgGroups. This updates to the correct syntax so the correct arguments are recognized and functional. 
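For background, a minimal standalone sketch of the corrected clap 4 derive pattern; the struct and argument names are illustrative rather than pubsys's own:

    // Sketch only; requires clap = { version = "4", features = ["derive"] }.
    use clap::Parser;

    /// At least one of the two options must be present; both may be combined.
    #[derive(Debug, Parser)]
    #[group(id = "who", required = true, multiple = true)]
    struct Demo {
        #[arg(long, value_delimiter = ',', group = "who")]
        user_ids: Vec<String>,
        #[arg(long, value_delimiter = ',', group = "who")]
        group_names: Vec<String>,
    }

    fn main() {
        println!("{:?}", Demo::parse());
    }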
Signed-off-by: Sean McGinnis --- tools/pubsys/src/aws/publish_ami/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index e1277433..605de3f4 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -26,6 +26,7 @@ use std::iter::FromIterator; use std::path::PathBuf; #[derive(Debug, Parser)] +#[group(id = "who", required = true, multiple = true)] pub(crate) struct ModifyOptions { /// User IDs to give/remove access #[arg(long, value_delimiter = ',', group = "who")] @@ -43,7 +44,7 @@ pub(crate) struct ModifyOptions { /// Grants or revokes permissions to Bottlerocket AMIs #[derive(Debug, ClapArgs)] -#[group(required = true, multiple = true)] +#[group(id = "mode", required = true, multiple = false)] pub(crate) struct Who { /// Path to the JSON file containing regional AMI IDs to modify #[arg(long)] From 0fd07bbcb93ff7e61b9bde1282b10ec96824852d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 21 Jul 2023 17:16:54 +0000 Subject: [PATCH 1037/1356] infrasys: Remove indirect dependencies This removes indirect dependencies from the infrasys Cargo.toml file. Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 1 - tools/infrasys/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 23246a1c..5a0afa78 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1794,7 +1794,6 @@ dependencies = [ "simplelog", "snafu", "tokio", - "toml", "url", ] diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index a2538a09..98d54d9e 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -23,7 +23,6 @@ shell-words = "1" simplelog = "0.12" snafu = "0.7" tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } -toml = "0.5" url = "2" [dev-dependencies] From 8ce8e577510488dfe2780e3bb8e4903a755e1199 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 21 Jul 2023 17:30:57 +0000 Subject: [PATCH 1038/1356] pubsys: Remove indirect dependencies This removes indirect dependencies from the pubsys Cargo.toml file. Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 2 -- tools/pubsys/Cargo.toml | 2 -- 2 files changed, 4 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 5a0afa78..34e4f5d7 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2433,7 +2433,6 @@ dependencies = [ name = "pubsys" version = "0.1.0" dependencies = [ - "async-trait", "aws-config", "aws-credential-types", "aws-sdk-ebs", @@ -2450,7 +2449,6 @@ dependencies = [ "duct", "futures", "governor", - "http", "indicatif", "lazy_static", "log", diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index 8a63145a..bb2b6e98 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" publish = false [dependencies] -async-trait = "0.1" aws-config = "0.54" aws-credential-types = "0.54" aws-sdk-ebs = "0.24" @@ -24,7 +23,6 @@ coldsnap = { version = "0.5", default-features = false, features = ["aws-sdk-rus duct = "0.13" futures = "0.3" governor = "0.5" -http = "0.2" indicatif = "0.17" lazy_static = "1" log = "0.4" From 2e77a19ce195d33c8fe4e0e0a18684e2b5c6d957 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 21 Jul 2023 17:33:36 +0000 Subject: [PATCH 1039/1356] pubsys-setup: Remove indirect dependencies This removes indirect dependencies from the pubsys-setup Cargo.toml file. 
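As an aside, unused entries of this kind can be spotted with standard tooling; neither command below is part of this change and both are optional installs:

    cargo tree --invert toml                        # show which crates actually pull `toml` in
    cargo install cargo-machete && cargo machete    # report [dependencies] entries nothing uses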
Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 1 - tools/pubsys-setup/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 34e4f5d7..2abc9d7a 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2507,7 +2507,6 @@ dependencies = [ "simplelog", "snafu", "tempfile", - "toml", "url", ] diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml index e03fa2a6..f16852bc 100644 --- a/tools/pubsys-setup/Cargo.toml +++ b/tools/pubsys-setup/Cargo.toml @@ -17,5 +17,4 @@ shell-words = "1" simplelog = "0.12" snafu = "0.7" tempfile = "3" -toml = "0.5" url = { version = "2", features = ["serde"] } From 7c94a153dd22a25ad2700d9ad54efe042868ee39 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 21 Jul 2023 17:36:26 +0000 Subject: [PATCH 1040/1356] testsys-config: Remove indirect dependencies This removes indirect dependencies from the testsys-setup Cargo.toml file. Signed-off-by: Sean McGinnis --- tools/Cargo.lock | 3 --- tools/testsys-config/Cargo.toml | 3 --- 2 files changed, 6 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 2abc9d7a..81a8b0b4 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -3252,8 +3252,6 @@ dependencies = [ "bottlerocket-types", "bottlerocket-variant", "handlebars", - "home", - "lazy_static", "log", "maplit", "serde", @@ -3262,7 +3260,6 @@ dependencies = [ "snafu", "testsys-model", "toml", - "url", ] [[package]] diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index 2835f7a7..66790089 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -10,8 +10,6 @@ publish = false bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } handlebars = "4" -home = "0.5" -lazy_static = "1" log = "0.4" maplit="1" testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} @@ -20,4 +18,3 @@ serde_plain = "1" serde_yaml = "0.8" snafu = "0.7" toml = "0.5" -url = { version = "2", features = ["serde"] } From c48539c0a92c78b756c3dbafe9e3628d7d350a0a Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Mon, 24 Jul 2023 12:31:18 -0600 Subject: [PATCH 1041/1356] adds notice about bottlerocket website (#3286) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c3382ea7..2cfd7ba6 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,8 @@ Welcome to Bottlerocket! Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. -If you’re ready to jump right in, read one of our setup guides for running Bottlerocket in [Amazon EKS](QUICKSTART-EKS.md), [Amazon ECS](QUICKSTART-ECS.md), or [VMware](QUICKSTART-VMWARE.md). +To learn more about Bottlerocket, visit the [official Bottlerocket website and documentation](https://bottlerocket.dev/). +Otherwise, if you’re ready to jump right in, read one of our setup guides for running Bottlerocket in [Amazon EKS](QUICKSTART-EKS.md), [Amazon ECS](QUICKSTART-ECS.md), or [VMware](QUICKSTART-VMWARE.md). If you're interested in running Bottlerocket on bare metal servers, please refer to the [provisioning guide](PROVISIONING-METAL.md) to get started. Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. 
From 1eca99113963c20992a0658d41988103a59def2a Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Fri, 21 Jul 2023 11:49:23 -0700 Subject: [PATCH 1042/1356] build: use static files instead of /dev/null An unintended consequence of this `path = "/dev/null"` in the Cargo.toml files of packages and variants is that a change to the device causes a complete rebuild of all of Bottlerocket. So, for example, restarting a machine would cause Bottlerocket to rebuild all packages. This was an issue in the new Twoliter build system because each time Twoliter is executed, a container is created with a different /dev/null device, triggering a full rebuild. Though less cool, this commit points all packages at a single packages.rs file and all variants at a variants.rs file to fix this problem. --- packages/grub/Cargo.toml | 2 +- packages/kernel-5.10/Cargo.toml | 2 +- packages/kernel-5.15/Cargo.toml | 2 +- packages/kernel-6.1/Cargo.toml | 2 +- packages/kmod-5.10-nvidia/Cargo.toml | 2 +- packages/kmod-5.15-nvidia/Cargo.toml | 2 +- packages/kmod-6.1-nvidia/Cargo.toml | 2 +- packages/microcode/Cargo.toml | 2 +- packages/packages.rs | 7 +++++++ packages/shim/Cargo.toml | 2 +- 10 files changed, 16 insertions(+), 9 deletions(-) create mode 100644 packages/packages.rs diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 7c9e4b19..8ea9177b 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" [[package.metadata.build-package.external-files]] url = "https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm" diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 36dc4d97..502c8e45 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -10,7 +10,7 @@ variant-sensitive = "platform" package-name = "kernel-5.10" [lib] -path = "/dev/null" +path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index a1464a8a..24313643 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -10,7 +10,7 @@ variant-sensitive = "platform" package-name = "kernel-5.15" [lib] -path = "/dev/null" +path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 8965c4b1..f4b18d03 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -10,7 +10,7 @@ variant-sensitive = "platform" package-name = "kernel-6.1" [lib] -path = "/dev/null" +path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 40dcadde..2e53b318 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" [package.metadata.build-package] package-name = "kmod-5.10-nvidia" diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index f8ccbdea..59a8beec 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" [package.metadata.build-package] package-name = "kmod-5.15-nvidia" diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index d29e18f2..a597333d 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" [package.metadata.build-package] package-name = "kmod-6.1-nvidia" diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index 5380a1ad..3cb5d305 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" # Use latest-srpm-urls.sh to get these. diff --git a/packages/packages.rs b/packages/packages.rs new file mode 100644 index 00000000..36be246d --- /dev/null +++ b/packages/packages.rs @@ -0,0 +1,7 @@ +/*! + +This is an intentionally empty file that all of the package `Cargo.toml` files can point to as their +`lib.rs`. The build system uses `build.rs` to invoke `buildsys` but Cargo needs something to compile +so we give it an empty `lib.rs` file. + +!*/ diff --git a/packages/shim/Cargo.toml b/packages/shim/Cargo.toml index feb02fb6..40eacb8b 100644 --- a/packages/shim/Cargo.toml +++ b/packages/shim/Cargo.toml @@ -6,7 +6,7 @@ publish = false build = "../build.rs" [lib] -path = "/dev/null" +path = "../packages.rs" [[package.metadata.build-package.external-files]] url = "https://github.com/rhboot/shim/archive/15.7/shim-15.7.tar.gz" From 2dd02e524bd5ee08b080075bdf51b33ee47ca7d9 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Tue, 25 Jul 2023 15:59:08 -0700 Subject: [PATCH 1043/1356] docker-go: change docker run mounts Twoliter will be calling this script from within a container that has the host's docker socket mounted. It is necessary to match internal and external paths for any state that we wish to preserve on the host. Because the data we want to cache is at $GOPATH/pkg/go, it was necessary to create this as a subdirectory under .gomodcache and mount that so that we could point GOPATH to .gomodcache. --- tools/docker-go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/docker-go b/tools/docker-go index d0b625c4..177d1075 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -51,6 +51,9 @@ parse_args() { required_arg "--command" "${COMMAND}" } +# We need to mount the ../.. parent of GO_MOD_CACHE +GOPATH=$(cd "${GO_MOD_CACHE}/../.." 
&& pwd) + DOCKER_RUN_ARGS="--network=host" parse_args "${@}" @@ -78,14 +81,14 @@ done docker run --rm \ -e GOCACHE='/tmp/.cache' \ - -e GOPATH='/tmp/go' \ + -e GOPATH="${GOPATH}" \ "${go_env[@]}" \ "${proxy_env[@]}" \ --user "$(id -u):$(id -g)" \ --security-opt label:disable \ ${DOCKER_RUN_ARGS} \ - -v "${GO_MOD_CACHE}":/tmp/go/pkg/mod \ - -v "${GO_MODULE_PATH}":/usr/src/module \ - -w /usr/src/module \ + -v "${GOPATH}":"${GOPATH}" \ + -v "${GO_MODULE_PATH}":"${GO_MODULE_PATH}" \ + -w "${GO_MODULE_PATH}" \ "${SDK_IMAGE}" \ bash -c "${COMMAND}" From cbf165d1038c21bc0397d57d0923964b700d8dfe Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 26 Jul 2023 11:44:15 +0200 Subject: [PATCH 1044/1356] kernel-6.1: cherry-pick fix for CVE-2023-20593 ("Zenbleed") The fix for CVE-2023-20593 is currently only available in the kernel.org upstream 6.1 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Signed-off-by: Markus Boehme --- ...-the-errata-checking-functionality-u.patch | 184 ++++++++++++++++++ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 5 + 3 files changed, 361 insertions(+) create mode 100644 packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch create mode 100644 packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch diff --git a/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch new file mode 100644 index 00000000..2accaae4 --- /dev/null +++ b/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch @@ -0,0 +1,184 @@ +From 5fc203d8d3ed416bee054e9f2e6513df51d74577 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:31:32 +0200 +Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up + +Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a + +Avoid new and remove old forward declarations. + +No functional changes. + +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- + 1 file changed, 67 insertions(+), 72 deletions(-) + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index d2dbbc50b3a7..16b05029e068 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -27,11 +27,6 @@ + + #include "cpu.h" + +-static const int amd_erratum_383[]; +-static const int amd_erratum_400[]; +-static const int amd_erratum_1054[]; +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); +- + /* + * nodes_per_socket: Stores the number of nodes per socket. + * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX +@@ -39,6 +34,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); + */ + static u32 nodes_per_socket = 1; + ++/* ++ * AMD errata checking ++ * ++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or ++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that ++ * have an OSVW id assigned, which it takes as first argument. Both take a ++ * variable number of family-specific model-stepping ranges created by ++ * AMD_MODEL_RANGE(). 
++ * ++ * Example: ++ * ++ * const int amd_erratum_319[] = ++ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), ++ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), ++ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); ++ */ ++ ++#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } ++#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } ++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ ++ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) ++#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) ++#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) ++#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) ++ ++static const int amd_erratum_400[] = ++ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), ++ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); ++ ++static const int amd_erratum_383[] = ++ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); ++ ++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ ++static const int amd_erratum_1054[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); ++ ++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) ++{ ++ int osvw_id = *erratum++; ++ u32 range; ++ u32 ms; ++ ++ if (osvw_id >= 0 && osvw_id < 65536 && ++ cpu_has(cpu, X86_FEATURE_OSVW)) { ++ u64 osvw_len; ++ ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); ++ if (osvw_id < osvw_len) { ++ u64 osvw_bits; ++ ++ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), ++ osvw_bits); ++ return osvw_bits & (1ULL << (osvw_id & 0x3f)); ++ } ++ } ++ ++ /* OSVW unavailable or ID unknown, match family-model-stepping range */ ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping; ++ while ((range = *erratum++)) ++ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && ++ (ms >= AMD_MODEL_RANGE_START(range)) && ++ (ms <= AMD_MODEL_RANGE_END(range))) ++ return true; ++ ++ return false; ++} ++ + static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) + { + u32 gprs[8] = { 0 }; +@@ -1100,73 +1162,6 @@ static const struct cpu_dev amd_cpu_dev = { + + cpu_dev_register(amd_cpu_dev); + +-/* +- * AMD errata checking +- * +- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or +- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that +- * have an OSVW id assigned, which it takes as first argument. Both take a +- * variable number of family-specific model-stepping ranges created by +- * AMD_MODEL_RANGE(). +- * +- * Example: +- * +- * const int amd_erratum_319[] = +- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), +- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), +- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); +- */ +- +-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } +-#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } +-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ +- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) +- +-static const int amd_erratum_400[] = +- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), +- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); +- +-static const int amd_erratum_383[] = +- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +- +-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ +-static const int amd_erratum_1054[] = +- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); +- +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) +-{ +- int osvw_id = *erratum++; +- u32 range; +- u32 ms; +- +- if (osvw_id >= 0 && osvw_id < 65536 && +- cpu_has(cpu, X86_FEATURE_OSVW)) { +- u64 osvw_len; +- +- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); +- if (osvw_id < osvw_len) { +- u64 osvw_bits; +- +- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), +- osvw_bits); +- return osvw_bits & (1ULL << (osvw_id & 0x3f)); +- } +- } +- +- /* OSVW unavailable or ID unknown, match family-model-stepping range */ +- ms = (cpu->x86_model << 4) | cpu->x86_stepping; +- while ((range = *erratum++)) +- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && +- (ms >= AMD_MODEL_RANGE_START(range)) && +- (ms <= AMD_MODEL_RANGE_END(range))) +- return true; +- +- return false; +-} +- + void set_dr_addr_mask(unsigned long mask, int dr) + { + if (!boot_cpu_has(X86_FEATURE_BPEXT)) +-- +2.25.1 + diff --git a/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch new file mode 100644 index 00000000..c18f3b57 --- /dev/null +++ b/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch @@ -0,0 +1,172 @@ +From ed9b87010aa84c157096f98c322491e9af8e8f07 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:41:28 +0200 +Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix + +Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 + +Add a fix for the Zen2 VZEROUPPER data corruption bug where under +certain circumstances executing VZEROUPPER can cause register +corruption or leak data. + +The optimal fix is through microcode but in the case the proper +microcode revision has not been applied, enable a fallback fix using +a chicken bit. 
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/microcode.h | 1 + + arch/x86/include/asm/microcode_amd.h | 2 + + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ + arch/x86/kernel/cpu/common.c | 2 + + 5 files changed, 66 insertions(+) + +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h +index 79b1d009e34e..19a0b4005ffa 100644 +--- a/arch/x86/include/asm/microcode.h ++++ b/arch/x86/include/asm/microcode.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + struct ucode_patch { + struct list_head plist; +diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h +index e6662adf3af4..9675c621c1ca 100644 +--- a/arch/x86/include/asm/microcode_amd.h ++++ b/arch/x86/include/asm/microcode_amd.h +@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); + extern void load_ucode_amd_ap(unsigned int family); + extern int __init save_microcode_in_initrd_amd(unsigned int family); + void reload_ucode_amd(unsigned int cpu); ++extern void amd_check_microcode(void); + #else + static inline void __init load_ucode_amd_bsp(unsigned int family) {} + static inline void load_ucode_amd_ap(unsigned int family) {} + static inline int __init + save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } + static inline void reload_ucode_amd(unsigned int cpu) {} ++static inline void amd_check_microcode(void) {} + #endif + #endif /* _ASM_X86_MICROCODE_AMD_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 117e4e977b55..846067e1ee8b 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -543,6 +543,7 @@ + #define MSR_AMD64_DE_CFG 0xc0011029 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) ++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 + + #define MSR_AMD64_BU_CFG2 0xc001102a + #define MSR_AMD64_IBSFETCHCTL 0xc0011030 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 16b05029e068..7f4eb8b027cc 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -70,6 +70,11 @@ static const int amd_erratum_383[] = + static const int amd_erratum_1054[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + ++static const int amd_zenbleed[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -978,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) + } + } + ++static bool cpu_has_zenbleed_microcode(void) ++{ ++ u32 good_rev = 0; ++ ++ switch (boot_cpu_data.x86_model) { ++ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; ++ case 0x60 ... 0x67: good_rev = 0x0860010b; break; ++ case 0x68 ... 0x6f: good_rev = 0x08608105; break; ++ case 0x70 ... 0x7f: good_rev = 0x08701032; break; ++ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; ++ ++ default: ++ return false; ++ break; ++ } ++ ++ if (boot_cpu_data.microcode < good_rev) ++ return false; ++ ++ return true; ++} ++ ++static void zenbleed_check(struct cpuinfo_x86 *c) ++{ ++ if (!cpu_has_amd_erratum(c, amd_zenbleed)) ++ return; ++ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (!cpu_has(c, X86_FEATURE_AVX)) ++ return; ++ ++ if (!cpu_has_zenbleed_microcode()) { ++ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); ++ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } else { ++ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } ++} ++ + static void init_amd(struct cpuinfo_x86 *c) + { + early_init_amd(c); +@@ -1067,6 +1113,8 @@ static void init_amd(struct cpuinfo_x86 *c) + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); + + check_null_seg_clears_base(c); ++ ++ zenbleed_check(c); + } + + #ifdef CONFIG_X86_32 +@@ -1196,3 +1244,15 @@ u32 amd_get_highest_perf(void) + return 255; + } + EXPORT_SYMBOL_GPL(amd_get_highest_perf); ++ ++static void zenbleed_check_cpu(void *unused) ++{ ++ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); ++ ++ zenbleed_check(c); ++} ++ ++void amd_check_microcode(void) ++{ ++ on_each_cpu(zenbleed_check_cpu, NULL, 1); ++} +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index c34bdba57993..d298d70f74ce 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -2346,6 +2346,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) + + perf_check_microcode(); + ++ amd_check_microcode(); ++ + store_cpu_caps(&curr_info); + + if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, +-- +2.25.1 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 6b5df7e2..d1028e14 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -22,6 +22,11 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch +# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving +# upstream to 6.1.41 or later. +Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 54d8cf30ae18124bbe5711c23649ef513de70ccc Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 26 Jul 2023 11:54:43 +0200 Subject: [PATCH 1045/1356] kernel-5.15: cherry-pick fix for CVE-2023-20593 ("Zenbleed") The fix for CVE-2023-20593 is currently only available in the kernel.org upstream 5.15 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. 
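For reference, patches like the two files added below can be regenerated from the kernel.org stable tree once the fix lands there; the remote name and patch numbering are assumptions, and the commit placeholder has to be resolved by hand:

    git remote add stable https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
    git fetch stable linux-5.15.y
    git log --oneline --grep=Zenbleed stable/linux-5.15.y          # locate the backport commits
    git format-patch --start-number 5001 -2 <last-backport-sha>    # emit 5001-*.patch and 5002-*.patch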
Signed-off-by: Markus Boehme --- ...-the-errata-checking-functionality-u.patch | 184 ++++++++++++++++++ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ++++++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 5 + 3 files changed, 361 insertions(+) create mode 100644 packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch create mode 100644 packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch diff --git a/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch new file mode 100644 index 00000000..72214460 --- /dev/null +++ b/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch @@ -0,0 +1,184 @@ +From 5398be2c48aa22189c3992a0d92288e67853cb47 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:31:32 +0200 +Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up + +Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a + +Avoid new and remove old forward declarations. + +No functional changes. + +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- + 1 file changed, 67 insertions(+), 72 deletions(-) + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 83bf26eaff2e..f8228a929ff3 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -27,11 +27,6 @@ + + #include "cpu.h" + +-static const int amd_erratum_383[]; +-static const int amd_erratum_400[]; +-static const int amd_erratum_1054[]; +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); +- + /* + * nodes_per_socket: Stores the number of nodes per socket. + * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX +@@ -39,6 +34,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); + */ + static u32 nodes_per_socket = 1; + ++/* ++ * AMD errata checking ++ * ++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or ++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that ++ * have an OSVW id assigned, which it takes as first argument. Both take a ++ * variable number of family-specific model-stepping ranges created by ++ * AMD_MODEL_RANGE(). ++ * ++ * Example: ++ * ++ * const int amd_erratum_319[] = ++ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), ++ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), ++ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); ++ */ ++ ++#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } ++#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } ++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ ++ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) ++#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) ++#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) ++#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) ++ ++static const int amd_erratum_400[] = ++ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), ++ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); ++ ++static const int amd_erratum_383[] = ++ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); ++ ++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ ++static const int amd_erratum_1054[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); ++ ++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) ++{ ++ int osvw_id = *erratum++; ++ u32 range; ++ u32 ms; ++ ++ if (osvw_id >= 0 && osvw_id < 65536 && ++ cpu_has(cpu, X86_FEATURE_OSVW)) { ++ u64 osvw_len; ++ ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); ++ if (osvw_id < osvw_len) { ++ u64 osvw_bits; ++ ++ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), ++ osvw_bits); ++ return osvw_bits & (1ULL << (osvw_id & 0x3f)); ++ } ++ } ++ ++ /* OSVW unavailable or ID unknown, match family-model-stepping range */ ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping; ++ while ((range = *erratum++)) ++ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && ++ (ms >= AMD_MODEL_RANGE_START(range)) && ++ (ms <= AMD_MODEL_RANGE_END(range))) ++ return true; ++ ++ return false; ++} ++ + static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) + { + u32 gprs[8] = { 0 }; +@@ -1125,73 +1187,6 @@ static const struct cpu_dev amd_cpu_dev = { + + cpu_dev_register(amd_cpu_dev); + +-/* +- * AMD errata checking +- * +- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or +- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that +- * have an OSVW id assigned, which it takes as first argument. Both take a +- * variable number of family-specific model-stepping ranges created by +- * AMD_MODEL_RANGE(). +- * +- * Example: +- * +- * const int amd_erratum_319[] = +- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), +- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), +- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); +- */ +- +-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } +-#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } +-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ +- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) +- +-static const int amd_erratum_400[] = +- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), +- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); +- +-static const int amd_erratum_383[] = +- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +- +-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ +-static const int amd_erratum_1054[] = +- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); +- +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) +-{ +- int osvw_id = *erratum++; +- u32 range; +- u32 ms; +- +- if (osvw_id >= 0 && osvw_id < 65536 && +- cpu_has(cpu, X86_FEATURE_OSVW)) { +- u64 osvw_len; +- +- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); +- if (osvw_id < osvw_len) { +- u64 osvw_bits; +- +- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), +- osvw_bits); +- return osvw_bits & (1ULL << (osvw_id & 0x3f)); +- } +- } +- +- /* OSVW unavailable or ID unknown, match family-model-stepping range */ +- ms = (cpu->x86_model << 4) | cpu->x86_stepping; +- while ((range = *erratum++)) +- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && +- (ms >= AMD_MODEL_RANGE_START(range)) && +- (ms <= AMD_MODEL_RANGE_END(range))) +- return true; +- +- return false; +-} +- + void set_dr_addr_mask(unsigned long mask, int dr) + { + if (!boot_cpu_has(X86_FEATURE_BPEXT)) +-- +2.25.1 + diff --git a/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch new file mode 100644 index 00000000..4e6d6194 --- /dev/null +++ b/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch @@ -0,0 +1,172 @@ +From be824fdb827dc06f77a31122949fe1bc011e3e1e Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:41:28 +0200 +Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix + +Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 + +Add a fix for the Zen2 VZEROUPPER data corruption bug where under +certain circumstances executing VZEROUPPER can cause register +corruption or leak data. + +The optimal fix is through microcode but in the case the proper +microcode revision has not been applied, enable a fallback fix using +a chicken bit. 
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/microcode.h | 1 + + arch/x86/include/asm/microcode_amd.h | 2 + + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ + arch/x86/kernel/cpu/common.c | 2 + + 5 files changed, 66 insertions(+) + +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h +index 1bf064a14b95..4ca377efc986 100644 +--- a/arch/x86/include/asm/microcode.h ++++ b/arch/x86/include/asm/microcode.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + struct ucode_patch { + struct list_head plist; +diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h +index a645b25ee442..403a8e76b310 100644 +--- a/arch/x86/include/asm/microcode_amd.h ++++ b/arch/x86/include/asm/microcode_amd.h +@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); + extern void load_ucode_amd_ap(unsigned int family); + extern int __init save_microcode_in_initrd_amd(unsigned int family); + void reload_ucode_amd(unsigned int cpu); ++extern void amd_check_microcode(void); + #else + static inline void __init load_ucode_amd_bsp(unsigned int family) {} + static inline void load_ucode_amd_ap(unsigned int family) {} + static inline int __init + save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } + static inline void reload_ucode_amd(unsigned int cpu) {} ++static inline void amd_check_microcode(void) {} + #endif + #endif /* _ASM_X86_MICROCODE_AMD_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 3588b799c63f..e78755ed82cf 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -503,6 +503,7 @@ + #define MSR_AMD64_DE_CFG 0xc0011029 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) ++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 + + #define MSR_AMD64_BU_CFG2 0xc001102a + #define MSR_AMD64_IBSFETCHCTL 0xc0011030 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index f8228a929ff3..3daceadf5d1f 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -70,6 +70,11 @@ static const int amd_erratum_383[] = + static const int amd_erratum_1054[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + ++static const int amd_zenbleed[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1002,6 +1007,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) + } + } + ++static bool cpu_has_zenbleed_microcode(void) ++{ ++ u32 good_rev = 0; ++ ++ switch (boot_cpu_data.x86_model) { ++ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; ++ case 0x60 ... 0x67: good_rev = 0x0860010b; break; ++ case 0x68 ... 0x6f: good_rev = 0x08608105; break; ++ case 0x70 ... 0x7f: good_rev = 0x08701032; break; ++ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; ++ ++ default: ++ return false; ++ break; ++ } ++ ++ if (boot_cpu_data.microcode < good_rev) ++ return false; ++ ++ return true; ++} ++ ++static void zenbleed_check(struct cpuinfo_x86 *c) ++{ ++ if (!cpu_has_amd_erratum(c, amd_zenbleed)) ++ return; ++ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (!cpu_has(c, X86_FEATURE_AVX)) ++ return; ++ ++ if (!cpu_has_zenbleed_microcode()) { ++ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); ++ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } else { ++ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } ++} ++ + static void init_amd(struct cpuinfo_x86 *c) + { + early_init_amd(c); +@@ -1092,6 +1138,8 @@ static void init_amd(struct cpuinfo_x86 *c) + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); + + check_null_seg_clears_base(c); ++ ++ zenbleed_check(c); + } + + #ifdef CONFIG_X86_32 +@@ -1221,3 +1269,15 @@ u32 amd_get_highest_perf(void) + return 255; + } + EXPORT_SYMBOL_GPL(amd_get_highest_perf); ++ ++static void zenbleed_check_cpu(void *unused) ++{ ++ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); ++ ++ zenbleed_check(c); ++} ++ ++void amd_check_microcode(void) ++{ ++ on_each_cpu(zenbleed_check_cpu, NULL, 1); ++} +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index f7b4bbe71cdf..69752745a5b1 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -2185,6 +2185,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) + + perf_check_microcode(); + ++ amd_check_microcode(); ++ + store_cpu_caps(&curr_info); + + if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, +-- +2.25.1 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 0783e99f..26bfb990 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -22,6 +22,11 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch +# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving +# upstream to 5.15.122 or later. +Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From f371c9fcc9835845f65c8ee600412b1846f8c226 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 26 Jul 2023 12:00:07 +0200 Subject: [PATCH 1046/1356] kernel-5.10: cherry-pick fix for CVE-2023-20593 ("Zenbleed") The fix for CVE-2023-20593 is currently only available in the kernel.org upstream 5.10 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. 
Signed-off-by: Markus Boehme --- ...-the-errata-checking-functionality-u.patch | 184 ++++++++++++++++++ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 5 + 3 files changed, 361 insertions(+) create mode 100644 packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch create mode 100644 packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch diff --git a/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch new file mode 100644 index 00000000..b5b84593 --- /dev/null +++ b/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch @@ -0,0 +1,184 @@ +From 191b8f9b0e3708e8325d8d28e1005a1fbe5e3991 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:31:32 +0200 +Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up + +Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a + +Avoid new and remove old forward declarations. + +No functional changes. + +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- + 1 file changed, 67 insertions(+), 72 deletions(-) + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 89a9b7754476..6eea37f827b1 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -28,11 +28,6 @@ + + #include "cpu.h" + +-static const int amd_erratum_383[]; +-static const int amd_erratum_400[]; +-static const int amd_erratum_1054[]; +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); +- + /* + * nodes_per_socket: Stores the number of nodes per socket. + * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX +@@ -40,6 +35,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); + */ + static u32 nodes_per_socket = 1; + ++/* ++ * AMD errata checking ++ * ++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or ++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that ++ * have an OSVW id assigned, which it takes as first argument. Both take a ++ * variable number of family-specific model-stepping ranges created by ++ * AMD_MODEL_RANGE(). ++ * ++ * Example: ++ * ++ * const int amd_erratum_319[] = ++ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), ++ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), ++ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); ++ */ ++ ++#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } ++#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } ++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ ++ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) ++#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) ++#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) ++#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) ++ ++static const int amd_erratum_400[] = ++ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), ++ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); ++ ++static const int amd_erratum_383[] = ++ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); ++ ++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ ++static const int amd_erratum_1054[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); ++ ++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) ++{ ++ int osvw_id = *erratum++; ++ u32 range; ++ u32 ms; ++ ++ if (osvw_id >= 0 && osvw_id < 65536 && ++ cpu_has(cpu, X86_FEATURE_OSVW)) { ++ u64 osvw_len; ++ ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); ++ if (osvw_id < osvw_len) { ++ u64 osvw_bits; ++ ++ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), ++ osvw_bits); ++ return osvw_bits & (1ULL << (osvw_id & 0x3f)); ++ } ++ } ++ ++ /* OSVW unavailable or ID unknown, match family-model-stepping range */ ++ ms = (cpu->x86_model << 4) | cpu->x86_stepping; ++ while ((range = *erratum++)) ++ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && ++ (ms >= AMD_MODEL_RANGE_START(range)) && ++ (ms <= AMD_MODEL_RANGE_END(range))) ++ return true; ++ ++ return false; ++} ++ + static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) + { + u32 gprs[8] = { 0 }; +@@ -1153,73 +1215,6 @@ static const struct cpu_dev amd_cpu_dev = { + + cpu_dev_register(amd_cpu_dev); + +-/* +- * AMD errata checking +- * +- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or +- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that +- * have an OSVW id assigned, which it takes as first argument. Both take a +- * variable number of family-specific model-stepping ranges created by +- * AMD_MODEL_RANGE(). +- * +- * Example: +- * +- * const int amd_erratum_319[] = +- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), +- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), +- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); +- */ +- +-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } +-#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } +-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ +- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) +- +-static const int amd_erratum_400[] = +- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), +- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); +- +-static const int amd_erratum_383[] = +- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +- +-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ +-static const int amd_erratum_1054[] = +- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); +- +-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) +-{ +- int osvw_id = *erratum++; +- u32 range; +- u32 ms; +- +- if (osvw_id >= 0 && osvw_id < 65536 && +- cpu_has(cpu, X86_FEATURE_OSVW)) { +- u64 osvw_len; +- +- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); +- if (osvw_id < osvw_len) { +- u64 osvw_bits; +- +- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), +- osvw_bits); +- return osvw_bits & (1ULL << (osvw_id & 0x3f)); +- } +- } +- +- /* OSVW unavailable or ID unknown, match family-model-stepping range */ +- ms = (cpu->x86_model << 4) | cpu->x86_stepping; +- while ((range = *erratum++)) +- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && +- (ms >= AMD_MODEL_RANGE_START(range)) && +- (ms <= AMD_MODEL_RANGE_END(range))) +- return true; +- +- return false; +-} +- + void set_dr_addr_mask(unsigned long mask, int dr) + { + if (!boot_cpu_has(X86_FEATURE_BPEXT)) +-- +2.25.1 + diff --git a/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch new file mode 100644 index 00000000..b25384ba --- /dev/null +++ b/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch @@ -0,0 +1,172 @@ +From 93df00f9d48d48466ddbe01a06eaaf3311ecfb53 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 15 Jul 2023 13:41:28 +0200 +Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix + +Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 + +Add a fix for the Zen2 VZEROUPPER data corruption bug where under +certain circumstances executing VZEROUPPER can cause register +corruption or leak data. + +The optimal fix is through microcode but in the case the proper +microcode revision has not been applied, enable a fallback fix using +a chicken bit. 
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/microcode.h | 1 + + arch/x86/include/asm/microcode_amd.h | 2 + + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ + arch/x86/kernel/cpu/common.c | 2 + + 5 files changed, 66 insertions(+) + +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h +index 509cc0262fdc..394605e59f2b 100644 +--- a/arch/x86/include/asm/microcode.h ++++ b/arch/x86/include/asm/microcode.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + struct ucode_patch { + struct list_head plist; +diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h +index a645b25ee442..403a8e76b310 100644 +--- a/arch/x86/include/asm/microcode_amd.h ++++ b/arch/x86/include/asm/microcode_amd.h +@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); + extern void load_ucode_amd_ap(unsigned int family); + extern int __init save_microcode_in_initrd_amd(unsigned int family); + void reload_ucode_amd(unsigned int cpu); ++extern void amd_check_microcode(void); + #else + static inline void __init load_ucode_amd_bsp(unsigned int family) {} + static inline void load_ucode_amd_ap(unsigned int family) {} + static inline int __init + save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } + static inline void reload_ucode_amd(unsigned int cpu) {} ++static inline void amd_check_microcode(void) {} + #endif + #endif /* _ASM_X86_MICROCODE_AMD_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index f71a177b6b18..3fab152809ab 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -497,6 +497,7 @@ + #define MSR_AMD64_DE_CFG 0xc0011029 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 + #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) ++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 + + #define MSR_AMD64_BU_CFG2 0xc001102a + #define MSR_AMD64_IBSFETCHCTL 0xc0011030 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 6eea37f827b1..3d99a823ffac 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -71,6 +71,11 @@ static const int amd_erratum_383[] = + static const int amd_erratum_1054[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + ++static const int amd_zenbleed[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1030,6 +1035,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) + } + } + ++static bool cpu_has_zenbleed_microcode(void) ++{ ++ u32 good_rev = 0; ++ ++ switch (boot_cpu_data.x86_model) { ++ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; ++ case 0x60 ... 0x67: good_rev = 0x0860010b; break; ++ case 0x68 ... 0x6f: good_rev = 0x08608105; break; ++ case 0x70 ... 0x7f: good_rev = 0x08701032; break; ++ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; ++ ++ default: ++ return false; ++ break; ++ } ++ ++ if (boot_cpu_data.microcode < good_rev) ++ return false; ++ ++ return true; ++} ++ ++static void zenbleed_check(struct cpuinfo_x86 *c) ++{ ++ if (!cpu_has_amd_erratum(c, amd_zenbleed)) ++ return; ++ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (!cpu_has(c, X86_FEATURE_AVX)) ++ return; ++ ++ if (!cpu_has_zenbleed_microcode()) { ++ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); ++ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } else { ++ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); ++ } ++} ++ + static void init_amd(struct cpuinfo_x86 *c) + { + early_init_amd(c); +@@ -1120,6 +1166,8 @@ static void init_amd(struct cpuinfo_x86 *c) + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); + + check_null_seg_clears_base(c); ++ ++ zenbleed_check(c); + } + + #ifdef CONFIG_X86_32 +@@ -1233,3 +1281,15 @@ void set_dr_addr_mask(unsigned long mask, int dr) + break; + } + } ++ ++static void zenbleed_check_cpu(void *unused) ++{ ++ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); ++ ++ zenbleed_check(c); ++} ++ ++void amd_check_microcode(void) ++{ ++ on_each_cpu(zenbleed_check_cpu, NULL, 1); ++} +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index e2dee6010846..f41781d06a5f 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -2165,6 +2165,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) + + perf_check_microcode(); + ++ amd_check_microcode(); ++ + store_cpu_caps(&curr_info); + + if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, +-- +2.25.1 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 2c6c1fba..f48d4c55 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -24,6 +24,11 @@ Patch1003: 1003-af_unix-increase-default-max_dgram_qlen-to-512.patch Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch +# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving +# upstream to 5.10.187 or later. +Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 5c778cac888ad439eec8dbca6d2e4d281c141ca8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 27 Jul 2023 07:10:10 +0000 Subject: [PATCH 1047/1356] tools/diff-kernel-config: Fix usage text to reflect reality In commit 43234d3cc4 - tools/diff-kernel-config: Adjust script to work on variants we switched from specifying kernel versions to specifying variants. The kernel versions are then parsed from the variant definition which has advantages. Properly represent that in the usage message. 
Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 350b978a..06ce0de9 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -31,7 +31,7 @@ bail() { usage() { cat < Date: Thu, 27 Jul 2023 07:17:01 +0000 Subject: [PATCH 1048/1356] tools/diff-kernel-config: Add release number to full kernel version When saving the full kernel version for later reference in the kernel mapping we may come across situations where we update to a newer version of our base kernel srpm from Amazon Linux which added only added patches from Amazon Linux but stayed at the same upstream base version. The before and after would be indistinguishable when only comparing the version. Add the release number to disambiguate in that situation. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 06ce0de9..3f5f2847 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -206,7 +206,7 @@ for state in after before; do ;; esac - kver_full=$(rpm --query --queryformat '%{VERSION}' "${kernel_rpm}") + kver_full=$(rpm --query --queryformat '%{VERSION}-%{RELEASE}' "${kernel_rpm}") # # Extract kernel config From b65c0a97daa7696366a83ee97cbf3d8058b69ac8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 20 Jun 2023 08:27:24 +0000 Subject: [PATCH 1049/1356] tools/diff-kernel-config: Add resume functionality Sometimes, the script may fail for a specific variant after we already have spent time successfully building other variants. In order to not let that time go to waste, enable resuming of previous runs of the script. Be aware that the invocation has to be the same as the original invocation, plus the resume flag. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 3f5f2847..c433c63d 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -31,7 +31,7 @@ bail() { usage() { cat <"${config_path}" [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" From d26b1d4ef418fb377d3ebec734f6bfe714648b2d Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Fri, 16 Jun 2023 14:45:47 +0000 Subject: [PATCH 1050/1356] containerd: add all resource-limit settings for oci default Add following rlimits settings for k8s variants, to enable updating these from api client. MaxAddressSpace, MaxCoreFileSize, MaxCpuTime, MaxDataSize, MaxFileLocks, MaxFileSize, MaxLockedMemory, MaxMsgqueueSize, MaxNicePriority, MaxPendingSignals, MaxProcesses, MaxRealtimePriority, MaxRealtimeTimeout, MaxResidentSet, MaxStackSize --- README.md | 189 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 183 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 2cfd7ba6..0b3068b6 100644 --- a/README.md +++ b/README.md @@ -756,12 +756,189 @@ Each of the `resource-limits` settings below contain two numeric fields: `hard-l Please see the [`getrlimit` linux manpage](https://man7.org/linux/man-pages/man7/capabilities.7.html) for meanings of `hard-limit` and `soft-limit`. 
 The full list of resource limits that can be configured in Bottlerocket are:
-
-resource limit | setting | default value
------ | ----- | -----
-`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.hard-limit` | 1048576
-`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.soft-limit` | 65536
-
+
+Resource limit | Setting | Default value | Unit
+----- | ----- | ----- | -----
+`RLIMIT_AS` | `settings.oci-defaults.resource-limits.max-address-space.soft-limit` | - | bytes
+`RLIMIT_AS` | `settings.oci-defaults.resource-limits.max-address-space.hard-limit` | - | bytes
+`RLIMIT_CORE` | `settings.oci-defaults.resource-limits.max-core-file-size.soft-limit` | - | bytes
+`RLIMIT_CORE` | `settings.oci-defaults.resource-limits.max-core-file-size.hard-limit` | - | bytes
+`RLIMIT_CPU` | `settings.oci-defaults.resource-limits.max-cpu-time.soft-limit` | - | seconds
+`RLIMIT_CPU` | `settings.oci-defaults.resource-limits.max-cpu-time.hard-limit` | - | seconds
+`RLIMIT_DATA` | `settings.oci-defaults.resource-limits.max-data-size.soft-limit` | - | bytes
+`RLIMIT_DATA` | `settings.oci-defaults.resource-limits.max-data-size.hard-limit` | - | bytes
+`RLIMIT_LOCKS` | `settings.oci-defaults.resource-limits.max-file-locks.soft-limit` | - | locks
+`RLIMIT_LOCKS` | `settings.oci-defaults.resource-limits.max-file-locks.hard-limit` | - | locks
+`RLIMIT_FSIZE` | `settings.oci-defaults.resource-limits.max-file-size.soft-limit` | - | bytes
+`RLIMIT_FSIZE` | `settings.oci-defaults.resource-limits.max-file-size.hard-limit` | - | bytes
+`RLIMIT_MEMLOCK` | `settings.oci-defaults.resource-limits.max-locked-memory.soft-limit` | - | bytes
+`RLIMIT_MEMLOCK` | `settings.oci-defaults.resource-limits.max-locked-memory.hard-limit` | - | bytes
+`RLIMIT_MSGQUEUE` | `settings.oci-defaults.resource-limits.max-msgqueue-size.soft-limit` | - | bytes
+`RLIMIT_MSGQUEUE` | `settings.oci-defaults.resource-limits.max-msgqueue-size.hard-limit` | - | bytes
+`RLIMIT_NICE` | `settings.oci-defaults.resource-limits.max-nice-priority.soft-limit` | - | -
+`RLIMIT_NICE` | `settings.oci-defaults.resource-limits.max-nice-priority.hard-limit` | - | -
+`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.soft-limit` | 65536 | files
+`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.hard-limit` | 1048576 | files
+`RLIMIT_SIGPENDING` | `settings.oci-defaults.resource-limits.max-pending-signals.soft-limit` | - | signals
+`RLIMIT_SIGPENDING` | `settings.oci-defaults.resource-limits.max-pending-signals.hard-limit` | - | signals
+`RLIMIT_NPROC` | `settings.oci-defaults.resource-limits.max-processes.soft-limit` | - | processes
+`RLIMIT_NPROC` | `settings.oci-defaults.resource-limits.max-processes.hard-limit` | - | processes
+`RLIMIT_RTPRIO` | `settings.oci-defaults.resource-limits.max-realtime-priority.soft-limit` | - | -
+`RLIMIT_RTPRIO` | `settings.oci-defaults.resource-limits.max-realtime-priority.hard-limit` | - | -
+`RLIMIT_RTTIME` | `settings.oci-defaults.resource-limits.max-realtime-timeout.soft-limit` | - | microseconds
+`RLIMIT_RTTIME` | `settings.oci-defaults.resource-limits.max-realtime-timeout.hard-limit` | - | microseconds
+`RLIMIT_RSS` | `settings.oci-defaults.resource-limits.max-resident-set.soft-limit` | - | bytes
+`RLIMIT_RSS` | `settings.oci-defaults.resource-limits.max-resident-set.hard-limit` | - | bytes
+`RLIMIT_STACK` | `settings.oci-defaults.resource-limits.max-stack-size.soft-limit` | - | bytes
+`RLIMIT_STACK` | `settings.oci-defaults.resource-limits.max-stack-size.hard-limit` | - | bytes
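+
+For example, a minimal snippet that sets the open-file limits explicitly to the documented defaults above (using the `max-open-files` setting from the table; any other limit from the table follows the same shape) might look like:
+
+```toml
+# Illustrative only: values shown are the documented defaults for RLIMIT_NOFILE.
+[settings.oci-defaults.resource-limits.max-open-files]
+soft-limit = 65536
+hard-limit = 1048576
+```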
+ +Limits can be any integer between 0 to `int64::MAX`. Either `-1` or `"unlimited"` can be used to remove the limit. +* Specifying the maximum value (`i64::MAX`) for a limit: + ```toml + [settings.oci-defaults.resource-limits.>] + soft-limit = 65536 + hard-limit = 9223372036854775807 + ``` +* Removing a limit: + ```toml + [settings.oci-defaults.resource-limits.>] + soft-limit = 65536 + hard-limit = "unlimited" + ``` + #### Container image registry settings The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. From 7875777908d5a1d7aea97ea44b31a2425716f1c5 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 9 Aug 2023 07:27:48 +0000 Subject: [PATCH 1051/1356] buildsys: Add per package option for upstream source fallback For some packages we build in Bottlerocket we can not cache the external files for licensing restrictions (i.e. nvidia drivers). These packages will always fail to build without having the package downloaded previously or setting `BUILDSYS_UPSTREAM_SOURCE_FALLBACK=true` in the environment. Add an optional setting for external files enabling forcing upstream fallback. This can be added to those packages we never intend to upload to the lookaside cache. Signed-off-by: Leonard Foerster --- tools/buildsys/src/cache.rs | 4 +++- tools/buildsys/src/manifest.rs | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs index 1cb2084e..6bc4e8ac 100644 --- a/tools/buildsys/src/cache.rs +++ b/tools/buildsys/src/cache.rs @@ -65,7 +65,9 @@ impl LookasideCache { } // next check with upstream, if permitted - if std::env::var("BUILDSYS_UPSTREAM_SOURCE_FALLBACK") == Ok("true".to_string()) { + if f.force_upstream.unwrap_or(false) + || std::env::var("BUILDSYS_UPSTREAM_SOURCE_FALLBACK") == Ok("true".to_string()) + { println!("Fetching {:?} from upstream source", url_file_name); Self::fetch_file(&f.url, &tmp, hash)?; fs::rename(&tmp, path).context(error::ExternalFileRenameSnafu { path: &tmp })?; diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs index 73fee6ce..1df31abe 100644 --- a/tools/buildsys/src/manifest.rs +++ b/tools/buildsys/src/manifest.rs @@ -566,6 +566,7 @@ pub struct ExternalFile { pub path: Option, pub sha512: String, pub url: String, + pub force_upstream: Option, pub bundle_modules: Option>, pub bundle_root_path: Option, pub bundle_output_path: Option, From 3598ba9025981c02fd439c41808de02824373c37 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 9 Aug 2023 07:34:52 +0000 Subject: [PATCH 1052/1356] kmod-*-nvidia: Always force using upstream source We will not upload the nvidia driver sources to our lookaside cache. As they always have to come from upstream, force getting the files from there. 
Signed-off-by: Leonard Foerster --- packages/kmod-5.10-nvidia/Cargo.toml | 2 ++ packages/kmod-5.15-nvidia/Cargo.toml | 2 ++ packages/kmod-6.1-nvidia/Cargo.toml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 2e53b318..d62972b4 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -15,10 +15,12 @@ releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/470.161.03/NVIDIA-Linux-x86_64-470.161.03.run" sha512 = "26b1640f9427847b68233ffacf5c4a07e75ed9923429dfc9e5de3d7e5c1f109dfaf0fe0a0639cbd47f056784ed3e00e2e741d5c84532df79590a0c9ffa5ba625" +force-upstream = true [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/470.161.03/NVIDIA-Linux-aarch64-470.161.03.run" sha512 = "16e83c4d3ea66b2da07c43fca912c839e5feb9d42bee279b9de3476ffbd5e2314fddc83c1a38c198adb2d5ea6b4f2b00bb4a4c32d6fd0bfcdbccc392043f99ce" +force-upstream = true [build-dependencies] glibc = { path = "../glibc" } diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 59a8beec..5e211252 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -15,10 +15,12 @@ releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-x86_64-515.86.01.run" sha512 = "9a31e14afc017e847f1208577f597c490adb63c256d6dff1a9eae56b65cf85374a604516b0be9da7a43e9af93b3c5aec47b2ffefd6b4050a4b7e55f348cf4e7b" +force-upstream = true [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-aarch64-515.86.01.run" sha512 = "43161f86143b1558d1f558acf4a060f53f538ea20e6235f76be24916fe4a9c374869645c7abf39eba66f1c2ca35f5d2b04f199bd1341b7ee6c1fdc879cb3ef96" +force-upstream = true [build-dependencies] glibc = { path = "../glibc" } diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index a597333d..75fd1f3d 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -15,10 +15,12 @@ releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-x86_64-535.54.03.run" sha512 = "45b72b34272d3df14b56136bb61537d00145d55734b72d58390af4694d96f03b2b49433beb4a5bede4d978442b707b08e05f2f31b2fcfd9453091e7f0b945cff" +force-upstream = true [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-aarch64-535.54.03.run" sha512 = "57b06a6fa16838176866c364a8722c546084529ad91c57e979aca7750692127cab1485b5a44aee398c5494782ed987e82f66061aa39e802bc6eefa2b40a33bc3" +force-upstream = true [build-dependencies] glibc = { path = "../glibc" } From 1118ca3c177848f70d3f0415b165a7853f7f776d Mon Sep 17 00:00:00 2001 From: Cartrius Phipps Date: Mon, 7 Aug 2023 20:59:49 +0000 Subject: [PATCH 1053/1356] kubelet: Add support for SeccompDefault setting for k8s 1.25+ Enable RuntimeDefault as the default seccomp profile for all workloads via kubelet-configuration. This is disabled by default. 
--- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 0b3068b6..7e8b6853 100644 --- a/README.md +++ b/README.md @@ -559,6 +559,7 @@ The following settings are optional and allow you to further configure your clus * `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. * `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. * `settings.kubernetes.registry-qps`: The registry pull QPS. +* `settings.kubernetes.seccomp-default`: Enable RuntimeDefault as the default seccomp profile for all workloads via kubelet-configuration. This is disabled by default. * `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. * `settings.kubernetes.shutdown-grace-period`: Delay the node should wait for pod termination before shutdown. Default is `0s`. * `settings.kubernetes.shutdown-grace-period-for-critical-pods`: The portion of the shutdown delay that should be dedicated to critical pod shutdown. Default is `0s`. From 3a8497e9e60c26ed6511bade0e589c7a940a95ab Mon Sep 17 00:00:00 2001 From: "Tung Bui (Leo)" <85242618+tungbq@users.noreply.github.com> Date: Wed, 16 Aug 2023 00:07:22 +0700 Subject: [PATCH 1054/1356] tools: add DOCUMENTATION_URL to os-release --- tools/rpm2img | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/rpm2img b/tools/rpm2img index 26a09462..245c15ad 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -470,6 +470,7 @@ BUILD_ID=${BUILD_ID} HOME_URL="https://github.com/bottlerocket-os/bottlerocket" SUPPORT_URL="https://github.com/bottlerocket-os/bottlerocket/discussions" BUG_REPORT_URL="https://github.com/bottlerocket-os/bottlerocket/issues" +DOCUMENTATION_URL="https://bottlerocket.dev" EOF # Set the BOTTLEROCKET-DATA Filesystem for creating/mounting From 740c0d54cad503330e23febae773d3ed5ac59056 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Sun, 29 Jan 2023 08:43:24 +0000 Subject: [PATCH 1055/1356] Update docker run `--security-opt` command format During our build process we have several places where we use the Bottlerocket SDK to perform the build. In these `docker run` commands we pass `--security-opt label:disable` to avoid labeling a large number of files. It appears this syntax may have changed since we started using it, and the correct syntax is no `label=disable`. Based on reports in the Docker repo, the way we had it of `label:disable` may not actually work. While there are other issues to be addressed, the older format generates an error when trying to build with finch, while the newer format - though not recognized - only generates a warning message. 
Signed-off-by: Sean McGinnis --- tools/docker-go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker-go b/tools/docker-go index 177d1075..50915d98 100755 --- a/tools/docker-go +++ b/tools/docker-go @@ -85,7 +85,7 @@ docker run --rm \ "${go_env[@]}" \ "${proxy_env[@]}" \ --user "$(id -u):$(id -g)" \ - --security-opt label:disable \ + --security-opt="label=disable" \ ${DOCKER_RUN_ARGS} \ -v "${GOPATH}":"${GOPATH}" \ -v "${GO_MODULE_PATH}":"${GO_MODULE_PATH}" \ From c4d2d96fba6470e249c4cf65a3c7e500a29f0d7c Mon Sep 17 00:00:00 2001 From: ecpullen Date: Thu, 17 Aug 2023 19:22:10 +0000 Subject: [PATCH 1056/1356] testsys: Add support for gpu based ecs workloads --- tools/testsys/src/aws_ecs.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs index c95a063b..2d76e431 100644 --- a/tools/testsys/src/aws_ecs.rs +++ b/tools/testsys/src/aws_ecs.rs @@ -222,6 +222,7 @@ pub(crate) fn workload_crd(region: &str, test_input: TestInput) -> Result "testsys/type".to_string() => test_input.test_type.to_string(), "testsys/cluster".to_string() => cluster_resource_name.to_string(), }); + let gpu = test_input.crd_input.variant.variant_flavor() == Some("nvidia"); let plugins: Vec<_> = test_input .crd_input .config @@ -230,7 +231,7 @@ pub(crate) fn workload_crd(region: &str, test_input: TestInput) -> Result .map(|(name, image)| WorkloadTest { name: name.to_string(), image: image.to_string(), - ..Default::default() + gpu, }) .collect(); if plugins.is_empty() { From 26f37d7b6b75d874b664064dfa8cdc62f0a05eeb Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 10 Aug 2023 14:35:16 +0000 Subject: [PATCH 1057/1356] kernel-5.10: update to 5.10.186 Rebase to Amazon Linux upstream version 5.10.186-179.751.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 502c8e45..3fafe00f 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/73e966edcb947b3b7d077150dcea95b838666a21da320092f9659ddafa3409fb/kernel-5.10.184-175.731.amzn2.src.rpm" -sha512 = "5245ba11ae97b9f646ea817960e204283acd86b1b6c6e42e0b268268723d1d3e516c97ce0d868341de9e82476a9f8c5b21ef4eb5d7e11b7a38a5a8234c1b1a72" +url = "https://cdn.amazonlinux.com/blobstore/e6326ee4512d019820a49568094b3393f82a963a90b4201cbb45eea26a66ce02/kernel-5.10.186-179.751.amzn2.src.rpm" +sha512 = "6753ecfd149bf30a7ac8661ac2e711aa73a1b3ed9122e9545d2053c09b430c8ea8ca142f9500a096fc770007e989c417496e578eddf363442a262af2a5c17ee1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index f48d4c55..45579c4c 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.184 +Version: 5.10.186 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/73e966edcb947b3b7d077150dcea95b838666a21da320092f9659ddafa3409fb/kernel-5.10.184-175.731.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/e6326ee4512d019820a49568094b3393f82a963a90b4201cbb45eea26a66ce02/kernel-5.10.186-179.751.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 8dd6331cf4ba36cf4df3d5deec10e04b3eb9d572 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 10 Aug 2023 14:35:46 +0000 Subject: [PATCH 1058/1356] kernel-5.15: update to 5.15.122 Rebase to Amazon Linux upstream version 5.15.122-77.145.amzn2. Signed-off-by: Leonard Foerster --- ...-the-errata-checking-functionality-u.patch | 184 ------------------ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ---------------- packages/kernel-5.15/Cargo.toml | 4 +- packages/kernel-5.15/kernel-5.15.spec | 9 +- 4 files changed, 4 insertions(+), 365 deletions(-) delete mode 100644 packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch delete mode 100644 packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch diff --git a/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch deleted file mode 100644 index 72214460..00000000 --- a/packages/kernel-5.15/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +++ /dev/null @@ -1,184 +0,0 @@ -From 5398be2c48aa22189c3992a0d92288e67853cb47 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:31:32 +0200 -Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up - -Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a - -Avoid new and remove old forward declarations. - -No functional changes. - -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- - 1 file changed, 67 insertions(+), 72 deletions(-) - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 83bf26eaff2e..f8228a929ff3 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -27,11 +27,6 @@ - - #include "cpu.h" - --static const int amd_erratum_383[]; --static const int amd_erratum_400[]; --static const int amd_erratum_1054[]; --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); -- - /* - * nodes_per_socket: Stores the number of nodes per socket. - * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX -@@ -39,6 +34,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); - */ - static u32 nodes_per_socket = 1; - -+/* -+ * AMD errata checking -+ * -+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -+ * have an OSVW id assigned, which it takes as first argument. Both take a -+ * variable number of family-specific model-stepping ranges created by -+ * AMD_MODEL_RANGE(). -+ * -+ * Example: -+ * -+ * const int amd_erratum_319[] = -+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -+ */ -+ -+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -+#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } -+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -+ -+static const int amd_erratum_400[] = -+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -+ -+static const int amd_erratum_383[] = -+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -+ -+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ -+static const int amd_erratum_1054[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -+ -+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -+{ -+ int osvw_id = *erratum++; -+ u32 range; -+ u32 ms; -+ -+ if (osvw_id >= 0 && osvw_id < 65536 && -+ cpu_has(cpu, X86_FEATURE_OSVW)) { -+ u64 osvw_len; -+ -+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -+ if (osvw_id < osvw_len) { -+ u64 osvw_bits; -+ -+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -+ osvw_bits); -+ return osvw_bits & (1ULL << (osvw_id & 0x3f)); -+ } -+ } -+ -+ /* OSVW unavailable or ID unknown, match family-model-stepping range */ -+ ms = (cpu->x86_model << 4) | cpu->x86_stepping; -+ while ((range = *erratum++)) -+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -+ (ms >= AMD_MODEL_RANGE_START(range)) && -+ (ms <= AMD_MODEL_RANGE_END(range))) -+ return true; -+ -+ return false; -+} -+ - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) - { - u32 gprs[8] = { 0 }; -@@ -1125,73 +1187,6 @@ static const struct cpu_dev amd_cpu_dev = { - - cpu_dev_register(amd_cpu_dev); - --/* -- * AMD errata checking -- * -- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -- * have an OSVW id assigned, which it takes as first argument. Both take a -- * variable number of family-specific model-stepping ranges created by -- * AMD_MODEL_RANGE(). -- * -- * Example: -- * -- * const int amd_erratum_319[] = -- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -- */ -- --#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } --#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } --#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) --#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) --#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) --#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -- --static const int amd_erratum_400[] = -- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -- --static const int amd_erratum_383[] = -- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -- --/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ --static const int amd_erratum_1054[] = -- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -- --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) --{ -- int osvw_id = *erratum++; -- u32 range; -- u32 ms; -- -- if (osvw_id >= 0 && osvw_id < 65536 && -- cpu_has(cpu, X86_FEATURE_OSVW)) { -- u64 osvw_len; -- -- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -- if (osvw_id < osvw_len) { -- u64 osvw_bits; -- -- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -- osvw_bits); -- return osvw_bits & (1ULL << (osvw_id & 0x3f)); -- } -- } -- -- /* OSVW unavailable or ID unknown, match family-model-stepping range */ -- ms = (cpu->x86_model << 4) | cpu->x86_stepping; -- while ((range = *erratum++)) -- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -- (ms >= AMD_MODEL_RANGE_START(range)) && -- (ms <= AMD_MODEL_RANGE_END(range))) -- return true; -- -- return false; --} -- - void set_dr_addr_mask(unsigned long mask, int dr) - { - if (!boot_cpu_has(X86_FEATURE_BPEXT)) --- -2.25.1 - diff --git a/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch deleted file mode 100644 index 4e6d6194..00000000 --- a/packages/kernel-5.15/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch +++ /dev/null @@ -1,172 +0,0 @@ -From be824fdb827dc06f77a31122949fe1bc011e3e1e Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:41:28 +0200 -Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix - -Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 - -Add a fix for the Zen2 VZEROUPPER data corruption bug where under -certain circumstances executing VZEROUPPER can cause register -corruption or leak data. - -The optimal fix is through microcode but in the case the proper -microcode revision has not been applied, enable a fallback fix using -a chicken bit. 
- -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/include/asm/microcode.h | 1 + - arch/x86/include/asm/microcode_amd.h | 2 + - arch/x86/include/asm/msr-index.h | 1 + - arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ - arch/x86/kernel/cpu/common.c | 2 + - 5 files changed, 66 insertions(+) - -diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h -index 1bf064a14b95..4ca377efc986 100644 ---- a/arch/x86/include/asm/microcode.h -+++ b/arch/x86/include/asm/microcode.h -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include - - struct ucode_patch { - struct list_head plist; -diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h -index a645b25ee442..403a8e76b310 100644 ---- a/arch/x86/include/asm/microcode_amd.h -+++ b/arch/x86/include/asm/microcode_amd.h -@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); - extern void load_ucode_amd_ap(unsigned int family); - extern int __init save_microcode_in_initrd_amd(unsigned int family); - void reload_ucode_amd(unsigned int cpu); -+extern void amd_check_microcode(void); - #else - static inline void __init load_ucode_amd_bsp(unsigned int family) {} - static inline void load_ucode_amd_ap(unsigned int family) {} - static inline int __init - save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } - static inline void reload_ucode_amd(unsigned int cpu) {} -+static inline void amd_check_microcode(void) {} - #endif - #endif /* _ASM_X86_MICROCODE_AMD_H */ -diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h -index 3588b799c63f..e78755ed82cf 100644 ---- a/arch/x86/include/asm/msr-index.h -+++ b/arch/x86/include/asm/msr-index.h -@@ -503,6 +503,7 @@ - #define MSR_AMD64_DE_CFG 0xc0011029 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) -+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 - - #define MSR_AMD64_BU_CFG2 0xc001102a - #define MSR_AMD64_IBSFETCHCTL 0xc0011030 -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index f8228a929ff3..3daceadf5d1f 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -70,6 +70,11 @@ static const int amd_erratum_383[] = - static const int amd_erratum_1054[] = - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); - -+static const int amd_zenbleed[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -1002,6 +1007,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) - } - } - -+static bool cpu_has_zenbleed_microcode(void) -+{ -+ u32 good_rev = 0; -+ -+ switch (boot_cpu_data.x86_model) { -+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; -+ case 0x60 ... 0x67: good_rev = 0x0860010b; break; -+ case 0x68 ... 0x6f: good_rev = 0x08608105; break; -+ case 0x70 ... 0x7f: good_rev = 0x08701032; break; -+ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; -+ -+ default: -+ return false; -+ break; -+ } -+ -+ if (boot_cpu_data.microcode < good_rev) -+ return false; -+ -+ return true; -+} -+ -+static void zenbleed_check(struct cpuinfo_x86 *c) -+{ -+ if (!cpu_has_amd_erratum(c, amd_zenbleed)) -+ return; -+ -+ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) -+ return; -+ -+ if (!cpu_has(c, X86_FEATURE_AVX)) -+ return; -+ -+ if (!cpu_has_zenbleed_microcode()) { -+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); -+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } else { -+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } -+} -+ - static void init_amd(struct cpuinfo_x86 *c) - { - early_init_amd(c); -@@ -1092,6 +1138,8 @@ static void init_amd(struct cpuinfo_x86 *c) - msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); - - check_null_seg_clears_base(c); -+ -+ zenbleed_check(c); - } - - #ifdef CONFIG_X86_32 -@@ -1221,3 +1269,15 @@ u32 amd_get_highest_perf(void) - return 255; - } - EXPORT_SYMBOL_GPL(amd_get_highest_perf); -+ -+static void zenbleed_check_cpu(void *unused) -+{ -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ zenbleed_check(c); -+} -+ -+void amd_check_microcode(void) -+{ -+ on_each_cpu(zenbleed_check_cpu, NULL, 1); -+} -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index f7b4bbe71cdf..69752745a5b1 100644 ---- a/arch/x86/kernel/cpu/common.c -+++ b/arch/x86/kernel/cpu/common.c -@@ -2185,6 +2185,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) - - perf_check_microcode(); - -+ amd_check_microcode(); -+ - store_cpu_caps(&curr_info); - - if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, --- -2.25.1 - diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 24313643..be8a8543 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/dee03ce3e2dcaf93eec3457db4f5a6973c1837abd3c96229897cb29e5c72d348/kernel-5.15.117-73.143.amzn2.src.rpm" -sha512 = "5b846ce8b18cf155925534a26faf6ef26f47c808a7adaf089248fdce0fc2f06acad389e49595eeda390bd28ca64c9f47765ea7431b64709c6913f52266063024" +url = "https://cdn.amazonlinux.com/blobstore/d73ac4b2ddb2c5ed91308adfcd7ccf4d7ba53882d31c9a6461e1661766159b62/kernel-5.15.122-77.145.amzn2.src.rpm" +sha512 = "37742f1923dcafa20e9144d9754e5238a85956bbb814caa89dbc4db2549e62b64c9e3af9ceaf0bc32d71560eef9a60d86f35ae3df86c5893094fd86b63b58ffb" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 26bfb990..4e2dc548 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.117 +Version: 5.15.122 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/dee03ce3e2dcaf93eec3457db4f5a6973c1837abd3c96229897cb29e5c72d348/kernel-5.15.117-73.143.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/d73ac4b2ddb2c5ed91308adfcd7ccf4d7ba53882d31c9a6461e1661766159b62/kernel-5.15.122-77.145.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal @@ -22,11 +22,6 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch -# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving -# upstream to 5.15.122 or later. -Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch -Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 5aab3c530e3f4df2787d081e7bff71ae0bd70d9d Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 10 Aug 2023 14:36:04 +0000 Subject: [PATCH 1059/1356] kernel-6.1: update to 6.1.41 Rebase to Amazon Linux upstream version 6.1.41-63.114.amzn2023. Drop backport of Zenbleed mitigation in favour of upstream variant included in upstream version v6.1.41. Signed-off-by: Leonard Foerster --- ...-the-errata-checking-functionality-u.patch | 184 ------------------ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ---------------- packages/kernel-6.1/Cargo.toml | 4 +- packages/kernel-6.1/kernel-6.1.spec | 9 +- 4 files changed, 4 insertions(+), 365 deletions(-) delete mode 100644 packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch delete mode 100644 packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch diff --git a/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch deleted file mode 100644 index 2accaae4..00000000 --- a/packages/kernel-6.1/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +++ /dev/null @@ -1,184 +0,0 @@ -From 5fc203d8d3ed416bee054e9f2e6513df51d74577 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:31:32 +0200 -Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up - -Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a - -Avoid new and remove old forward declarations. - -No functional changes. - -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- - 1 file changed, 67 insertions(+), 72 deletions(-) - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index d2dbbc50b3a7..16b05029e068 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -27,11 +27,6 @@ - - #include "cpu.h" - --static const int amd_erratum_383[]; --static const int amd_erratum_400[]; --static const int amd_erratum_1054[]; --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); -- - /* - * nodes_per_socket: Stores the number of nodes per socket. - * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX -@@ -39,6 +34,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); - */ - static u32 nodes_per_socket = 1; - -+/* -+ * AMD errata checking -+ * -+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -+ * AMD_OSVW_ERRATUM() macros. 
The latter is intended for newer errata that -+ * have an OSVW id assigned, which it takes as first argument. Both take a -+ * variable number of family-specific model-stepping ranges created by -+ * AMD_MODEL_RANGE(). -+ * -+ * Example: -+ * -+ * const int amd_erratum_319[] = -+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -+ */ -+ -+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -+#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } -+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -+ -+static const int amd_erratum_400[] = -+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -+ -+static const int amd_erratum_383[] = -+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -+ -+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ -+static const int amd_erratum_1054[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -+ -+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -+{ -+ int osvw_id = *erratum++; -+ u32 range; -+ u32 ms; -+ -+ if (osvw_id >= 0 && osvw_id < 65536 && -+ cpu_has(cpu, X86_FEATURE_OSVW)) { -+ u64 osvw_len; -+ -+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -+ if (osvw_id < osvw_len) { -+ u64 osvw_bits; -+ -+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -+ osvw_bits); -+ return osvw_bits & (1ULL << (osvw_id & 0x3f)); -+ } -+ } -+ -+ /* OSVW unavailable or ID unknown, match family-model-stepping range */ -+ ms = (cpu->x86_model << 4) | cpu->x86_stepping; -+ while ((range = *erratum++)) -+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -+ (ms >= AMD_MODEL_RANGE_START(range)) && -+ (ms <= AMD_MODEL_RANGE_END(range))) -+ return true; -+ -+ return false; -+} -+ - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) - { - u32 gprs[8] = { 0 }; -@@ -1100,73 +1162,6 @@ static const struct cpu_dev amd_cpu_dev = { - - cpu_dev_register(amd_cpu_dev); - --/* -- * AMD errata checking -- * -- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -- * have an OSVW id assigned, which it takes as first argument. Both take a -- * variable number of family-specific model-stepping ranges created by -- * AMD_MODEL_RANGE(). -- * -- * Example: -- * -- * const int amd_erratum_319[] = -- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -- */ -- --#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } --#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } --#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) --#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) --#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) --#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -- --static const int amd_erratum_400[] = -- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -- --static const int amd_erratum_383[] = -- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -- --/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ --static const int amd_erratum_1054[] = -- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -- --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) --{ -- int osvw_id = *erratum++; -- u32 range; -- u32 ms; -- -- if (osvw_id >= 0 && osvw_id < 65536 && -- cpu_has(cpu, X86_FEATURE_OSVW)) { -- u64 osvw_len; -- -- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -- if (osvw_id < osvw_len) { -- u64 osvw_bits; -- -- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -- osvw_bits); -- return osvw_bits & (1ULL << (osvw_id & 0x3f)); -- } -- } -- -- /* OSVW unavailable or ID unknown, match family-model-stepping range */ -- ms = (cpu->x86_model << 4) | cpu->x86_stepping; -- while ((range = *erratum++)) -- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -- (ms >= AMD_MODEL_RANGE_START(range)) && -- (ms <= AMD_MODEL_RANGE_END(range))) -- return true; -- -- return false; --} -- - void set_dr_addr_mask(unsigned long mask, int dr) - { - if (!boot_cpu_has(X86_FEATURE_BPEXT)) --- -2.25.1 - diff --git a/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch deleted file mode 100644 index c18f3b57..00000000 --- a/packages/kernel-6.1/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch +++ /dev/null @@ -1,172 +0,0 @@ -From ed9b87010aa84c157096f98c322491e9af8e8f07 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:41:28 +0200 -Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix - -Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 - -Add a fix for the Zen2 VZEROUPPER data corruption bug where under -certain circumstances executing VZEROUPPER can cause register -corruption or leak data. - -The optimal fix is through microcode but in the case the proper -microcode revision has not been applied, enable a fallback fix using -a chicken bit. 
- -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/include/asm/microcode.h | 1 + - arch/x86/include/asm/microcode_amd.h | 2 + - arch/x86/include/asm/msr-index.h | 1 + - arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ - arch/x86/kernel/cpu/common.c | 2 + - 5 files changed, 66 insertions(+) - -diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h -index 79b1d009e34e..19a0b4005ffa 100644 ---- a/arch/x86/include/asm/microcode.h -+++ b/arch/x86/include/asm/microcode.h -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include - - struct ucode_patch { - struct list_head plist; -diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h -index e6662adf3af4..9675c621c1ca 100644 ---- a/arch/x86/include/asm/microcode_amd.h -+++ b/arch/x86/include/asm/microcode_amd.h -@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); - extern void load_ucode_amd_ap(unsigned int family); - extern int __init save_microcode_in_initrd_amd(unsigned int family); - void reload_ucode_amd(unsigned int cpu); -+extern void amd_check_microcode(void); - #else - static inline void __init load_ucode_amd_bsp(unsigned int family) {} - static inline void load_ucode_amd_ap(unsigned int family) {} - static inline int __init - save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } - static inline void reload_ucode_amd(unsigned int cpu) {} -+static inline void amd_check_microcode(void) {} - #endif - #endif /* _ASM_X86_MICROCODE_AMD_H */ -diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h -index 117e4e977b55..846067e1ee8b 100644 ---- a/arch/x86/include/asm/msr-index.h -+++ b/arch/x86/include/asm/msr-index.h -@@ -543,6 +543,7 @@ - #define MSR_AMD64_DE_CFG 0xc0011029 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) -+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 - - #define MSR_AMD64_BU_CFG2 0xc001102a - #define MSR_AMD64_IBSFETCHCTL 0xc0011030 -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 16b05029e068..7f4eb8b027cc 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -70,6 +70,11 @@ static const int amd_erratum_383[] = - static const int amd_erratum_1054[] = - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); - -+static const int amd_zenbleed[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -978,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) - } - } - -+static bool cpu_has_zenbleed_microcode(void) -+{ -+ u32 good_rev = 0; -+ -+ switch (boot_cpu_data.x86_model) { -+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; -+ case 0x60 ... 0x67: good_rev = 0x0860010b; break; -+ case 0x68 ... 0x6f: good_rev = 0x08608105; break; -+ case 0x70 ... 0x7f: good_rev = 0x08701032; break; -+ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; -+ -+ default: -+ return false; -+ break; -+ } -+ -+ if (boot_cpu_data.microcode < good_rev) -+ return false; -+ -+ return true; -+} -+ -+static void zenbleed_check(struct cpuinfo_x86 *c) -+{ -+ if (!cpu_has_amd_erratum(c, amd_zenbleed)) -+ return; -+ -+ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) -+ return; -+ -+ if (!cpu_has(c, X86_FEATURE_AVX)) -+ return; -+ -+ if (!cpu_has_zenbleed_microcode()) { -+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); -+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } else { -+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } -+} -+ - static void init_amd(struct cpuinfo_x86 *c) - { - early_init_amd(c); -@@ -1067,6 +1113,8 @@ static void init_amd(struct cpuinfo_x86 *c) - msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); - - check_null_seg_clears_base(c); -+ -+ zenbleed_check(c); - } - - #ifdef CONFIG_X86_32 -@@ -1196,3 +1244,15 @@ u32 amd_get_highest_perf(void) - return 255; - } - EXPORT_SYMBOL_GPL(amd_get_highest_perf); -+ -+static void zenbleed_check_cpu(void *unused) -+{ -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ zenbleed_check(c); -+} -+ -+void amd_check_microcode(void) -+{ -+ on_each_cpu(zenbleed_check_cpu, NULL, 1); -+} -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index c34bdba57993..d298d70f74ce 100644 ---- a/arch/x86/kernel/cpu/common.c -+++ b/arch/x86/kernel/cpu/common.c -@@ -2346,6 +2346,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) - - perf_check_microcode(); - -+ amd_check_microcode(); -+ - store_cpu_caps(&curr_info); - - if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, --- -2.25.1 - diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index f4b18d03..9f694803 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/fc78f9cacdcb6227481fd326c05429914f6b085d7abad49c0b1fd896ec02dd4b/kernel-6.1.29-50.88.amzn2023.src.rpm" -sha512 = "0d3a40a5811d36c0ac8a731686a816ae47f66f10ce8ca945f4e727f6c188c9d0a54c504667c25a86b7c80437c9fddafa3973205ad73ed7330b8957b526eff5ed" +url = "https://cdn.amazonlinux.com/al2023/blobstore/789848dec5baccf864b022af695a2a3ac1ba52392a2b6aa83f19dc07d050df0a/kernel-6.1.41-63.114.amzn2023.src.rpm" +sha512 = "6a66562d23a21ac3fba56cb13680ef2cc0c3fe9b2b77e83c3e6da47ca36016413cd5ebac9266419e835d04f10fb509b00536fc3e38eb0e8d707db5f8fdd8f10e" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index d1028e14..28ffa496 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.29 +Version: 6.1.41 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/fc78f9cacdcb6227481fd326c05429914f6b085d7abad49c0b1fd896ec02dd4b/kernel-6.1.29-50.88.amzn2023.src.rpm
+Source0: https://cdn.amazonlinux.com/al2023/blobstore/789848dec5baccf864b022af695a2a3ac1ba52392a2b6aa83f19dc07d050df0a/kernel-6.1.41-63.114.amzn2023.src.rpm
 Source100: config-bottlerocket
 Source101: config-bottlerocket-aws
 Source102: config-bottlerocket-metal
@@ -22,11 +22,6 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch
 # Increase default of sysctl net.unix.max_dgram_qlen to 512.
 Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch
 
-# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving
-# upstream to 6.1.41 or later.
-Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
-Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch
-
 BuildRequires: bc
 BuildRequires: elfutils-devel
 BuildRequires: hostname

From f1ef0b4183707b0e1bd4aa84a5ab194280fc3081 Mon Sep 17 00:00:00 2001
From: Leonard Foerster
Date: Fri, 11 Aug 2023 06:53:58 +0000
Subject: [PATCH 1060/1356] kernel-6.1: Revert back to upstream behaviour for fb_helper

Amazon Linux reverted an upstream patch cleaning up DRM config option
dependency as they wanted to retain the old behavior in order to provide
dependency options for nvidia DKMS. For Bottlerocket we had solved this
issue already by enabling DRM_SIMPLEDRM in fd73bff24a78, retaining
upstream functionality.

Signed-off-by: Leonard Foerster
---
 ...m-fb_helper-improve-CONFIG_FB-depend.patch | 36 +++++++++++++++++++
 packages/kernel-6.1/kernel-6.1.spec           |  3 ++
 2 files changed, 39 insertions(+)
 create mode 100644 packages/kernel-6.1/1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch

diff --git a/packages/kernel-6.1/1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch b/packages/kernel-6.1/1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch
new file mode 100644
index 00000000..01a3d1b8
--- /dev/null
+++ b/packages/kernel-6.1/1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch
@@ -0,0 +1,36 @@
+From 97942a7563e670dbc481a322b34f29010a1ed9ec Mon Sep 17 00:00:00 2001
+From: Leonard Foerster
+Date: Fri, 11 Aug 2023 06:41:44 +0000
+Subject: [PATCH] Revert "Revert "drm: fb_helper: improve CONFIG_FB
+ dependency""
+
+This reverts commit 9200a3864170e49e8d076870ee18fad6de4fd356.
+
+Amazon Linux has reverted this upstream commit in order to have
+certain DRM options set to allow building nvidia DKMS. Instead
+of reverting an upstream commit, we added DRM_SIMPLEDRM with
+Bottlerocket commit fd73bff24a78 in order to supply the necessary
+dependencies for nvidia drivers.
+--- + drivers/gpu/drm/Kconfig | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index e0264211ca84..f30f99166531 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -124,9 +124,8 @@ config DRM_DEBUG_MODESET_LOCK + + config DRM_FBDEV_EMULATION + bool "Enable legacy fbdev support for your modesetting driver" +- depends on DRM +- depends on FB=y || FB=DRM +- select DRM_KMS_HELPER ++ depends on DRM_KMS_HELPER ++ depends on FB=y || FB=DRM_KMS_HELPER + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT +-- +2.40.1 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 28ffa496..eea8b89b 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -21,6 +21,9 @@ Patch1002: 1002-Revert-kbuild-hide-tools-build-targets-from-external.patch Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch +# Drop AL revert of upstream patch to minimize delta. The necessary dependency +# options for nvidia are instead included through DRM_SIMPLE +Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch BuildRequires: bc BuildRequires: elfutils-devel From 612c8cc81ba04df9a2ba99e007723823fa0b8608 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 11 Aug 2023 11:13:07 +0000 Subject: [PATCH 1061/1356] kernel-6.1: Disable specific SCSI drivers for the generic images Amazon Linux has added some specific SCSI drivers on their kernels. We do not need these for vmware and aws use cases and had set them already for the metal variants separately. Disable them for the generic case in order to not introduce additional bloat. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index c501e5f4..809e36e4 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -192,6 +192,13 @@ CONFIG_EXT4_USE_FOR_EXT2=y # - sch_cake targets home routers and residential links # CONFIG_NET_SCH_CAKE is not set +# Disable specific SCSI drivers for the generic case. 
We have enabled necessary
+# drivers on metal specifically
+# CONFIG_SCSI_MPI3MR is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_SMARTPQI is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
 # Provide minimal iSCSI via TCP support for initiator and target mode
 # initiator side
 CONFIG_ISCSI_TCP=m

From 6fd3f19eb37addce284dce242de4f97039440368 Mon Sep 17 00:00:00 2001
From: ecpullen
Date: Mon, 21 Aug 2023 20:43:08 +0000
Subject: [PATCH 1062/1356] testsys: Add labels to ecs cluster crds

---
 tools/testsys/src/aws_ecs.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs
index 2d76e431..f021528d 100644
--- a/tools/testsys/src/aws_ecs.rs
+++ b/tools/testsys/src/aws_ecs.rs
@@ -100,6 +100,7 @@ impl CrdCreator for AwsEcsCreator {
                     .testsys_agent_pull_secret
                     .to_owned(),
             )
+            .set_labels(Some(labels))
             .set_secrets(Some(cluster_input.crd_input.config.secrets.clone()))
             .build(cluster_input.cluster_name)
             .context(error::BuildSnafu {

From 01afe6ec2d802e922bbacabc5b8d0c710ffa4959 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 22 Aug 2023 18:09:33 +0000
Subject: [PATCH 1063/1356] build(deps): bump rustls-webpki from 0.100.1 to 0.100.2 in /tools

Bumps [rustls-webpki](https://github.com/rustls/webpki) from 0.100.1 to 0.100.2.
- [Release notes](https://github.com/rustls/webpki/releases)
- [Commits](https://github.com/rustls/webpki/compare/v/0.100.1...v/0.100.2)

---
updated-dependencies:
- dependency-name: rustls-webpki
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
---
 tools/Cargo.lock | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/Cargo.lock b/tools/Cargo.lock
index 81a8b0b4..16478ec2 100644
--- a/tools/Cargo.lock
+++ b/tools/Cargo.lock
@@ -2772,9 +2772,9 @@ dependencies = [
 
 [[package]]
 name = "rustls-webpki"
-version = "0.100.1"
+version = "0.100.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b"
+checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab"
 dependencies = [
  "ring",
  "untrusted",

From 75de0a9825c0165f9fdccc6bfcac4dc627cadce3 Mon Sep 17 00:00:00 2001
From: "Patrick J.P.
Culp" Date: Tue, 22 Aug 2023 23:25:10 +0000 Subject: [PATCH 1064/1356] tools: update aws-sdk-rust, coldsnap, and tough --- tools/Cargo.lock | 222 ++++++------------ tools/infrasys/Cargo.toml | 8 +- tools/infrasys/src/error.rs | 10 +- tools/infrasys/src/main.rs | 2 +- tools/infrasys/src/s3.rs | 4 +- tools/infrasys/src/shared.rs | 2 +- tools/pubsys/Cargo.toml | 26 +- .../pubsys/src/aws/ami/launch_permissions.rs | 13 +- tools/pubsys/src/aws/ami/mod.rs | 22 +- tools/pubsys/src/aws/ami/public.rs | 4 +- tools/pubsys/src/aws/ami/register.rs | 10 +- tools/pubsys/src/aws/ami/wait.rs | 7 +- tools/pubsys/src/aws/mod.rs | 4 +- tools/pubsys/src/aws/promote_ssm/mod.rs | 6 +- tools/pubsys/src/aws/publish_ami/mod.rs | 19 +- tools/pubsys/src/aws/ssm/mod.rs | 4 +- tools/pubsys/src/aws/ssm/ssm.rs | 17 +- tools/pubsys/src/aws/ssm/template.rs | 4 +- tools/pubsys/src/aws/validate_ami/ami.rs | 7 +- tools/pubsys/src/aws/validate_ami/mod.rs | 4 +- tools/pubsys/src/aws/validate_ami/results.rs | 4 +- tools/pubsys/src/aws/validate_ssm/mod.rs | 4 +- tools/pubsys/src/aws/validate_ssm/results.rs | 4 +- tools/pubsys/src/repo.rs | 2 +- tools/testsys/Cargo.toml | 4 +- tools/testsys/src/aws_resources.rs | 4 +- tools/testsys/src/error.rs | 4 +- 27 files changed, 181 insertions(+), 240 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 16478ec2..9ddcacca 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -173,9 +173,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3d1e2a1f1ab3ac6c4b884e37413eaa03eb9d901e4fc68ee8f5c1d49721680e" +checksum = "bcdcf0d683fe9c23d32cf5b53c9918ea0a500375a9fb20109802552658e576c9" dependencies = [ "aws-credential-types", "aws-http", @@ -189,6 +189,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", + "fastrand", "hex", "http", "hyper", @@ -202,12 +203,13 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0696a0523a39a19087747e4dafda0362dc867531e3d72a3f195564c84e5e08" +checksum = "1fcdb2f7acbc076ff5ad05e7864bdb191ca70a6fd07668dc3a1a8bcd051de5ae" dependencies = [ "aws-smithy-async", "aws-smithy-types", + "fastrand", "tokio", "tracing", "zeroize", @@ -215,9 +217,9 @@ dependencies = [ [[package]] name = "aws-endpoint" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80a4f935ab6a1919fbfd6102a80c4fccd9ff5f47f94ba154074afe1051903261" +checksum = "8cce1c41a6cfaa726adee9ebb9a56fcd2bbfd8be49fd8a04c5e20fd968330b04" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -229,9 +231,9 @@ dependencies = [ [[package]] name = "aws-http" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82976ca4e426ee9ca3ffcf919d9b2c8d14d0cd80d43cc02173737a8f07f28d4d" +checksum = "aadbc44e7a8f3e71c8b374e03ecd972869eb91dd2bc89ed018954a52ba84bc44" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -248,9 +250,9 @@ dependencies = [ [[package]] name = "aws-sdk-cloudformation" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca505d83bd39d7f53a6f03fcc8872ab948898ec21530f6117a18d2589b0cc87" +checksum = "2f32bb66da99e2955ce49e346200cb14421784755a39c74fe2c043536b2d57ba" 
dependencies = [ "aws-credential-types", "aws-endpoint", @@ -271,13 +273,14 @@ dependencies = [ "regex", "tokio-stream", "tower", + "tracing", ] [[package]] name = "aws-sdk-ebs" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c87ce8538f4a1652b92457fde67df8e9a739783941a708cf426c9d2fcfc8969" +checksum = "0c44666651c93b43b78bc3d0bc280efffa64ab6c23ecb3370ed0760d6e69d417" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -296,13 +299,14 @@ dependencies = [ "regex", "tokio-stream", "tower", + "tracing", ] [[package]] name = "aws-sdk-ec2" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40ee2d853d8300a49513778beb79b1574ff9e9c94b30b1531bc0171d730ad64" +checksum = "eab2493c5857725eeafe12ec66ba4ce6feb3355e3af6828d9ef28d6152972a27" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -328,9 +332,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "434d7097fc824eee1d94cf6c5e3a30714da15b81a5b99618f8feb67f8eb2f70a" +checksum = "545335abd7c6ef7285d2972a67b9f8279ff5fec8bbb3ffc637fa436ba1e6e434" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -353,9 +357,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1533be023eeac69668eb718b1c48af7bd5e26305ed770553d2877ab1f7507b68" +checksum = "fba197193cbb4bcb6aad8d99796b2291f36fa89562ded5d4501363055b0de89f" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -373,8 +377,6 @@ dependencies = [ "aws-smithy-xml", "aws-types", "bytes", - "bytes-utils", - "fastrand", "http", "http-body", "once_cell", @@ -388,9 +390,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssm" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47a1993b71d6301d8f68f2ce6d87768b2f76130709b3c666d00e7fee52adb73c" +checksum = "014a095ed73c1f789699dfeb45a2b1debb03119910392bd7fcda4a07a72b3af4" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -409,13 +411,14 @@ dependencies = [ "regex", "tokio-stream", "tower", + "tracing", ] [[package]] name = "aws-sdk-sso" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0119bacf0c42f587506769390983223ba834e605f049babe514b2bd646dbb2" +checksum = "c8b812340d86d4a766b2ca73f740dfd47a97c2dff0c06c8517a16d88241957e4" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -433,13 +436,14 @@ dependencies = [ "regex", "tokio-stream", "tower", + "tracing", ] [[package]] name = "aws-sdk-sts" -version = "0.24.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "270b6a33969ebfcb193512fbd5e8ee5306888ad6c6d5d775cdbfb2d50d94de26" +checksum = "265fac131fbfc188e5c3d96652ea90ecc676a934e3174eaaee523c6cec040b3b" dependencies = [ "aws-credential-types", "aws-endpoint", @@ -463,9 +467,9 @@ dependencies = [ [[package]] name = "aws-sig-auth" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "660a02a98ab1af83bd8d714afbab2d502ba9b18c49e7e4cddd6bf8837ff778cb" +checksum = "3b94acb10af0c879ecd5c7bdf51cda6679a0a4f4643ce630905a77673bfa3c61" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -478,9 +482,9 @@ dependencies = 
[ [[package]] name = "aws-sigv4" -version = "0.54.2" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86529e7b64d902efea8fff52c1b2529368d04f90305cf632729e3713f6b57dc0" +checksum = "9d2ce6f507be68e968a33485ced670111d1cbad161ddbbab1e313c03d37d8f4c" dependencies = [ "aws-smithy-eventstream", "aws-smithy-http", @@ -499,9 +503,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "075d87b46420b28b64140f2ba88fa6b158c2877466a2acdbeaf396c25e4b9b33" +checksum = "13bda3996044c202d75b91afeb11a9afae9db9a721c6a7a427410018e286b880" dependencies = [ "futures-util", "pin-project-lite", @@ -511,9 +515,9 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55fe82d7463becdd632f8c6446cbdb2cbe34ad42a7d92c480d8fca08749d07a4" +checksum = "07ed8b96d95402f3f6b8b57eb4e0e45ee365f78b1a924faf20ff6e97abf1eae6" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -532,14 +536,13 @@ dependencies = [ [[package]] name = "aws-smithy-client" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17d44078855a64d757e5c1727df29ffa6679022c38cfc4ba4e63ee9567133141" +checksum = "0a86aa6e21e86c4252ad6a0e3e74da9617295d8d6e374d552be7d3059c41cedd" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-http-tower", - "aws-smithy-protocol-test", "aws-smithy-types", "bytes", "fastrand", @@ -549,7 +552,7 @@ dependencies = [ "hyper-rustls 0.23.2", "lazy_static", "pin-project-lite", - "serde", + "rustls 0.20.8", "tokio", "tower", "tracing", @@ -557,9 +560,9 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652a99272024770cbe33579dc0016914a09922b27f9a4d12f37472aacbbe71c1" +checksum = "460c8da5110835e3d9a717c61f5556b20d03c32a1dec57f8fc559b360f733bb8" dependencies = [ "aws-smithy-types", "bytes", @@ -568,9 +571,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5bd86f48d7e36fb24ee922d04d79c8353e01724b1c38757ed92593179223aa7" +checksum = "2b3b693869133551f135e1f2c77cb0b8277d9e3e17feaf2213f735857c4f0d28" dependencies = [ "aws-smithy-eventstream", "aws-smithy-types", @@ -591,9 +594,9 @@ dependencies = [ [[package]] name = "aws-smithy-http-tower" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8972d1b4ae3aba1a10e7106fed53a5a36bc8ef86170a84f6ddd33d36fac12ad" +checksum = "3ae4f6c5798a247fac98a867698197d9ac22643596dc3777f0c76b91917616b9" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -607,33 +610,18 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18973f12721e27b54891386497a57e1ba09975df1c6cfeccafaf541198962aef" +checksum = "23f9f42fbfa96d095194a632fbac19f60077748eba536eb0b9fecc28659807f8" dependencies = [ "aws-smithy-types", ] -[[package]] -name = "aws-smithy-protocol-test" -version = "0.54.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b72e9ac0818d0016ced540ba0d06975299d27684ff514173b21c9976fd72062b" -dependencies = [ - "assert-json-diff", - "http", - "pretty_assertions", - "regex", - "roxmltree", - "serde_json", - "thiserror", -] - [[package]] name = "aws-smithy-query" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2881effde104a2b0619badaad9f30ae67805e86fbbdb99e5fcc176e8bfbc1a85" +checksum = "98819eb0b04020a1c791903533b638534ae6c12e2aceda3e6e6fba015608d51d" dependencies = [ "aws-smithy-types", "urlencoding", @@ -641,9 +629,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7e499c4b15bab8eb6b234df31833cc83a1bdaa691ba72d5d81efc109d9d705" +checksum = "16a3d0bf4f324f4ef9793b86a1701d9700fbcdbd12a846da45eed104c634c6e8" dependencies = [ "base64-simd", "itoa", @@ -654,18 +642,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a73082f023f4a361fe811954da0061076709198792a3d2ad3a7498e10b606a0" +checksum = "b1b9d12875731bd07e767be7baad95700c3137b56730ec9ddeedb52a5e5ca63b" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "0.54.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f15b34253b68cde08e39b0627cc6101bcca64351229484b4743392c035d057" +checksum = "6dd209616cc8d7bfb82f87811a5c655dc97537f592689b18743bddf5dc5c4829" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -712,11 +700,12 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64-simd" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "781dd20c3aff0bd194fe7d2a977dd92f21c173891f3a03b677359e5fa457e5d5" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" dependencies = [ - "simd-abstraction", + "outref", + "vsimd", ] [[package]] @@ -942,9 +931,9 @@ dependencies = [ [[package]] name = "coldsnap" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "543f50d38e0db1460c01915674b1f329438d5b8e0bb40057862ee63bc1077681" +checksum = "faa54b44a1a199e3f37ba30ffb7391ed2fe1e4deb15cc55232786b2ca228cb33" dependencies = [ "argh", "async-trait", @@ -1090,16 +1079,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "cxx" version = "1.0.92" @@ -1192,12 +1171,6 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "diff" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" - [[package]] name = "digest" version = "0.10.6" @@ -2205,20 +2178,11 @@ dependencies = [ "windows-sys 0.45.0", ] -[[package]] -name = "output_vt100" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" -dependencies = [ - "winapi", -] - [[package]] name = "outref" -version = "0.1.0" +version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f222829ae9293e33a9f5e9f440c6760a3d450a64affe1846486b140db81c1f4" +checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" [[package]] name = "papergrid" @@ -2384,18 +2348,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "pretty_assertions" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" -dependencies = [ - "ctor", - "diff", - "output_vt100", - "yansi", -] - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2687,15 +2639,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "roxmltree" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" -dependencies = [ - "xmlparser", -] - [[package]] name = "rustc-demangle" version = "0.1.22" @@ -3038,15 +2981,6 @@ dependencies = [ "libc", ] -[[package]] -name = "simd-abstraction" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cadb29c57caadc51ff8346233b5cec1d240b68ce55cf1afc764818791876987" -dependencies = [ - "outref", -] - [[package]] name = "simplelog" version = "0.12.1" @@ -3492,9 +3426,9 @@ checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c259b2bd13fdff3305a5a92b45befb1adb315d664612c8991be57fb6a83dc126" +checksum = "eda3efa9005cf9c1966984c3b9a44c3f37b7ed2c95ba338d6ad51bba70e989a0" dependencies = [ "chrono", "dyn-clone", @@ -3519,9 +3453,9 @@ dependencies = [ [[package]] name = "tough-kms" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72673807e50c73071b1f522f1fc53410bb66ae9958d572e70e6581af35beaa90" +checksum = "cc49c1a5300e54484604162ec78417fc39306f0c9e2c98166df3ebfa203d6800" dependencies = [ "aws-config", "aws-sdk-kms", @@ -3534,14 +3468,12 @@ dependencies = [ [[package]] name = "tough-ssm" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f66050278d78786eae031e26d0d290be173da16bda6cf613546a8ec70df13e2" +checksum = "bcf4932265842607b42840e65f3fde9dde2834eaa97209b994d6c1a7ff9f3fd7" dependencies = [ "aws-config", "aws-sdk-ssm", - "serde", - "serde_json", "snafu", "tokio", "tough", @@ -3778,6 +3710,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "walkdir" version = "2.3.3" @@ -4107,12 +4045,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "yansi" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" - [[package]] name = "zeroize" version = "1.6.0" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 98d54d9e..39c5f51f 100644 --- a/tools/infrasys/Cargo.toml +++ 
b/tools/infrasys/Cargo.toml @@ -12,10 +12,10 @@ clap = { version = "4", features = ["derive"] } hex = "0.4" log = "0.4" pubsys-config = { path = "../pubsys-config/", version = "0.1" } -aws-config = "0.54" -aws-types = "0.54" -aws-sdk-cloudformation = "0.24" -aws-sdk-s3 = "0.24" +aws-config = "0.55" +aws-types = "0.55" +aws-sdk-cloudformation = "0.28" +aws-sdk-s3 = "0.28" serde_json = "1" serde_yaml = "0.8" sha2 = "0.10" diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs index f5624ddf..1a3b668b 100644 --- a/tools/infrasys/src/error.rs +++ b/tools/infrasys/src/error.rs @@ -1,4 +1,4 @@ -use aws_sdk_s3::types::SdkError; +use aws_sdk_s3::error::SdkError; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -15,7 +15,7 @@ pub enum Error { CreateStack { stack_name: String, region: String, - source: SdkError, + source: SdkError, }, #[snafu(display( @@ -53,7 +53,7 @@ pub enum Error { DescribeStack { stack_name: String, region: String, - source: SdkError, + source: SdkError, }, #[snafu(display("Missing environment variable '{}'", var))] @@ -143,7 +143,7 @@ pub enum Error { #[snafu(display("Failed to push object to bucket '{}': {}", bucket_name, source))] PutObject { bucket_name: String, - source: SdkError, + source: SdkError, }, #[snafu(display( @@ -153,7 +153,7 @@ pub enum Error { ))] PutPolicy { bucket_name: String, - source: SdkError, + source: SdkError, }, #[snafu(display("Failed to create async runtime: {}", source))] diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs index ef330f6d..7fa8ce81 100644 --- a/tools/infrasys/src/main.rs +++ b/tools/infrasys/src/main.rs @@ -4,7 +4,7 @@ mod root; mod s3; mod shared; -use aws_sdk_cloudformation::Region; +use aws_sdk_cloudformation::config::Region; use clap::Parser; use error::Result; use log::{error, info}; diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs index 990ebcd1..6fc9c804 100644 --- a/tools/infrasys/src/s3.rs +++ b/tools/infrasys/src/s3.rs @@ -1,4 +1,4 @@ -use aws_sdk_cloudformation::{Client as CloudFormationClient, Region}; +use aws_sdk_cloudformation::{config::Region, Client as CloudFormationClient}; use aws_sdk_s3::Client as S3Client; use snafu::{OptionExt, ResultExt}; use std::fs; @@ -200,7 +200,7 @@ pub async fn upload_file( .put_object() .bucket(format!("{}{}", bucket_name, prefix)) .key("root.json".to_string()) - .body(aws_sdk_s3::types::ByteStream::from(buffer)) + .body(aws_sdk_s3::primitives::ByteStream::from(buffer)) .send() .await .context(error::PutObjectSnafu { bucket_name })?; diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs index c4d04b6d..a12a6770 100644 --- a/tools/infrasys/src/shared.rs +++ b/tools/infrasys/src/shared.rs @@ -1,4 +1,4 @@ -use aws_sdk_cloudformation::model::{Output, Parameter}; +use aws_sdk_cloudformation::types::{Output, Parameter}; use aws_sdk_cloudformation::Client as CloudFormationClient; use clap::Parser; use log::info; diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml index bb2b6e98..c306d808 100644 --- a/tools/pubsys/Cargo.toml +++ b/tools/pubsys/Cargo.toml @@ -7,19 +7,19 @@ edition = "2021" publish = false [dependencies] -aws-config = "0.54" -aws-credential-types = "0.54" -aws-sdk-ebs = "0.24" -aws-sdk-ec2 = "0.24" -aws-sdk-kms = "0.24" -aws-sdk-ssm = "0.24" -aws-sdk-sts = "0.24" -aws-smithy-types = "0.54" -aws-types = "0.54" +aws-config = "0.55" +aws-credential-types = "0.55" +aws-sdk-ebs = "0.28" +aws-sdk-ec2 = "0.28" +aws-sdk-kms = "0.28" +aws-sdk-ssm = "0.28" +aws-sdk-sts = "0.28" +aws-smithy-types = 
"0.55" +aws-types = "0.55" buildsys = { path = "../buildsys", version = "0.1" } chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } clap = { version = "4", features = ["derive"] } -coldsnap = { version = "0.5", default-features = false, features = ["aws-sdk-rust-rustls"] } +coldsnap = { version = "0.6", default-features = false, features = ["aws-sdk-rust-rustls"] } duct = "0.13" futures = "0.3" governor = "0.5" @@ -45,8 +45,8 @@ tinytemplate = "1" tokio = { version = "1", features = ["full"] } # LTS tokio-stream = { version = "0.1", features = ["time"] } toml = "0.5" -tough = { version = "0.13", features = ["http"] } -tough-kms = "0.5" -tough-ssm = "0.8" +tough = { version = "0.14", features = ["http"] } +tough-kms = "0.6" +tough-ssm = "0.9" update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1" } url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys/src/aws/ami/launch_permissions.rs b/tools/pubsys/src/aws/ami/launch_permissions.rs index 467a0838..f8f58447 100644 --- a/tools/pubsys/src/aws/ami/launch_permissions.rs +++ b/tools/pubsys/src/aws/ami/launch_permissions.rs @@ -1,4 +1,7 @@ -use aws_sdk_ec2::{model::LaunchPermission, Client as Ec2Client}; +use aws_sdk_ec2::{ + types::{ImageAttributeName, LaunchPermission}, + Client as Ec2Client, +}; use serde::{Deserialize, Serialize}; use snafu::ResultExt; @@ -11,7 +14,7 @@ pub(crate) async fn get_launch_permissions( let ec2_response = ec2_client .describe_image_attribute() .image_id(ami_id) - .attribute(aws_sdk_ec2::model::ImageAttributeName::LaunchPermission) + .attribute(ImageAttributeName::LaunchPermission) .send() .await .context(error::DescribeImageAttributeSnafu { @@ -73,9 +76,9 @@ impl TryFrom for LaunchPermissionDef { } mod error { - use aws_sdk_ec2::error::DescribeImageAttributeError; - use aws_sdk_ec2::model::LaunchPermission; - use aws_sdk_ec2::types::SdkError; + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::describe_image_attribute::DescribeImageAttributeError; + use aws_sdk_ec2::types::LaunchPermission; use snafu::Snafu; #[derive(Debug, Snafu)] diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs index 24f75a76..825f23dc 100644 --- a/tools/pubsys/src/aws/ami/mod.rs +++ b/tools/pubsys/src/aws/ami/mod.rs @@ -13,13 +13,13 @@ use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots, Mod use crate::aws::{client::build_client_config, parse_arch, region_from_string}; use crate::Args; use aws_sdk_ebs::Client as EbsClient; -use aws_sdk_ec2::error::CopyImageError; -use aws_sdk_ec2::model::{ArchitectureValues, OperationType}; -use aws_sdk_ec2::output::CopyImageOutput; -use aws_sdk_ec2::types::SdkError; -use aws_sdk_ec2::{Client as Ec2Client, Region}; -use aws_sdk_sts::error::GetCallerIdentityError; -use aws_sdk_sts::output::GetCallerIdentityOutput; +use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; +use aws_sdk_ec2::operation::copy_image::{CopyImageError, CopyImageOutput}; +use aws_sdk_ec2::types::{ArchitectureValues, OperationType}; +use aws_sdk_ec2::{config::Region, Client as Ec2Client}; +use aws_sdk_sts::operation::get_caller_identity::{ + GetCallerIdentityError, GetCallerIdentityOutput, +}; use aws_sdk_sts::Client as StsClient; use clap::Parser; use futures::future::{join, lazy, ready, FutureExt}; @@ -497,10 +497,10 @@ async fn get_account_ids( mod error { use crate::aws::{ami, publish_ami}; - use aws_sdk_ec2::error::ModifyImageAttributeError; - use aws_sdk_ec2::model::LaunchPermission; - 
use aws_sdk_ec2::types::SdkError; - use aws_sdk_sts::error::GetCallerIdentityError; + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::modify_image_attribute::ModifyImageAttributeError; + use aws_sdk_ec2::types::LaunchPermission; + use aws_sdk_sts::operation::get_caller_identity::GetCallerIdentityError; use snafu::Snafu; use std::path::PathBuf; diff --git a/tools/pubsys/src/aws/ami/public.rs b/tools/pubsys/src/aws/ami/public.rs index a29af11d..6404abda 100644 --- a/tools/pubsys/src/aws/ami/public.rs +++ b/tools/pubsys/src/aws/ami/public.rs @@ -38,8 +38,8 @@ pub(crate) async fn ami_is_public( } mod error { - use aws_sdk_ec2::error::DescribeImagesError; - use aws_sdk_ec2::types::SdkError; + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::describe_images::DescribeImagesError; use snafu::Snafu; #[derive(Debug, Snafu)] diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs index 9601409d..aed614ae 100644 --- a/tools/pubsys/src/aws/ami/register.rs +++ b/tools/pubsys/src/aws/ami/register.rs @@ -1,9 +1,9 @@ use super::{snapshot::snapshot_from_image, AmiArgs}; use aws_sdk_ebs::Client as EbsClient; -use aws_sdk_ec2::model::{ +use aws_sdk_ec2::types::{ ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType, }; -use aws_sdk_ec2::{Client as Ec2Client, Region}; +use aws_sdk_ec2::{config::Region, Client as Ec2Client}; use buildsys::manifest::{self, ImageFeature}; use coldsnap::{SnapshotUploader, SnapshotWaiter}; use log::{debug, info, warn}; @@ -270,8 +270,10 @@ where mod error { use crate::aws::ami; - use aws_sdk_ec2::error::{DescribeImagesError, RegisterImageError}; - use aws_sdk_ec2::types::SdkError; + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::{ + describe_images::DescribeImagesError, register_image::RegisterImageError, + }; use snafu::Snafu; use std::path::PathBuf; diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs index f9ec8d4b..9a2c7cd5 100644 --- a/tools/pubsys/src/aws/ami/wait.rs +++ b/tools/pubsys/src/aws/ami/wait.rs @@ -1,6 +1,5 @@ use crate::aws::client::build_client_config; -use aws_sdk_ec2::model::ImageState; -use aws_sdk_ec2::{Client as Ec2Client, Region}; +use aws_sdk_ec2::{config::Region, types::ImageState, Client as Ec2Client}; use log::info; use pubsys_config::AwsConfig as PubsysAwsConfig; use snafu::{ensure, ResultExt}; @@ -102,8 +101,8 @@ pub(crate) async fn wait_for_ami( } mod error { - use aws_sdk_ec2::error::DescribeImagesError; - use aws_sdk_ec2::types::SdkError; + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::describe_images::DescribeImagesError; use snafu::Snafu; #[derive(Debug, Snafu)] diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs index 4c4c0a1a..80e06de5 100644 --- a/tools/pubsys/src/aws/mod.rs +++ b/tools/pubsys/src/aws/mod.rs @@ -1,5 +1,5 @@ -use aws_sdk_ec2::model::ArchitectureValues; -use aws_sdk_ec2::Region; +use aws_sdk_ec2::config::Region; +use aws_sdk_ec2::types::ArchitectureValues; #[macro_use] pub(crate) mod client; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index f8aeaa56..28b6b292 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -7,8 +7,8 @@ use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; use crate::aws::validate_ssm::parse_parameters; use crate::aws::{parse_arch, region_from_string}; use crate::Args; -use aws_sdk_ec2::model::ArchitectureValues; -use 
aws_sdk_ssm::{Client as SsmClient, Region}; +use aws_sdk_ec2::types::ArchitectureValues; +use aws_sdk_ssm::{config::Region, Client as SsmClient}; use clap::Parser; use log::{info, trace}; use pubsys_config::InfraConfig; @@ -376,7 +376,7 @@ mod test { use std::collections::HashMap; use crate::aws::{promote_ssm::merge_parameters, ssm::SsmKey}; - use aws_sdk_ssm::Region; + use aws_sdk_ssm::config::Region; #[test] fn combined_parameters() { diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index 605de3f4..a80fb505 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -7,13 +7,15 @@ use crate::aws::ami::Image; use crate::aws::client::build_client_config; use crate::aws::region_from_string; use crate::Args; -use aws_sdk_ec2::error::{ModifyImageAttributeError, ModifySnapshotAttributeError}; -use aws_sdk_ec2::model::{ +use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; +use aws_sdk_ec2::operation::{ + modify_image_attribute::{ModifyImageAttributeError, ModifyImageAttributeOutput}, + modify_snapshot_attribute::{ModifySnapshotAttributeError, ModifySnapshotAttributeOutput}, +}; +use aws_sdk_ec2::types::{ ImageAttributeName, OperationType, PermissionGroup, SnapshotAttributeName, }; -use aws_sdk_ec2::output::{ModifyImageAttributeOutput, ModifySnapshotAttributeOutput}; -use aws_sdk_ec2::types::SdkError; -use aws_sdk_ec2::{Client as Ec2Client, Region}; +use aws_sdk_ec2::{config::Region, Client as Ec2Client}; use clap::{Args as ClapArgs, Parser}; use futures::future::{join, ready}; use futures::stream::{self, StreamExt}; @@ -561,10 +563,11 @@ pub(crate) async fn modify_regional_images( mod error { use crate::aws::ami; - use aws_sdk_ec2::error::{ - DescribeImagesError, ModifyImageAttributeError, ModifySnapshotAttributeError, + use aws_sdk_ec2::error::SdkError; + use aws_sdk_ec2::operation::{ + describe_images::DescribeImagesError, modify_image_attribute::ModifyImageAttributeError, + modify_snapshot_attribute::ModifySnapshotAttributeError, }; - use aws_sdk_ec2::types::SdkError; use snafu::Snafu; use std::io; use std::path::PathBuf; diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs index da3e6f1f..82d3685b 100644 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ b/tools/pubsys/src/aws/ssm/mod.rs @@ -13,8 +13,8 @@ use crate::aws::{ }; use crate::Args; use aws_config::SdkConfig; -use aws_sdk_ec2::{model::ArchitectureValues, Client as Ec2Client}; -use aws_sdk_ssm::{Client as SsmClient, Region}; +use aws_sdk_ec2::{types::ArchitectureValues, Client as Ec2Client}; +use aws_sdk_ssm::{config::Region, Client as SsmClient}; use clap::Parser; use futures::stream::{StreamExt, TryStreamExt}; use governor::{prelude::*, Quota, RateLimiter}; diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs index fcfeb296..f92666fd 100644 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ b/tools/pubsys/src/aws/ssm/ssm.rs @@ -1,11 +1,12 @@ //! The ssm module owns the getting and setting of parameters in SSM. 
use super::{SsmKey, SsmParameters}; -use aws_sdk_ssm::error::{GetParametersError, PutParameterError}; -use aws_sdk_ssm::model::ParameterType; -use aws_sdk_ssm::output::{GetParametersOutput, PutParameterOutput}; -use aws_sdk_ssm::types::SdkError; -use aws_sdk_ssm::{Client as SsmClient, Region}; +use aws_sdk_ssm::error::{ProvideErrorMetadata, SdkError}; +use aws_sdk_ssm::operation::{ + get_parameters::{GetParametersError, GetParametersOutput}, + put_parameter::{PutParameterError, PutParameterOutput}, +}; +use aws_sdk_ssm::{config::Region, types::ParameterType, Client as SsmClient}; use futures::future::{join, ready}; use futures::stream::{self, FuturesUnordered, StreamExt}; use log::{debug, error, info, trace, warn}; @@ -407,8 +408,10 @@ pub(crate) async fn validate_parameters( } pub(crate) mod error { - use aws_sdk_ssm::error::{GetParametersByPathError, GetParametersError}; - use aws_sdk_ssm::types::SdkError; + use aws_sdk_ssm::error::SdkError; + use aws_sdk_ssm::operation::{ + get_parameters::GetParametersError, get_parameters_by_path::GetParametersByPathError, + }; use snafu::Snafu; use std::error::Error as _; use std::time::Duration; diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index 56c1a88a..bde1ccde 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -3,7 +3,7 @@ use super::{BuildContext, SsmKey, SsmParameters}; use crate::aws::ami::Image; -use aws_sdk_ssm::Region; +use aws_sdk_ssm::config::Region; use log::trace; use serde::{Deserialize, Serialize}; use snafu::{ensure, ResultExt}; @@ -275,7 +275,7 @@ mod test { use super::{RenderedParameter, RenderedParametersMap}; use crate::aws::{ami::Image, ssm::SsmKey}; - use aws_sdk_ssm::Region; + use aws_sdk_ssm::config::Region; // These tests assert that the RenderedParametersMap can be created correctly. #[test] diff --git a/tools/pubsys/src/aws/validate_ami/ami.rs b/tools/pubsys/src/aws/validate_ami/ami.rs index c5d96a81..4ee85fb4 100644 --- a/tools/pubsys/src/aws/validate_ami/ami.rs +++ b/tools/pubsys/src/aws/validate_ami/ami.rs @@ -1,7 +1,6 @@ //! The ami module owns the describing of images in EC2. 
-use aws_sdk_ec2::model::Image; -use aws_sdk_ec2::{Client as Ec2Client, Region}; +use aws_sdk_ec2::{config::Region, types::Image, Client as Ec2Client}; use futures::future::{join, ready}; use futures::stream::{FuturesUnordered, StreamExt}; use log::{info, trace}; @@ -182,8 +181,8 @@ pub(crate) async fn describe_images_in_region( } pub(crate) mod error { - use aws_sdk_ec2::error::DescribeImagesError; - use aws_sdk_ssm::types::SdkError; + use aws_sdk_ec2::operation::describe_images::DescribeImagesError; + use aws_sdk_ssm::error::SdkError; use aws_smithy_types::error::display::DisplayErrorContext; use snafu::Snafu; diff --git a/tools/pubsys/src/aws/validate_ami/mod.rs b/tools/pubsys/src/aws/validate_ami/mod.rs index f57bbc4b..e827059c 100644 --- a/tools/pubsys/src/aws/validate_ami/mod.rs +++ b/tools/pubsys/src/aws/validate_ami/mod.rs @@ -9,7 +9,7 @@ use self::results::{AmiValidationResult, AmiValidationResultStatus, AmiValidatio use crate::aws::client::build_client_config; use crate::aws::validate_ami::ami::describe_images; use crate::Args; -use aws_sdk_ec2::{Client as AmiClient, Region}; +use aws_sdk_ec2::{config::Region, Client as AmiClient}; use clap::Parser; use log::{error, info, trace}; use pubsys_config::InfraConfig; @@ -282,7 +282,7 @@ mod test { ami::launch_permissions::LaunchPermissionDef, validate_ami::results::{AmiValidationResult, AmiValidationResultStatus}, }; - use aws_sdk_ec2::Region; + use aws_sdk_ec2::config::Region; use std::collections::{HashMap, HashSet}; // These tests assert that the images can be validated correctly. diff --git a/tools/pubsys/src/aws/validate_ami/results.rs b/tools/pubsys/src/aws/validate_ami/results.rs index 93b723cf..698fbe01 100644 --- a/tools/pubsys/src/aws/validate_ami/results.rs +++ b/tools/pubsys/src/aws/validate_ami/results.rs @@ -2,7 +2,7 @@ use super::ami::ImageDef; use super::Result; -use aws_sdk_ec2::Region; +use aws_sdk_ec2::config::Region; use serde::{Deserialize, Serialize}; use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; use std::collections::{HashMap, HashSet}; @@ -198,7 +198,7 @@ impl AmiValidationResults { mod test { use super::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults}; use crate::aws::validate_ami::ami::ImageDef; - use aws_sdk_ssm::Region; + use aws_sdk_ssm::config::Region; use std::collections::{HashMap, HashSet}; // These tests assert that the `get_results_for_status` function returns the correct values. diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs index 4fbc9af2..3dc5f4ee 100644 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ b/tools/pubsys/src/aws/validate_ssm/mod.rs @@ -8,7 +8,7 @@ use super::ssm::ssm::get_parameters_by_prefix; use super::ssm::{SsmKey, SsmParameters}; use crate::aws::client::build_client_config; use crate::Args; -use aws_sdk_ssm::{Client as SsmClient, Region}; +use aws_sdk_ssm::{config::Region, Client as SsmClient}; use clap::Parser; use log::{error, info, trace}; use pubsys_config::InfraConfig; @@ -313,7 +313,7 @@ mod test { ssm::{SsmKey, SsmParameters}, validate_ssm::{results::SsmValidationResult, validate_parameters_in_region}, }; - use aws_sdk_ssm::Region; + use aws_sdk_ssm::config::Region; use std::collections::{HashMap, HashSet}; // These tests assert that the parameters can be validated correctly. 
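The moves in the hunks above all follow the module reorganization that comes with the aws-sdk-rust bump applied in this patch (for example aws-sdk-ec2 0.24 -> 0.28 in tools/testsys/Cargo.toml below): per-operation error and output types now live under operation::<operation_name>, model types under types, SdkError under error, Region under config, and error metadata is exposed through the ProvideErrorMetadata trait imported in the publish_ami and ssm hunks. A minimal before/after sketch for the DescribeImages imports, assembled only from paths that appear in the hunks above; the trailing function is an illustrative placeholder, not code from the tree:

// Old layout (aws-sdk-ec2 0.24), as removed in the hunks above:
//   use aws_sdk_ec2::error::DescribeImagesError;
//   use aws_sdk_ec2::model::Image;
//   use aws_sdk_ec2::types::SdkError;
//   use aws_sdk_ec2::{Client as Ec2Client, Region};
//
// New layout (aws-sdk-ec2 0.28), as added in the hunks above:
use aws_sdk_ec2::error::SdkError;
use aws_sdk_ec2::operation::describe_images::DescribeImagesError;
use aws_sdk_ec2::types::Image;
use aws_sdk_ec2::{config::Region, Client as Ec2Client};

// Hypothetical placeholder: it only shows that the error type keeps the same
// spelling in signatures (e.g. snafu error variants) as SdkError<DescribeImagesError>,
// even though DescribeImagesError now comes from the operation module.
fn _sketch(_err: SdkError<DescribeImagesError>, _images: Vec<Image>, _region: Region, _client: Ec2Client) {}
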
diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs index d21fc9af..eadd4290 100644 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ b/tools/pubsys/src/aws/validate_ssm/results.rs @@ -1,7 +1,7 @@ //! The results module owns the reporting of SSM validation results. use crate::aws::validate_ssm::Result; -use aws_sdk_ssm::Region; +use aws_sdk_ssm::config::Region; use serde::{Deserialize, Serialize}; use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; use std::collections::{HashMap, HashSet}; @@ -206,7 +206,7 @@ mod test { use crate::aws::validate_ssm::results::{ SsmValidationResult, SsmValidationResultStatus, SsmValidationResults, }; - use aws_sdk_ssm::Region; + use aws_sdk_ssm::config::Region; // These tests assert that the `get_results_for_status` function returns the correct values. diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs index dcdb4366..97b37d84 100644 --- a/tools/pubsys/src/repo.rs +++ b/tools/pubsys/src/repo.rs @@ -5,7 +5,7 @@ pub(crate) mod refresh_repo; pub(crate) mod validate_repo; use crate::{friendly_version, Args}; -use aws_sdk_kms::{Client as KmsClient, Region}; +use aws_sdk_kms::{config::Region, Client as KmsClient}; use chrono::{DateTime, Utc}; use clap::Parser; use lazy_static::lazy_static; diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index fb91c214..22dd85ca 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -8,8 +8,8 @@ publish = false [dependencies] async-trait = "0.1" -aws-config = "0.54.1" -aws-sdk-ec2 = "0.24" +aws-config = "0.55" +aws-sdk-ec2 = "0.28" base64 = "0.20" bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index ab9985ba..5f6fc2f2 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -1,7 +1,7 @@ use crate::crds::BottlerocketInput; use crate::error::{self, Result}; -use aws_sdk_ec2::model::{Filter, Image}; -use aws_sdk_ec2::Region; +use aws_sdk_ec2::config::Region; +use aws_sdk_ec2::types::{Filter, Image}; use bottlerocket_types::agent_config::{ ClusterType, CustomUserData, Ec2Config, Ec2KarpenterConfig, KarpenterDeviceMapping, }; diff --git a/tools/testsys/src/error.rs b/tools/testsys/src/error.rs index f937e7ac..06f73950 100644 --- a/tools/testsys/src/error.rs +++ b/tools/testsys/src/error.rs @@ -1,5 +1,5 @@ -use aws_sdk_ec2::error::DescribeImagesError; -use aws_sdk_ec2::types::SdkError; +use aws_sdk_ec2::error::SdkError; +use aws_sdk_ec2::operation::describe_images::DescribeImagesError; use snafu::Snafu; use std::path::PathBuf; From 512ce304f74f588fdaa715c0ae4c78b7a3cecdfe Mon Sep 17 00:00:00 2001 From: "Patrick J.P. 
Culp" Date: Tue, 22 Aug 2023 23:42:35 +0000 Subject: [PATCH 1065/1356] tools: update rust dependencies --- tools/Cargo.lock | 882 ++++++++++++++++++++++------------------------- tools/deny.toml | 25 +- 2 files changed, 430 insertions(+), 477 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 9ddcacca..a83367d2 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.19.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -19,13 +19,19 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -85,9 +91,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -95,9 +101,9 @@ dependencies = [ [[package]] name = "argh" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab257697eb9496bf75526f0217b5ed64636a9cfafa78b8365c71bd283fcef93e" +checksum = "7af5ba06967ff7214ce4c7419c7d185be7ecd6cc4965a8f6e1d8ce0398aad219" dependencies = [ "argh_derive", "argh_shared", @@ -105,21 +111,24 @@ dependencies = [ [[package]] name = "argh_derive" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b382dbd3288e053331f03399e1db106c9fb0d8562ad62cb04859ae926f324fa6" +checksum = "56df0aeedf6b7a2fc67d06db35b09684c3e8da0c95f8f27685cb17e08413d87a" dependencies = [ "argh_shared", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "argh_shared" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cb94155d965e3d37ffbbe7cc5b82c3dd79dd33bd48e536f73d2cfb8d85506f" +checksum = "5693f39141bda5760ecc4111ab08da40565d1771038c4a0250f03457ec707531" +dependencies = [ + "serde", +] [[package]] name = "assert-json-diff" @@ -134,24 +143,24 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b015a331cc64ebd1774ba119538573603427eaace0a1950c423ab971f903796" +checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] @@ -189,7 +198,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 1.9.0", "hex", "http", "hyper", @@ -209,7 +218,7 @@ checksum = "1fcdb2f7acbc076ff5ad05e7864bdb191ca70a6fd07668dc3a1a8bcd051de5ae" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "fastrand", + "fastrand 1.9.0", "tokio", "tracing", "zeroize", @@ -268,7 +277,7 @@ dependencies = [ "aws-smithy-xml", "aws-types", "bytes", - "fastrand", + "fastrand 1.9.0", "http", "regex", "tokio-stream", @@ -294,7 +303,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 1.9.0", "http", "regex", "tokio-stream", @@ -322,7 +331,7 @@ dependencies = [ "aws-smithy-xml", "aws-types", "bytes", - "fastrand", + "fastrand 1.9.0", "http", "regex", "tokio-stream", @@ -406,7 +415,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 1.9.0", "http", "regex", "tokio-stream", @@ -545,7 +554,7 @@ dependencies = [ "aws-smithy-http-tower", "aws-smithy-types", "bytes", - "fastrand", + "fastrand 1.9.0", "http", "http-body", "hyper", @@ -667,9 +676,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -694,9 +703,9 @@ checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64-simd" @@ -714,6 +723,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" + [[package]] name = "block-buffer" version = "0.10.4" @@ -748,9 +763,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", "serde", @@ -790,9 +805,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecount" @@ -839,9 +854,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = 
"f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cfg-if" @@ -851,12 +869,12 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", - "num-integer", "num-traits", "serde", "winapi", @@ -870,7 +888,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -879,9 +897,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.5" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2686c4115cb0810d9a984776e197823d08ec94f176549a89a9efded477c456dc" +checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" dependencies = [ "clap_builder", "clap_derive", @@ -890,27 +908,26 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.5" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e53afce1efce6ed1f633cf0e57612fe51db54a1ee4fd8f8503d078fe02d69ae" +checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" dependencies = [ "anstream", "anstyle", - "bitflags", "clap_lex", "strsim 0.10.0", ] [[package]] name = "clap_derive" -version = "4.3.2" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.29", ] [[package]] @@ -919,16 +936,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "coldsnap" version = "0.6.0" @@ -972,15 +979,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1001,18 +1008,18 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] [[package]] name = "crc32c" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3dfea2db42e9927a3845fb268a10a72faed6d416065f77873f05e411457c363e" +checksum = "d8f48d60e5b4d2c53d5c2b1d8a58c849a70ae5e5509b08a48d047e3b65714a74" dependencies = [ "rustc_version", ] @@ -1049,9 +1056,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", @@ -1062,9 +1069,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1079,50 +1086,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "cxx" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling" version = "0.14.4" @@ -1171,11 +1134,17 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "deranged" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" + [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", @@ -1223,15 +1192,15 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" +checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encode_unicode" @@ -1263,13 +1232,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1303,6 +1272,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "fnv" version = "1.0.7" @@ -1311,18 +1286,18 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1335,9 +1310,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1351,9 +1326,9 @@ checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1368,26 +1343,26 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -1397,9 +1372,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1433,9 +1408,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -1444,15 +1419,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "globset" -version = "0.4.10" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" dependencies = [ "aho-corasick", "bstr", @@ -1481,9 +1456,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -1500,9 +1475,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -1535,18 +1510,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1565,11 +1531,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1596,9 +1562,9 @@ dependencies = [ [[package]] name = "http-range-header" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" @@ -1608,9 +1574,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -1620,9 +1586,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -1659,17 +1625,18 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", "log", - "rustls 0.21.0", + "rustls 0.21.6", "rustls-native-certs", "tokio", - "tokio-rustls 0.24.0", + "tokio-rustls 0.24.1", ] [[package]] @@ -1686,26 +1653,25 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -1716,9 +1682,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1736,11 +1702,12 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" dependencies = [ "console", + "instant", "number_prefix", "portable-atomic", "unicode-width", @@ -1756,7 +1723,7 @@ dependencies = [ "aws-sdk-cloudformation", "aws-sdk-s3", "aws-types", - "clap 4.3.5", + "clap 4.3.23", "hex", "log", "pubsys-config", @@ -1779,46 +1746,34 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "io-lifetimes" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" -dependencies = [ - "hermit-abi 0.3.1", - "libc", - "windows-sys 0.45.0", -] - [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8687c819457e979cc940d09cb16e42a1bf70aa6b60a549de6d3a62a0ee90c69e" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", + "hermit-abi 0.3.2", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1852,7 +1807,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "chrono", "serde", @@ -1887,7 +1842,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls 0.24.0", + "hyper-rustls 0.24.1", "hyper-timeout", "jsonpath_lib", "k8s-openapi", @@ -1895,7 +1850,7 @@ dependencies = [ "pem", "pin-project", "rand", - "rustls 0.21.0", + "rustls 0.21.6", "rustls-pemfile", "secrecy", "serde", @@ -1949,18 +1904,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.141" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" - -[[package]] -name = "link-cplusplus" -version = "1.0.8" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "linked-hash-map" @@ -1970,15 +1916,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -1986,12 +1932,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "mach" @@ 
-2025,9 +1968,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -2040,23 +1983,22 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2065,7 +2007,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "static_assertions", @@ -2095,20 +2037,20 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.2", "libc", ] @@ -2129,9 +2071,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.3" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" dependencies = [ "memchr", ] @@ -2149,9 +2091,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl-probe" @@ -2170,12 +2112,12 @@ dependencies = [ [[package]] name = "os_pipe" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a53dbb20faf34b16087a931834cba2d7a73cc74af2b7ef345a4c8324e2409a12" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" dependencies = [ "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2207,15 +2149,15 @@ dependencies = [ [[package]] name = "parking_lot_core" 
-version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.5", ] [[package]] @@ -2229,18 +2171,18 @@ dependencies = [ [[package]] name = "path-absolutize" -version = "3.0.14" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1d4993b16f7325d90c18c3c6a3327db7808752db8d208cea0acee0abd52c52" +checksum = "43eb3595c63a214e1b37b44f44b0a84900ef7ae0b4c5efce59e123d246d7a0de" dependencies = [ "path-dedot", ] [[package]] name = "path-dedot" -version = "3.0.18" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a81540d94551664b72b72829b12bd167c73c9d25fbac0e04fafa8023f7e4901" +checksum = "9d55e486337acb9973cdea3ec5638c1b3bcb22e573b2b7b41969e0c744d5a15e" dependencies = [ "once_cell", ] @@ -2256,15 +2198,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.6" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" dependencies = [ "thiserror", "ucd-trie", @@ -2272,9 +2214,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" dependencies = [ "pest", "pest_generator", @@ -2282,22 +2224,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ "once_cell", "pest", @@ -2306,29 +2248,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -2338,9 +2280,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "portable-atomic" -version = "0.3.19" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" [[package]] name = "ppv-lite86" @@ -2374,9 +2316,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -2396,7 +2338,7 @@ dependencies = [ "aws-types", "buildsys", "chrono", - "clap 4.3.5", + "clap 4.3.23", "coldsnap", "duct", "futures", @@ -2449,7 +2391,7 @@ dependencies = [ name = "pubsys-setup" version = "0.1.0" dependencies = [ - "clap 4.3.5", + "clap 4.3.23", "hex", "log", "pubsys-config", @@ -2480,9 +2422,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -2523,7 +2465,7 @@ version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -2554,7 +2496,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", ] [[package]] @@ -2564,15 +2515,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.3" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", 
"memchr", @@ -2581,17 +2544,17 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "20b9b67e2ca7dd9e9f9285b759de30ff538aab981abaaf7bc9bd90b84a0126c3" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -2600,7 +2563,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls 0.23.2", + "hyper-rustls 0.24.1", "ipnet", "js-sys", "log", @@ -2608,13 +2571,13 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.20.8", + "rustls 0.21.6", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", @@ -2641,9 +2604,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" @@ -2656,16 +2619,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.11" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags", + "bitflags 2.4.0", "errno", - "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2682,9 +2644,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.0" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07180898a28ed6a7f7ba2311594308f595e3dd2e3c3812fa0a80a47b45f17e5d" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", @@ -2694,9 +2656,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -2706,18 +2668,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", ] [[package]] name = "rustls-webpki" -version = "0.100.2" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -2725,9 +2687,9 @@ 
dependencies = [ [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -2740,11 +2702,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -2773,15 +2735,9 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scratch" -version = "1.0.5" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -2805,11 +2761,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -2818,9 +2774,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -2828,18 +2784,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.160" +version = "1.0.185" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31" dependencies = [ "serde_derive", ] @@ -2856,13 +2812,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.160" +version = "1.0.185" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.29", ] [[package]] @@ -2878,9 +2834,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = 
"bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa", @@ -2947,9 +2903,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", @@ -2994,24 +2950,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "snafu" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0656e7e3ffb70f6c39b3c2a86332bb74aa3c679da781642590f3c1118c5045" +checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" dependencies = [ "backtrace", "doc-comment", @@ -3020,9 +2976,9 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" +checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" dependencies = [ "heck", "proc-macro2", @@ -3066,9 +3022,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -3083,9 +3039,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -3118,15 +3074,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.4.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand", - "redox_syscall", + "fastrand 2.0.0", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -3158,9 +3114,9 @@ dependencies = [ "base64 0.20.0", "bottlerocket-types", "bottlerocket-variant", - "clap 4.3.5", + "clap 4.3.23", "env_logger", - "fastrand", + "fastrand 1.9.0", "futures", "handlebars", "log", @@ -3238,30 +3194,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.29", ] [[package]] name = "time" -version = "0.3.20" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" dependencies = [ + "deranged", "itoa", "libc", "num_threads", @@ -3272,15 +3229,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" dependencies = [ "time-core", ] @@ -3312,14 +3269,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", + "backtrace", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot", @@ -3327,7 +3284,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -3342,13 +3299,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] @@ -3364,19 +3321,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.0", + "rustls 0.21.6", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -3397,9 +3354,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -3498,12 +3455,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" +checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ - "base64 0.20.0", - "bitflags", + "base64 0.21.2", + "bitflags 2.4.0", "bytes", "futures-core", "futures-util", @@ -3544,20 +3501,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", ] @@ -3604,9 +3561,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "unescape" @@ -3622,9 +3579,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -3643,9 +3600,9 @@ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" [[package]] name = "untrusted" @@ -3670,9 +3627,9 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -3682,9 +3639,9 @@ dependencies = [ [[package]] name = "urlencoding" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf-8" @@ -3728,11 +3685,10 @@ dependencies = [ [[package]] name = "want" 
-version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -3750,9 +3706,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3760,24 +3716,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -3787,9 +3743,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3797,28 +3753,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3836,12 +3792,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "winapi" @@ -3875,18 +3828,12 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.42.0" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.5", ] [[package]] @@ -3904,7 +3851,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.5", ] [[package]] @@ -3924,17 +3871,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -3945,9 +3892,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -3957,9 +3904,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -3969,9 +3916,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -3981,9 +3928,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -3993,9 +3940,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -4005,9 +3952,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -4017,17 +3964,18 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] diff --git a/tools/deny.toml b/tools/deny.toml index a9b911c0..584d80be 100644 --- a/tools/deny.toml +++ b/tools/deny.toml @@ -65,12 +65,22 @@ multiple-versions = "deny" wildcards = "deny" skip = [ - # num_cpus uses an old version of hermit-abi - { name = "hermit-abi", version = "=0.2.6" }, - # tungstenite other crates use an old version of base64 - { name = "base64", version = "=0.13.1" }, + # several dependencies are using multiple versions of base64 + { name = "base64" }, + # several dependencies are using an old version of bitflags + { name = "bitflags", version = "=1.3" }, + # several dependencies are using an old version of serde_yaml + { name = "serde_yaml", version = "=0.8" }, # governor uses an old version of wasi { name = "wasi", version = "=0.10.2" }, + # aws-sdk-rust is using an old version of fastrand + { name = "fastrand", version = "=1.9" }, + # aws-sdk-rust is using an old version of rustls, hyper-rustls, and tokio-rustls + { name = "rustls", version = "=0.20" }, + { name = "hyper-rustls", version = "=0.23" }, + { name = "tokio-rustls", version = "=0.23" }, + # kube-client uses an old version of redox_syscall + { name = "redox_syscall", version = "=0.2" }, ] skip-tree = [ @@ -78,12 +88,7 @@ skip-tree = [ # are using different versions of windows-sys. we skip the # dependency tree because windows-sys has many sub-crates # that differ in major version. 
- { name = "windows-sys", version = "=0.42.0" }, - - # TestSys uses a newer version of base64 and serde_yaml - { name = "testsys-model", version = "=0.0.8" }, - { name = "bottlerocket-types", version = "=0.0.8" }, - + { name = "windows-sys" }, # generate-readme uses an old version of clap and other dependencies { name = "generate-readme", version = "0.1.0" } ] From df06bff7a508b53558799e590ced7816b3e3e950 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 25 Aug 2023 12:12:54 -0700 Subject: [PATCH 1066/1356] chore: explicity set cargo feature resolver version to 2 This gets rid of a warning that comes with rust 1.72.0. This does not impact any of our existing crates since they're all on edition 2021. See https://github.com/rust-lang/cargo/issues/10112 --- tools/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 03fb9b38..223aaf87 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "infrasys", "buildsys", From 9d78962a12a34606e72a794eddbef66f9867d932 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 25 Aug 2023 12:22:19 -0700 Subject: [PATCH 1067/1356] tools: fix 1.72.0 clippy warnings --- tools/buildsys/src/gomod.rs | 4 ++-- tools/pubsys/src/aws/promote_ssm/mod.rs | 2 +- tools/pubsys/src/aws/publish_ami/mod.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs index de4b89c8..6a1dabc0 100644 --- a/tools/buildsys/src/gomod.rs +++ b/tools/buildsys/src/gomod.rs @@ -48,7 +48,7 @@ const GO_MOD_DOCKER_SCRIPT_NAME: &str = "docker-go-script.sh"; // buildsys is executed from the context of many different package directories, // managing a temporary file via this Rust module prevents having to acquire the // path of some static script file on the host system. -const GO_MOD_SCRIPT_TMPL: &str = r###"#!/bin/bash +const GO_MOD_SCRIPT_TMPL: &str = r#".#!/bin/bash set -e @@ -68,7 +68,7 @@ popd tar czf __OUTPUT__ "${targetdir}"/vendor rm -rf "${targetdir}" touch -r __LOCAL_FILE_NAME__ __OUTPUT__ -"###; +"#; impl GoMod { pub(crate) fn vendor( diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs index 28b6b292..21f4ca1d 100644 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ b/tools/pubsys/src/aws/promote_ssm/mod.rs @@ -281,7 +281,7 @@ fn merge_parameters( source_parameters .into_iter() // Process the `set_parameters` second so that they overwrite existing values. 
- .chain(set_parameters.clone().into_iter()) + .chain(set_parameters.clone()) .for_each(|(ssm_key, ssm_value)| { combined_parameters // The `entry()` API demands that we clone diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs index a80fb505..578bdee4 100644 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ b/tools/pubsys/src/aws/publish_ami/mod.rs @@ -514,7 +514,7 @@ pub(crate) async fn modify_regional_images( info!("Modified permissions of image {} in {}", image_id, region); // Set the `public` and `launch_permissions` fields for the Image object - let mut image = images.get_mut(&Region::new(region.clone())).ok_or( + let image = images.get_mut(&Region::new(region.clone())).ok_or( error::Error::MissingRegion { region: region.clone(), }, From 7df950ca1864ebf20ed9dd8665993267d8c8f439 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 18 Jul 2023 11:55:21 +0000 Subject: [PATCH 1068/1356] linux-firmware: Add base firmware package Create linux-firmware package and import latest source from upstream. Adjust spec file to package all firmware files available. Signed-off-by: Leonard Foerster --- packages/linux-firmware/Cargo.toml | 16 +++++++ packages/linux-firmware/linux-firmware.spec | 47 +++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 packages/linux-firmware/Cargo.toml create mode 100644 packages/linux-firmware/linux-firmware.spec diff --git a/packages/linux-firmware/Cargo.toml b/packages/linux-firmware/Cargo.toml new file mode 100644 index 00000000..6cfbb6b0 --- /dev/null +++ b/packages/linux-firmware/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "linux-firmware" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[package.metadata.build-package] +package-name = "linux-firmware" + +[lib] +path = "../packages.rs" + +[[package.metadata.build-package.external-files]] +url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20230625.tar.xz" +sha512 = "0e48aa7f63495485426d37491c7cb61843165625bd47f912c5d83628c6de871759f1a78be3af3d651f7c396bd87dff07e21ba7afc47896c1c143106d5f16d351" diff --git a/packages/linux-firmware/linux-firmware.spec b/packages/linux-firmware/linux-firmware.spec new file mode 100644 index 00000000..7453277c --- /dev/null +++ b/packages/linux-firmware/linux-firmware.spec @@ -0,0 +1,47 @@ +%global debug_package %{nil} + +%global fwdir %{_cross_libdir}/firmware + +# Many of the firmware files have specialized binary formats that are not supported +# by the strip binary used in __spec_install_post macro. Work around build failures +# by skipping striping. +%global __strip /usr/bin/true + +Name: %{_cross_os}linux-firmware +Version: 20230625 +Release: 1%{?dist} +Summary: Firmware files used by the Linux kernel +License: GPL+ and GPLv2+ and MIT and Redistributable, no modification permitted +URL: https://www.kernel.org/ + +Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{version}.tar.xz + +%description +%{summary}. + +%prep +%autosetup -n linux-firmware-%{version} -p1 + +%build + +%install +mkdir -p %{buildroot}/%{fwdir} +mkdir -p %{buildroot}/%{fwdir}/updates + +# Here we have potential to shave off some extra space by using `install-xz` of +# `install-zst` to compress firmware images on disk. However, that functionality +# relies on kernels being configured with `CONFIG_FW_LOADER_COMPRESS_[ZSTD|XZ]` +# which we currently do not have. 
+make DESTDIR=%{buildroot}/ FIRMWAREDIR=%{fwdir} install + + +# Remove executable bits from random firmware +pushd %{buildroot}/%{fwdir} +find . -type f -executable -exec chmod -x {} \; +popd + +%files +%dir %{fwdir} +%{fwdir}/* +%license LICENCE.* LICENSE.* GPL* WHENCE +%{_cross_attribution_file} From ed7f14591f50ec2291c885ff6934b73a469efc02 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 26 Jul 2023 12:05:27 +0000 Subject: [PATCH 1069/1356] linux-firmware: Remove a lot of firmware we do not need in Bottlerocket Bottlerocket is an OS with a comparably cut-down kernel config and as such does not provide the wide range of drivers one can find in general purpose linux distributions. Many of these devices, Bottlerocket does not support have firmware binaries shipped through linux-firmware. Drop those firmware blobs we do not ship drivers for in Bottlerocket. I have tried to section the removal into groups of drivers with commonalities (e.g. Wifi and Bluetooth devices). Each patch has a list mapping each driver to a specific kernel config option so we can easily find which firmware binaries to add back into Bottlerocket if we add drivers. The patches only touch the WHENCE file and not the actual binaries as rpm does not like to apply git binary patches. The WHENCE file is a record of all the firmware binaries in the package with additional information for each file. On installation the copy-firmware.sh script uses the information in the file to determine which files to copy, so it is enough to remove information from WHENCE in order to remove binaries from the installed directories. Signed-off-by: Leonard Foerster --- ...nd-remove-firmware-for-snd-audio-dev.patch | 1979 +++++++++ ...ideo-Remove-firmware-for-video-broad.patch | 1630 ++++++++ ...t-wifi-Remove-firmware-for-Bluetooth.patch | 3657 +++++++++++++++++ ...csi-Remove-firmware-for-SCSI-devices.patch | 191 + ...sb-remove-firmware-for-USB-Serial-PC.patch | 901 ++++ ...thernet-Remove-firmware-for-ethernet.patch | 539 +++ ...emove-firmware-for-Accelarator-devic.patch | 836 ++++ ...-gpu-Remove-firmware-for-GPU-devices.patch | 1923 +++++++++ ...arious-Remove-firmware-for-various-d.patch | 324 ++ ...mware-amd-ucode-Remove-amd-microcode.patch | 122 + packages/linux-firmware/linux-firmware.spec | 11 + 11 files changed, 12113 insertions(+) create mode 100644 packages/linux-firmware/0001-linux-firmware-snd-remove-firmware-for-snd-audio-dev.patch create mode 100644 packages/linux-firmware/0002-linux-firmware-video-Remove-firmware-for-video-broad.patch create mode 100644 packages/linux-firmware/0003-linux-firmware-bt-wifi-Remove-firmware-for-Bluetooth.patch create mode 100644 packages/linux-firmware/0004-linux-firmware-scsi-Remove-firmware-for-SCSI-devices.patch create mode 100644 packages/linux-firmware/0005-linux-firmware-usb-remove-firmware-for-USB-Serial-PC.patch create mode 100644 packages/linux-firmware/0006-linux-firmware-ethernet-Remove-firmware-for-ethernet.patch create mode 100644 packages/linux-firmware/0007-linux-firmware-Remove-firmware-for-Accelarator-devic.patch create mode 100644 packages/linux-firmware/0008-linux-firmware-gpu-Remove-firmware-for-GPU-devices.patch create mode 100644 packages/linux-firmware/0009-linux-firmware-various-Remove-firmware-for-various-d.patch create mode 100644 packages/linux-firmware/0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch diff --git a/packages/linux-firmware/0001-linux-firmware-snd-remove-firmware-for-snd-audio-dev.patch 
b/packages/linux-firmware/0001-linux-firmware-snd-remove-firmware-for-snd-audio-dev.patch new file mode 100644 index 00000000..f0f6abf7 --- /dev/null +++ b/packages/linux-firmware/0001-linux-firmware-snd-remove-firmware-for-snd-audio-dev.patch @@ -0,0 +1,1979 @@ +From c5e85907233a5ddc7df414744561f0fd848e907f Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Tue, 25 Jul 2023 09:22:51 +0000 +Subject: [PATCH] linux-firmware: snd: remove firmware for snd/audio devices + +Bottlerocket does not configure drivers for audio devices for any of its +kernels. Hence, we do not need to ship firmware for these devices. The +following list maps the drivers as they are named in WHENCE to the +kernel config option for reference and easy searchability should we need +to find the right firmware to add when adding a driver to our kernel +configuation: + +* snd-korg1212 - CONFIG_SND_KORG1212 +* snd-maestro3 - CONFIG_SND_MAESTRO3 +* snd-ymfpci - CONFIG_SND_YMFPCI +* snd-sb16_csp - CONFIG_SND_SB16_CSP +* snd-wavefront - CONFIG_SND_WAVEFRONT +* mtk-sof - CONFIG_SND_SOC_SOF_MTK_TOPLEVEL +* snd-hda-codex-ca0132 - CONFIG_SND_HDA_INTEL && CONFIG_SND_HDA_CODEC_CA0132 +* snd_soc_sst_acpi - CONFIG_SND_SOC_INTEL_SST_ACPI +* snd_intel_sst_core - CONFIG_SND_SOC_INTEL_SST +* snd_soc_catpt - CONFIG_SND_SOC_INTEL_CATPT +* snd_soc_avs - CONFIG_SND_SOC_INTEL_AVS +* snd_soc_skl - CONFIG_SND_SOC_INTEL_SKL +* cs35l41_hda - CONFIG_SND_SOC_C35L41* && SND_HDA_SCODEC_CS35L41* + +Signed-off-by: Leonard Foerster +--- + LICENCE.IntcSST2 | 39 -- + LICENCE.adsp_sst | 999 -------------------------------------------- + LICENCE.ca0132 | 47 --- + LICENCE.fw_sst_0f28 | 40 -- + LICENSE.cirrus | 182 -------- + WHENCE | 545 ------------------------ + 6 files changed, 1852 deletions(-) + delete mode 100644 LICENCE.IntcSST2 + delete mode 100644 LICENCE.adsp_sst + delete mode 100644 LICENCE.ca0132 + delete mode 100644 LICENCE.fw_sst_0f28 + delete mode 100644 LICENSE.cirrus + +diff --git a/LICENCE.IntcSST2 b/LICENCE.IntcSST2 +deleted file mode 100644 +index d4f1609..0000000 +--- a/LICENCE.IntcSST2 ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2014, Intel Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Intel Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.adsp_sst b/LICENCE.adsp_sst +deleted file mode 100644 +index c66b1b2..0000000 +--- a/LICENCE.adsp_sst ++++ /dev/null +@@ -1,999 +0,0 @@ +-***** INTEL BINARY FIRMWARE RELEASE LICENCE ******************************** +- +-Copyright (c) 2014-15 Intel Corporation. +-All rights reserved. +- +-Redistribution. +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials provided +- with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers may +- be used to endorse or promote products derived from this software without +- specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software is +- permitted. +- +- +-Limited patent license. +- +-Intel Corporation grants a world-wide, royalty-free, non-exclusive license +-under patents it now or hereafter owns or controls to make, have made, use, +-import, offer to sell and sell ("Utilize") this software, but solely to the +-extent that any such patent is necessary to Utilize the software alone. The +-patent license shall not apply to any combinations which include this software. +-No hardware per se is licensed hereunder. +- +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGE. +- +- +-***** NEW LIBC LICENCE******************************** +- +-The newlib subdirectory is a collection of software from several sources. +- +-Each file may have its own copyright/license that is embedded in the source +-file. Unless otherwise noted in the body of the source file(s), the following copyright +-notices will apply to the contents of the newlib subdirectory: +- +-(1) Red Hat Incorporated +- +-Copyright (c) 1994-2009 Red Hat, Inc. All rights reserved. 
+- +-This copyrighted material is made available to anyone wishing to use, +-modify, copy, or redistribute it subject to the terms and conditions +-of the BSD License. This program is distributed in the hope that +-it will be useful, but WITHOUT ANY WARRANTY expressed or implied, +-including the implied warranties of MERCHANTABILITY or FITNESS FOR +-A PARTICULAR PURPOSE. A copy of this license is available at +-http://www.opensource.org/licenses. Any Red Hat trademarks that are +-incorporated in the source code or documentation are not subject to +-the BSD License and may only be used or replicated with the express +-permission of Red Hat, Inc. +- +-(2) University of California, Berkeley +- +-Copyright (c) 1981-2000 The Regents of the University of California. +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without modification, +-are permitted provided that the following conditions are met: +- +- * Redistributions of source code must retain the above copyright notice, +- this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright notice, +- this list of conditions and the following disclaimer in the documentation +- and/or other materials provided with the distribution. +- * Neither the name of the University nor the names of its contributors +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +-IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY +-OF SUCH DAMAGE. +- +-(3) David M. Gay (AT&T 1991, Lucent 1998) +- +-The author of this software is David M. Gay. +- +-Copyright (c) 1991 by AT&T. +- +-Permission to use, copy, modify, and distribute this software for any +-purpose without fee is hereby granted, provided that this entire notice +-is included in all copies of any software which is or includes a copy +-or modification of this software and in all copies of the supporting +-documentation for such software. +- +-THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +-WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR AT&T MAKES ANY +-REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY +-OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. +- +-------------------------------------------------------------------- +- +-The author of this software is David M. Gay. 
+- +-Copyright (C) 1998-2001 by Lucent Technologies +-All Rights Reserved +- +-Permission to use, copy, modify, and distribute this software and +-its documentation for any purpose and without fee is hereby +-granted, provided that the above copyright notice appear in all +-copies and that both that the copyright notice and this +-permission notice and warranty disclaimer appear in supporting +-documentation, and that the name of Lucent or any of its entities +-not be used in advertising or publicity pertaining to +-distribution of the software without specific, written prior +-permission. +- +-LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. +-IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY +-SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +-IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +-THIS SOFTWARE. +- +- +-(4) Advanced Micro Devices +- +-Copyright 1989, 1990 Advanced Micro Devices, Inc. +- +-This software is the property of Advanced Micro Devices, Inc (AMD) which +-specifically grants the user the right to modify, use and distribute this +-software provided this notice is not removed or altered. All other rights +-are reserved by AMD. +- +-AMD MAKES NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, WITH REGARD TO THIS +-SOFTWARE. IN NO EVENT SHALL AMD BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL +-DAMAGES IN CONNECTION WITH OR ARISING FROM THE FURNISHING, PERFORMANCE, OR +-USE OF THIS SOFTWARE. +- +-So that all may benefit from your experience, please report any problems +-or suggestions about this software to the 29K Technical Support Center at +-800-29-29-AMD (800-292-9263) in the USA, or 0800-89-1131 in the UK, or +-0031-11-1129 in Japan, toll free. The direct dial number is 512-462-4118. +- +-Advanced Micro Devices, Inc. +-29K Support Products +-Mail Stop 573 +-5900 E. Ben White Blvd. +-Austin, TX 78741 +-800-292-9263 +- +-(5) +- +-(6) +- +-(7) Sun Microsystems +- +-Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +- +-Developed at SunPro, a Sun Microsystems, Inc. business. +-Permission to use, copy, modify, and distribute this +-software is freely granted, provided that this notice is preserved. +- +-(8) Hewlett Packard +- +-(c) Copyright 1986 HEWLETT-PACKARD COMPANY +- +-To anyone who acknowledges that this file is provided "AS IS" +-without any express or implied warranty: +- permission to use, copy, modify, and distribute this file +-for any purpose is hereby granted without fee, provided that +-the above copyright notice and this notice appears in all +-copies, and that the name of Hewlett-Packard Company not be +-used in advertising or publicity pertaining to distribution +-of the software without specific, written prior permission. +-Hewlett-Packard Company makes no representations about the +-suitability of this software for any purpose. +- +-(9) Hans-Peter Nilsson +- +-Copyright (C) 2001 Hans-Peter Nilsson +- +-Permission to use, copy, modify, and distribute this software is +-freely granted, provided that the above copyright notice, this notice +-and the following disclaimer are preserved with no changes. +- +-THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +-PURPOSE. 
+- +-(10) Stephane Carrez (m68hc11-elf/m68hc12-elf targets only) +- +-Copyright (C) 1999, 2000, 2001, 2002 Stephane Carrez (stcarrez@nerim.fr) +- +-The authors hereby grant permission to use, copy, modify, distribute, +-and license this software and its documentation for any purpose, provided +-that existing copyright notices are retained in all copies and that this +-notice is included verbatim in any distributions. No written agreement, +-license, or royalty fee is required for any of the authorized uses. +-Modifications to this software may be copyrighted by their authors +-and need not follow the licensing terms described here, provided that +-the new terms are clearly indicated on the first page of each file where +-they apply. +- +-(11) Christopher G. Demetriou +- +-Copyright (c) 2001 Christopher G. Demetriou +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +-3. The name of the author may not be used to endorse or promote products +- derived from this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(12) SuperH, Inc. +- +-Copyright 2002 SuperH, Inc. All rights reserved +- +-This software is the property of SuperH, Inc (SuperH) which specifically +-grants the user the right to modify, use and distribute this software +-provided this notice is not removed or altered. All other rights are +-reserved by SuperH. +- +-SUPERH MAKES NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, WITH REGARD TO +-THIS SOFTWARE. IN NO EVENT SHALL SUPERH BE LIABLE FOR INDIRECT, SPECIAL, +-INCIDENTAL OR CONSEQUENTIAL DAMAGES IN CONNECTION WITH OR ARISING FROM +-THE FURNISHING, PERFORMANCE, OR USE OF THIS SOFTWARE. +- +-So that all may benefit from your experience, please report any problems +-or suggestions about this software to the SuperH Support Center via +-e-mail at softwaresupport@superh.com . +- +-SuperH, Inc. +-405 River Oaks Parkway +-San Jose +-CA 95134 +-USA +- +-(13) Royal Institute of Technology +- +-Copyright (c) 1999 Kungliga Tekniska Högskolan +-(Royal Institute of Technology, Stockholm, Sweden). +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +- +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- +-2. 
Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-3. Neither the name of KTH nor the names of its contributors may be +- used to endorse or promote products derived from this software without +- specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY +-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE +-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(14) Alexey Zelkin +- +-Copyright (c) 2000, 2001 Alexey Zelkin +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(15) Andrey A. Chernov +- +-Copyright (C) 1997 by Andrey A. Chernov, Moscow, Russia. +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(16) FreeBSD +- +-Copyright (c) 1997-2002 FreeBSD Project. +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(17) S. L. Moshier +- +-Author: S. L. Moshier. +- +-Copyright (c) 1984,2000 S.L. Moshier +- +-Permission to use, copy, modify, and distribute this software for any +-purpose without fee is hereby granted, provided that this entire notice +-is included in all copies of any software which is or includes a copy +-or modification of this software and in all copies of the supporting +-documentation for such software. +- +-THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +-WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION +-OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS +-SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. +- +-(18) Citrus Project +- +-Copyright (c)1999 Citrus Project, +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(19) Todd C. Miller +- +-Copyright (c) 1998 Todd C. Miller +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +-3. The name of the author may not be used to endorse or promote products +- derived from this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +-AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +-OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(20) DJ Delorie (i386) +-Copyright (C) 1991 DJ Delorie +-All rights reserved. +- +-Redistribution, modification, and use in source and binary forms is permitted +-provided that the above copyright notice and following paragraph are +-duplicated in all such forms. +- +-This file is distributed WITHOUT ANY WARRANTY; without even the implied +-warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +-(21) Free Software Foundation LGPL License (*-linux* targets only) +- +- Copyright (C) 1990-1999, 2000, 2001 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Mark Kettenis , 1997. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, write to the Free +- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +- 02110-1301 USA. 
+- +-(22) Xavier Leroy LGPL License (i[3456]86-*-linux* targets only) +- +-Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) +- +-This program is free software; you can redistribute it and/or +-modify it under the terms of the GNU Library General Public License +-as published by the Free Software Foundation; either version 2 +-of the License, or (at your option) any later version. +- +-This program is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-GNU Library General Public License for more details. +- +-(23) Intel (i960) +- +-Copyright (c) 1993 Intel Corporation +- +-Intel hereby grants you permission to copy, modify, and distribute this +-software and its documentation. Intel grants this permission provided +-that the above copyright notice appears in all copies and that both the +-copyright notice and this permission notice appear in supporting +-documentation. In addition, Intel grants this permission provided that +-you prominently mark as "not part of the original" any modifications +-made to this software or documentation, and that the name of Intel +-Corporation not be used in advertising or publicity pertaining to +-distribution of the software or the documentation without specific, +-written prior permission. +- +-Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR +-IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY +-OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or +-representations regarding the use of, or the results of the use of, +-the software and documentation in terms of correctness, accuracy, +-reliability, currentness, or otherwise; and you rely on the software, +-documentation and results solely at your own risk. +- +-IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS, +-LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES +-OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM +-PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER. +- +-(24) Hewlett-Packard (hppa targets only) +- +-(c) Copyright 1986 HEWLETT-PACKARD COMPANY +- +-To anyone who acknowledges that this file is provided "AS IS" +-without any express or implied warranty: +- permission to use, copy, modify, and distribute this file +-for any purpose is hereby granted without fee, provided that +-the above copyright notice and this notice appears in all +-copies, and that the name of Hewlett-Packard Company not be +-used in advertising or publicity pertaining to distribution +-of the software without specific, written prior permission. +-Hewlett-Packard Company makes no representations about the +-suitability of this software for any purpose. +- +-(25) Henry Spencer (only *-linux targets) +- +-Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved. +-This software is not subject to any license of the American Telephone +-and Telegraph Company or of the Regents of the University of California. +- +-Permission is granted to anyone to use this software for any purpose on +-any computer system, and to alter it and redistribute it, subject +-to the following restrictions: +- +-1. The author is not responsible for the consequences of use of this +- software, no matter how awful, even if they arise from flaws in it. +- +-2. The origin of this software must not be misrepresented, either by +- explicit claim or by omission. 
Since few users ever read sources, +- credits must appear in the documentation. +- +-3. Altered versions must be plainly marked as such, and must not be +- misrepresented as being the original software. Since few users +- ever read sources, credits must appear in the documentation. +- +-4. This notice may not be removed or altered. +- +-(26) Mike Barcroft +- +-Copyright (c) 2001 Mike Barcroft +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(27) Konstantin Chuguev (--enable-newlib-iconv) +- +-Copyright (c) 1999, 2000 +- Konstantin Chuguev. All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +- iconv (Charset Conversion Library) v2.0 +- +-(28) Artem Bityuckiy (--enable-newlib-iconv) +- +-Copyright (c) 2003, Artem B. Bityuckiy, SoftMine Corporation. +-Rights transferred to Franklin Electronic Publishers. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. 
Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +-(29) IBM, Sony, Toshiba (only spu-* targets) +- +- (C) Copyright 2001,2006, +- International Business Machines Corporation, +- Sony Computer Entertainment, Incorporated, +- Toshiba Corporation, +- +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions are met: +- +- * Redistributions of source code must retain the above copyright notice, +- this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- * Neither the names of the copyright holders nor the names of their +- contributors may be used to endorse or promote products derived from this +- software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +- POSSIBILITY OF SUCH DAMAGE. +- +-(30) - Alex Tatmanjants (targets using libc/posix) +- +- Copyright (c) 1995 Alex Tatmanjants +- at Electronni Visti IA, Kiev, Ukraine. +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. 
+- +- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND +- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE +- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- SUCH DAMAGE. +- +-(31) - M. Warner Losh (targets using libc/posix) +- +- Copyright (c) 1998, M. Warner Losh +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +- THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- SUCH DAMAGE. +- +-(32) - Andrey A. Chernov (targets using libc/posix) +- +- Copyright (C) 1996 by Andrey A. Chernov, Moscow, Russia. +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND +- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- SUCH DAMAGE. 
+- +-(33) - Daniel Eischen (targets using libc/posix) +- +- Copyright (c) 2001 Daniel Eischen . +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +- THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- SUCH DAMAGE. +- +- +-(34) - Jon Beniston (only lm32-* targets) +- +- Contributed by Jon Beniston +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +- THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +- OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +- OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- SUCH DAMAGE. +- +- +-(35) - ARM Ltd (arm and thumb variant targets only) +- +- Copyright (c) 2009 ARM Ltd +- All rights reserved. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- 3. The name of the company may not be used to endorse or promote +- products derived from this software without specific prior written +- permission. 
+- +- THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED +- WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +- IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +- TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +- PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(36) - Xilinx, Inc. (microblaze-* and powerpc-* targets) +- +-Copyright (c) 2004, 2009 Xilinx, Inc. All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are +-met: +- +-1. Redistributions source code must retain the above copyright notice, +-this list of conditions and the following disclaimer. +- +-2. Redistributions in binary form must reproduce the above copyright +-notice, this list of conditions and the following disclaimer in the +-documentation and/or other materials provided with the distribution. +- +-3. Neither the name of Xilinx nor the names of its contributors may be +-used to endorse or promote products derived from this software without +-specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS +-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +- +-(37) Texas Instruments Incorporated (tic6x-*, *-tirtos targets) +- +-Copyright (c) 1996-2010,2014 Texas Instruments Incorporated +-http://www.ti.com/ +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- +- Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- +- Neither the name of Texas Instruments Incorporated nor the names +- of its contributors may be used to endorse or promote products +- derived from this software without specific prior written +- permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(38) National Semiconductor (cr16-* and crx-* targets) +- +-Copyright (c) 2004 National Semiconductor Corporation +- +-The authors hereby grant permission to use, copy, modify, distribute, +-and license this software and its documentation for any purpose, provided +-that existing copyright notices are retained in all copies and that this +-notice is included verbatim in any distributions. No written agreement, +-license, or royalty fee is required for any of the authorized uses. +-Modifications to this software may be copyrighted by their authors +-and need not follow the licensing terms described here, provided that +-the new terms are clearly indicated on the first page of each file where +-they apply. +- +-(39) - Adapteva, Inc. (epiphany-* targets) +- +-Copyright (c) 2011, Adapteva, Inc. +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are met: +- * Redistributions of source code must retain the above copyright notice, this +- list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright notice, +- this list of conditions and the following disclaimer in the documentation +- and/or other materials provided with the distribution. +- * Neither the name of Adapteva nor the names of its contributors may be used +- to endorse or promote products derived from this software without specific +- prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(40) - Altera Corportion (nios2-* targets) +- +-Copyright (c) 2003 Altera Corporation +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +- +- o Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- o Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. 
+- o Neither the name of Altera Corporation nor the names of its +- contributors may be used to endorse or promote products derived from +- this software without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY ALTERA CORPORATION, THE COPYRIGHT HOLDER, +-AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +-AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-(41) Ed Schouten - Free BSD +- +-Copyright (c) 2008 Ed Schouten +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +-SUCH DAMAGE. +- +diff --git a/LICENCE.ca0132 b/LICENCE.ca0132 +deleted file mode 100644 +index 411750a..0000000 +--- a/LICENCE.ca0132 ++++ /dev/null +@@ -1,47 +0,0 @@ +-Copyright (c) 2012, Creative Technology Ltd +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Creative Technology Ltd or its affiliates ("CTL") +- nor the names of its suppliers may be used to endorse or promote +- products derived from this software without specific prior written +- permission. +-* No reverse engineering, decompilation, or disassembly of this software +- (or any part thereof) is permitted. +- +-Limited patent license. 
CTL grants a limited, world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but strictly only to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not be +-applicable, to any other combinations which include this software. +-No hardware per se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +- +-NO OTHER RIGHTS GRANTED. USER HEREBY ACKNOWLEDGES AND AGREES THAT USE OF +-THIS SOFTWARE SHALL NOT CREATE OR GIVE GROUNDS FOR A LICENSE BY +-IMPLICATION, ESTOPPEL, OR OTHERWISE TO ANY INTELLECTUAL PROPERTY RIGHTS +-(PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) +-EMBODIED IN ANY OTHER CTL HARDWARE OR SOFTWARE WHETHER SOLELY OR IN +-COMBINATION WITH THIS SOFTWARE. +diff --git a/LICENCE.fw_sst_0f28 b/LICENCE.fw_sst_0f28 +deleted file mode 100644 +index 247e35f..0000000 +--- a/LICENCE.fw_sst_0f28 ++++ /dev/null +@@ -1,40 +0,0 @@ +-Copyright (c) 2014 Intel Corporation. +-All rights reserved. +- +-Redistribution. +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials provided +- with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers may +- be used to endorse or promote products derived from this software without +- specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software is +- permitted. +- +- +-Limited patent license. +- +-Intel Corporation grants a world-wide, royalty-free, non-exclusive license +-under patents it now or hereafter owns or controls to make, have made, use, +-import, offer to sell and sell ("Utilize") this software, but solely to the +-extent that any such patent is necessary to Utilize the software alone. The +-patent license shall not apply to any combinations which include this software. +-No hardware per se is licensed hereunder. +- +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.cirrus b/LICENSE.cirrus +deleted file mode 100644 +index c9d7c22..0000000 +--- a/LICENSE.cirrus ++++ /dev/null +@@ -1,182 +0,0 @@ +-Use, distribution, or reproduction of this CIRRUS LOGIC software is governed by +-the terms of this Agreement. Any use, distribution or reproduction of this +-CIRRUS LOGIC software constitutes your acceptance of the following terms and +-conditions. +- +-1. DEFINED TERMS +- +-“CIRRUS LOGIC” means either Cirrus Logic, Inc., a Delaware Corporation (for +-licensees based in the United States), or Cirrus Logic International (UK) Ltd, a +-company registered in Scotland (for licensees based outside the United States). +- +-“Licensee” means the party which has accepted these terms, including by +-distributing, reproducing and/or using the Software. +-“Software” means software provided to Licensee in binary code form, that runs or +-is intended to run on a processor embedded in an end product (and related files +-and documentation) (“Software”). +- +-2. GRANT OF LICENSE +- +-a. Subject to the terms, conditions, and limitations of this Agreement, CIRRUS +-LOGIC grants to Licensee a non-exclusive , non-transferable license (the +-“License”) to (i) use and integrate the Software with other software, and (ii) +-reproduce and distribute the Software in its complete and unmodified form, +-provided all use of the Software is in connection with CIRRUS LOGIC +-semiconductor devices. These license rights do not automatically extend to any +-third-party software within the Software for which a separate license is +-required to enable use by the Licensee. Licensee must agree applicable license +-terms with the relevant third-party licensors to use such software. +-b. Licensee (i) shall not remove or obscure any copyright and/or trademark +-notices from the Software, and (ii) shall maintain and reproduce all copyright +-and other proprietary notices on any copy in the same form and manner that such +-notices are included on the Software (except if the Software is embedded such +-that it is not readily accessible to an end user). +-c. Licensee may not make any modifications to the Software and may only +-distribute the Software under the terms of this Agreement. Recipients of the +-Software must be provided with a copy of this Agreement. +- +-3. TERMINATION +- +-a. This Agreement will automatically terminate if Licensee does not comply with +-its terms. +-b. In the event of termination: +-i. Licensee must destroy all copies of the Software (and parts thereof), and all +-Proprietary Information (as defined below), including any original, backup, or +-archival copy that Licensee may have installed, downloaded, or recorded on any +-medium. Upon written request from CIRRUS LOGIC, Licensee will certify in +-writing that it has complied with this provision and has not retained any copies +-of the Software or any Proprietary Information; +-ii. the rights and licenses granted to Licensee under this Agreement will +-immediately terminate; +-iii. 
all rights and obligations under this Agreement which by their nature +-should survive termination, will remain in full force and effect. +- +-4. OWNERSHIP, RIGHTS, USE LIMITATIONS, AND DUTIES +- +-a. CIRRUS LOGIC and/or its licensors own all proprietary rights in the Software. +- Whilst this Agreement is in effect, Licensee hereby covenants that it will not +-assert any claim that the Software infringes any intellectual property rights +-owned or controlled by Licensee. +-b. Other than as expressly set forth in this Agreement, CIRRUS LOGIC does not +-grant, and Licensee does not receive, any ownership right, title or interest in +-any intellectual property rights relating to the Software, nor in any copy of +-any part of the foregoing. No license is granted to Licensee in any human +-readable code of the Software (source code). +-c. Licensee shall not (i) use, license, sell or otherwise distribute the +-Software except as provided in this Agreement, (ii) attempt to modify in any +-way, reverse engineer, decompile or disassemble any portion of the Software; or +-(iii) use the Software or other material in violation of any applicable law or +-regulation. +-d. The Software is not intended or authorized for use in or with products for +-which CIRRUS LOGIC semiconductor devices are not designed, tested or intended, +-as detailed in the CIRRUS LOGIC Terms and Conditions of Sale, available at +-www.cirrus.com/legal (as the same may be updated from time to time), which shall +-apply to Licensee’s use of Software, insofar as relevant thereto. +-e. CIRRUS LOGIC may require Licensee to cease using a version of the Software, +-and may require use of an updated version, where (a) a third-party has claimed +-that the Software infringes its intellectual property rights, and/or (b) for +-technical reasons CIRRUS LOGIC is no longer able to permit ongoing use of the +-version of the Software being used by Licensee. +-f. If Licensee requests support, CIRRUS LOGIC has no obligation to provide any +-such support but if it agrees to do so any such support will be on a reasonable +-efforts basis. +-g. Licensee shall keep complete and accurate records of its use of the Software +-and shall, on request, promptly provide to CIRRUS LOGIC a certificate evidencing +-the extent of such use. +- +-5. CONFIDENTIALITY +- +-a. Licensee may obtain or be provided with information relating to the Software, +-including in documentation provided to it (“Proprietary Information”). Such +-Proprietary Information shall belong solely to CIRRUS LOGIC and/or its +-affiliates (or, as the case may be, relevant third parties). +-b. During and after the term of this Agreement, Licensee agrees to maintain all +-such Proprietary Information in strict confidence and to not use (except as +-expressly authorized in this Agreement), disclose, or provide any third-party +-with access to any Proprietary Information except under a written agreement with +-terms at least as protective as the terms of this Agreement. Licensee also +-agrees to exercise the same degree of care and diligence as it uses in respect +-of its own confidential and proprietary information when dealing with CIRRUS +-LOGIC Proprietary Information, and in any event no less than reasonable care and +-diligence. +-c. 
Information will not be considered Proprietary Information if (i) it becomes +-public knowledge other than through any act or omission constituting a breach of +-the Licensee’s obligations under this Agreement; (ii) the Licensee can prove it +-was already in the Licensee’s possession and at its free disposal before the +-disclosure hereunder; and (iii) it was received in good faith from a third party +-having no obligation of confidentiality and which is free to disclose such +-Confidential Information +- +-6. NO WARRANTIES OR LIABILITIES +- +-LICENSEE EXPRESSLY ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS PROVIDED BY +-CIRRUS LOGIC “AS IS” WITHOUT ANY WARRANTIES WHATSOEVER AND THAT THE +-INSTALLATION, OPERATION AND USE OF THE SOFTWARE IS AT LICENSEE’S OWN RISK. +-CIRRUS LOGIC MAKES NO WARRANTIES, EXPRESS, IMPLIED OR STATUTORY, AND EXPRESSLY +-DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +-PURPOSE, GOOD TITLE, NON-INFRINGEMENT, SATISFACTORY QUALITY OR PERFORMANCE OR +-WHICH MAY ARISE FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. CIRRUS LOGIC +-PROVIDES NO WARRANTY THAT THE SOFTWARE IS FREE FROM DEFECTS OR CHARACTERISTICS +-THAT COULD CAUSE VULNERABILITY TO CYBER-ATTACK, DATA BREACH OR PRIVACY +-VIOLATIONS. CIRRUS LOGIC SHALL IN NO EVENT BE LIABLE TO LICENSEE OR ANYONE ELSE +-FOR ANY LOSS, INJURY OR DAMAGE CAUSED IN WHOLE OR PART BY THE INSTALLATION, +-OPERATION OR USE OF THE SOFTWARE, LICENSEE’S INCORRECT USE OF THE SOFTWARE +-INCLUDING ANY FAILURE TO PROPERLY INSTALL ANY UPDATES TO THE SOFTWARE OR OTHER +-SOFTWARE WITH WHICH THE SOFTWARE OPERATES OR WHICH IT UPDATES, OR IS INTENDED TO +-OPERATE WITH OR UPDATE, OR THE RESULTS PRODUCED BY, OR FAILURES, DELAYS, OR +-INTERRUPTIONS OF THE SOFTWARE. WITHOUT LIMITING THE FOREGOING GENERALITY, +-CIRRUS LOGIC SHALL IN NO EVENT BE LIABLE WITH RESPECT TO ANY INTELLECTUAL +-PROPERTY INFRINGEMENT CLAIMS WHICH ARISE FROM, OR IN ANY WAY RELATE TO, USE OF +-THE SOFTWARE, INCLUDING, WITHOUT LIMITATION, ANY CLAIMS RELATING TO HAPTICS ON A +-COMPONENT OR SYSTEM LEVEL. CIRRUS LOGIC AND ITS LICENSORS SHALL IN NO EVENT BE +-LIABLE TO LICENSEE OR ANYONE ELSE FOR ANY DIRECT, CONSEQUENTIAL, INCIDENTAL OR +-SPECIAL DAMAGES, INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS ARISING OUT OF OR +-RELATING TO THE INSTALLATION, OPERATION OR USE OF THE SOFTWARE. BECAUSE SOME +-JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF CERTAIN WARRANTIES OR +-TYPES OF CLAIM OR LOSS THEN IN SUCH INSTANCES THE ABOVE EXCLUSIONS SHALL BE +-INTERPRETED TO APPLY TO THE EXTENT PERMITTED BY LOCAL LAW. SUBJECT TO THE +-FOREGOING, THE TOTAL LIABILITY OF CIRRUS LOGIC AND ITS LICENSORS TO LICENSEE +-UNDER THIS AGREEMENT, AND/OR ARISING FROM, OR IN CONNECTION WITH, THE USE OF (OR +-INABILITY TO USE) THE SOFTWARE, WHETHER ARISING IN CONTRACT, TORT (INCLUDING +-NEGLIGENCE), QUASI TORT, OR OTHERWISE SHALL NOT EXCEED THE LICENSE FEES (IF ANY) +-PAID BY LICENSEE FOR THE SOFTWARE THAT GAVE RISE TO THE CLAIM, OR TEN THOUSAND +-U.S. DOLLARS (U.S. $10,000), WHICHEVER IS GREATER. +- +-7. EXPORT AND END USE RESTRICTIONS +- +-Licensee acknowledges that the Software is subject to United States and other +-applicable export related laws and regulations (“Export Laws”). Licensee +-agrees that it may not export, re-export or transfer the Software or any direct +-product of the Software other than in accordance with those Export Laws. 
+-Licensee further agrees to be bound by, and to act in accordance with, +-provisions of the CIRRUS LOGIC Terms and Conditions of Sale available at +-www.cirrus.com/legal (as updated from time to time), including insofar as they +-relate to export/end use restrictions. +- +-8. GENERAL PROVISIONS +- +-This Agreement is not assignable or sub-licensable by Licensee without the prior +-written consent of CIRRUS LOGIC. CIRRUS LOGIC may sub-license or assign any or +-all of its rights and obligations under this Agreement without Licensee’s +-consent. The waiver by either party of a breach of this Agreement shall not +-constitute a waiver of any subsequent breach of this Agreement; nor shall any +-delay to exercise any right under this Agreement operate as a waiver of such +-right. This Agreement shall be deemed to have been made in, and shall be +-construed pursuant to the laws of, the State of Texas without regard to +-conflicts of laws provisions thereof. Both parties hereby consent to the +-exclusive jurisdiction of the State of Texas and the locale of Austin therein. +-The prevailing party in any action to enforce this Agreement shall be entitled +-to recover costs and expenses including, without limitation, attorneys' fees. +-The parties agree that CIRRUS LOGIC and its licensors shall be entitled to +-equitable relief in addition to any remedies it may have hereunder or at law. +- +-9. ENTIRE AGREEMENT +- +-This Agreement and any terms referenced or incorporated herein, constitutes the +-entire agreement between Licensee and CIRRUS LOGIC with respect to the Software +-provided pursuant to this Agreement and supersedes any other agreement between +-Licensee and CIRRUS LOGIC with respect thereto (including terms presented and/or +-accepted as part of an installation process), but does not otherwise replace, +-modify or cancel any other written agreement between Licensee and CIRRUS LOGIC. +-If there is any inconsistency between these terms and those presented as part of +-the process to install the Software, these terms will prevail. +diff --git a/WHENCE b/WHENCE +index e6309eb..116d04d 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -17,43 +17,6 @@ Licence: Redistributable. See LICENCE.cypress for details. + + -------------------------------------------------------------------------- + +-Driver: snd-korg1212 -- Korg 1212 IO audio device +- +-File: korg/k1212.dsp +- +-Licence: Unknown +- +-Found in alsa-firmware package in hex form; no licensing information. +- +--------------------------------------------------------------------------- +- +-Driver: snd-maestro3 -- ESS Allegro Maestro3 audio device +- +-File: ess/maestro3_assp_kernel.fw +-File: ess/maestro3_assp_minisrc.fw +- +-Licence: Unknown +- +-Found in alsa-firmware package in hex form with a comment claiming to +-be GPLv2+, but without source -- and with another comment saying "ESS +-drops binary dsp code images on our heads, but we don't get to see +-specs on the dsp." +- +--------------------------------------------------------------------------- +- +-Driver: snd-ymfpci -- Yamaha YMF724/740/744/754 audio devices +- +-File: yamaha/ds1_ctrl.fw +-File: yamaha/ds1_dsp.fw +-File: yamaha/ds1e_ctrl.fw +- +-Licence: Unknown +- +-Found alsa-firmware package in hex form, with the following comment: +- Copyright (c) 1997-1999 Yamaha Corporation. All Rights Reserved. 
+- +--------------------------------------------------------------------------- +- + Driver: advansys - AdvanSys SCSI + + File: advansys/mcode.bin +@@ -360,24 +323,6 @@ http://www.zdomain.com/a56.html + + -------------------------------------------------------------------------- + +-Driver: snd-sb16-csp - Sound Blaster 16/AWE CSP support +- +-File: sb16/mulaw_main.csp +-File: sb16/alaw_main.csp +-File: sb16/ima_adpcm_init.csp +-File: sb16/ima_adpcm_playback.csp +-File: sb16/ima_adpcm_capture.csp +- +-Licence: Allegedly GPLv2+, but no source visible. Marked: +-/* +- * Copyright (c) 1994 Creative Technology Ltd. +- * Microcode files for SB16 Advanced Signal Processor +- */ +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: qla2xxx - QLogic QLA2XXX Fibre Channel + + File: ql2100_fw.bin +@@ -1383,17 +1328,6 @@ ARM assembly source code from https://linuxtv.org/downloads/firmware/Boot.S + + -------------------------------------------------------------------------- + +-Driver: snd-wavefront - ISA WaveFront sound card +- +-File: yamaha/yss225_registers.bin +- +-Licence: Allegedly GPLv2+, but no source visible. +- +-Found in hex form in kernel source, with the following comment: +- Copyright (c) 1998-2002 by Paul Davis +- +--------------------------------------------------------------------------- +- + Driver: rt61pci - Ralink RT2561, RT2561S, RT2661 wireless MACs + + File: rt2561.bin +@@ -3605,17 +3539,6 @@ Licence: GPLv2. Some build scripts use the New BSD (3-clause) licence.. See GPL- + + -------------------------------------------------------------------------- + +-Driver: snd-hda-codec-ca0132 - Creative Sound Core3D codec +- +-File: ctefx.bin +-File: ctspeq.bin +- +-Licence: Redistributable. See LICENCE.ca0132 for details +- +-Found also in alsa-firmware package. +- +--------------------------------------------------------------------------- +- + Driver: btusb - Bluetooth USB driver + + File: intel/ibt-hw-37.7.bseq +@@ -4044,14 +3967,6 @@ Licence: Redistributable. See LICENCE.r8a779x_usb3 for details. + + -------------------------------------------------------------------------- + +-Driver: snd_soc_sst_acpi +- +-File: intel/fw_sst_0f28.bin-48kHz_i2s_master +- +-License: Redistributable. See LICENCE.fw_sst_0f28 for details +- +--------------------------------------------------------------------------- +- + Driver: as102 - Abilis Systems Single DVB-T Receiver + + File: as102_data1_st.hex +@@ -4070,104 +3985,6 @@ Licence: Redistributable. See LICENCE.it913x for details + + -------------------------------------------------------------------------- + +-Driver: snd_soc_catpt -- Intel AudioDSP driver for HSW/BDW platforms +- +-File: intel/catpt/bdw/dsp_basefw.bin +-Version: 44b81c4d5397a63108356f58f036953d9b288c4e +-Link: intel/IntcSST2.bin -> catpt/bdw/dsp_basefw.bin +- +-License: Redistributable. 
See LICENCE.IntcSST2 for details +- +--------------------------------------------------------------------------- +- +-Driver: snd_soc_avs -- Intel AudioDSP driver for cAVS platforms +- +-File: intel/avs/skl/dsp_basefw.bin +-File: intel/avs/skl/dsp_mod_7CAD0808-AB10-CD23-EF45-12AB34CD56EF.bin +-Version: 9.21.00.4899 +-Link: intel/dsp_fw_release.bin -> avs/skl/dsp_basefw.bin +-Link: intel/dsp_fw_kbl.bin -> avs/skl/dsp_basefw.bin +-File: intel/avs/apl/dsp_basefw.bin +-Version: 9.22.01.4908 +-Link: intel/dsp_fw_bxtn.bin -> avs/apl/dsp_basefw.bin +-Link: intel/dsp_fw_glk.bin -> avs/apl/dsp_basefw.bin +-File: intel/avs/cnl/dsp_basefw.bin +-Version: 10.23.00.8551 +-Link: intel/dsp_fw_cnl.bin -> avs/cnl/dsp_basefw.bin +- +-License: Redistributable. See LICENCE.adsp_sst for details +- +--------------------------------------------------------------------------- +- +-Driver: snd_intel_sst_core +- +-File: intel/fw_sst_0f28.bin +-File: intel/fw_sst_0f28_ssp0.bin +- +-License: Redistributable. See LICENCE.fw_sst_0f28 for details +- +--------------------------------------------------------------------------- +- +-Driver: snd_intel_sst_core +- +-File: intel/fw_sst_22a8.bin +-Version: 01.0B.02.02 +- +-License: Redistributable. See LICENCE.fw_sst_0f28 for details +- +--------------------------------------------------------------------------- +- +-Driver: snd-soc-skl +- +-File: intel/dsp_fw_release_v969.bin +-Version: 8.20.00.969 +-File: intel/dsp_fw_release_v3402.bin +-Version: 9.21.00.3402_161 +- +-License: Redistributable. See LICENCE.adsp_sst for details +- +-File: intel/dsp_fw_bxtn_v2219.bin +-Version: 9.22.01.2219_64 +-File: intel/dsp_fw_bxtn_v3366.bin +-Version: 9.22.01.3366_157 +- +-License: Redistributable. See LICENCE.adsp_sst for details +- +-File: intel/dsp_fw_kbl_v701.bin +-Version: 9.21.00.701 +-File: intel/dsp_fw_kbl_v1037.bin +-Version: 09.21.00.1037 +-File: intel/dsp_fw_kbl_v2042.bin +-Version: 9.21.00.2042_46 +-File: intel/dsp_fw_kbl_v2630.bin +-Version: 9.21.00.2630_97 +-File: intel/dsp_fw_kbl_v3266.bin +-Version: 9.21.00.3266_144 +-File: intel/dsp_fw_kbl_v3420.bin +-Version: 9.21.00.3420_163 +-File: intel/dsp_fw_kbl_v3402.bin +-Version: 9.21.00.3402_161 +- +-License: Redistributable. See LICENCE.adsp_sst for details +- +-File: intel/dsp_fw_glk_v1814.bin +-Version: 9.92.01.1814 +-File: intel/dsp_fw_glk_v2880.bin +-Version: 9.22.00.2880 +-File: intel/dsp_fw_glk_v2768.bin +-Version: 9.22.01.2768 +-File: intel/dsp_fw_glk_v3366.bin +-Version: 9.22.01.3366_157 +- +-File: intel/dsp_fw_cnl_v1191.bin +-Version: 10.00.00.1191 +-File: intel/dsp_fw_cnl_v1858.bin +-Version: 10.23.00.1858 +- +-License: Redistributable. See LICENCE.adsp_sst for details +- +--------------------------------------------------------------------------- +- + Driver: smsmdtv - Siano MDTV Core module + + File: cmmb_vega_12mhz.inp +@@ -5935,368 +5752,6 @@ Licence: Redistributable. 
See LICENSE.amphion_vpu for details + + --------------------------------------------------------------------------- + +-Driver: cs35l41_hda - CS35l41 ALSA HDA audio driver +- +-File: cirrus/cs35l41-dsp1-spk-prot.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot.bin +-File: cirrus/cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8971.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8971.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8972.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8972.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8973.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8973.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8974.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8974.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8975.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8975.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c896e.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c896e.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c89c3.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c89c3.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8981.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8981.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c898e.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c898e.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c898f.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c898f.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8991.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8991.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8992.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8992.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8994.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8994.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8995.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8995.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c89c6.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c89c6.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-103c8971.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8971.bin +-File: 
cirrus/cs35l41-dsp1-spk-prot-103c8972.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8972.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8973.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8973.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8974.bin -> cs35l41-dsp1-spk-prot-103c8972.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8974.bin -> cs35l41-dsp1-spk-cali-103c8972.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8975-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8975-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8975-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8975-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c896e-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c896e-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c896e-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c896e-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c898e.bin -> cs35l41-dsp1-spk-prot-103c8971.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c898e.bin -> cs35l41-dsp1-spk-cali-103c8971.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c898f.bin -> cs35l41-dsp1-spk-prot-103c8971.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c898f.bin -> cs35l41-dsp1-spk-cali-103c8971.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8991.bin -> cs35l41-dsp1-spk-prot-103c8972.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8991.bin -> cs35l41-dsp1-spk-cali-103c8972.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8992.bin -> cs35l41-dsp1-spk-prot-103c8972.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8992.bin -> cs35l41-dsp1-spk-cali-103c8972.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8994.bin -> cs35l41-dsp1-spk-prot-103c8973.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8994.bin -> cs35l41-dsp1-spk-cali-103c8973.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8995.bin -> cs35l41-dsp1-spk-prot-103c8973.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8995.bin -> cs35l41-dsp1-spk-cali-103c8973.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c89c6-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c89c6-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c89c6-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c89c6-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c89c3-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c89c3-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c89c3-r1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c89c3-r1.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c89c3-l0.bin -> cs35l41-dsp1-spk-prot-103c89c3-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c89c3-l0.bin -> cs35l41-dsp1-spk-cali-103c89c3-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c89c3-l1.bin -> cs35l41-dsp1-spk-prot-103c89c3-r1.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c89c3-l1.bin -> cs35l41-dsp1-spk-cali-103c89c3-r1.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8981-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8981-r1.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8981-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8981-l1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8981-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8981-r1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8981-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8981-l1.bin +-File: cirrus/cs35l41/v6.39.0/halo_cspl_RAM_revB2_29.41.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa3847.wmfw -> cs35l41/v6.39.0/halo_cspl_RAM_revB2_29.41.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa3847.wmfw -> cs35l41/v6.39.0/halo_cspl_RAM_revB2_29.41.0.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3847-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3847-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa3847-spkid0.bin +-File: 
cirrus/cs35l41-dsp1-spk-prot-17aa3847-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3847-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa3847-spkid1.bin +-File: cirrus/cs35l41/v6.47.0/halo_cspl_RAM_revB2_29.49.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa3855.wmfw -> cs35l41/v6.47.0/halo_cspl_RAM_revB2_29.49.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa3855.wmfw -> cs35l41/v6.47.0/halo_cspl_RAM_revB2_29.49.0.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3855-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3855-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa3855-spkid0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3855-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa3855-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa3855-spkid1.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa22f1.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa22f1.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa22f2.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa22f2.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa22f3.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa22f3.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-17aa22f1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa22f1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa22f1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa22f1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa22f2-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa22f2-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa22f2-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa22f3-l0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa22f3-r0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa22f3-l0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa22f3-r0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-r0.bin +-File: cirrus/cs35l41/v6.63.0/halo_cspl_RAM_revB2_29.65.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-104312af.wmfw -> cs35l41/v6.63.0/halo_cspl_RAM_revB2_29.65.0.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-104312af.wmfw -> cs35l41/v6.63.0/halo_cspl_RAM_revB2_29.65.0.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-104312af-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-104312af-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-104312af-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-104312af-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-104312af-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-104312af-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-104312af-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-104312af-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a8f.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a8f.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-10431a8f-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431a8f-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431a8f-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431a8f-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431a8f-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431a8f-spkid0-r0.bin +-File: 
cirrus/cs35l41-dsp1-spk-cali-10431a8f-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431a8f-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10431e02.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431e02.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-10431e02-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e02-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e02-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e02-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e02-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e02-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e02-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e02-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10431f12.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431f12.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-cali-10431f12-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431f12-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431f12-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431f12-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431f12-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431f12-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431f12-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431f12-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10431e12.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431e12.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-cali-10431e12-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e12-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e12-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431e12-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e12-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e12-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e12-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431e12-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10431b93.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431b93.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a20.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a20.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a30.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a30.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a40.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a40.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a50.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a50.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-10431a60.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-10431a60.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-10431b93-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431b93-spkid0-r0.bin +-File: 
cirrus/cs35l41-dsp1-spk-prot-10431b93-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10431b93-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431b93-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431b93-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431b93-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10431b93-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a20-spkid0-l0.bin -> cs35l41-dsp1-spk-prot-10431b93-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a20-spkid0-r0.bin -> cs35l41-dsp1-spk-prot-10431b93-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a20-spkid1-l0.bin -> cs35l41-dsp1-spk-prot-10431b93-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a20-spkid1-r0.bin -> cs35l41-dsp1-spk-prot-10431b93-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a20-spkid0-l0.bin -> cs35l41-dsp1-spk-cali-10431b93-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a20-spkid0-r0.bin -> cs35l41-dsp1-spk-cali-10431b93-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a20-spkid1-l0.bin -> cs35l41-dsp1-spk-cali-10431b93-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a20-spkid1-r0.bin -> cs35l41-dsp1-spk-cali-10431b93-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a30-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a30-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a30-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a30-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a30-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a30-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a30-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a30-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a40-spkid0-l0.bin -> cs35l41-dsp1-spk-prot-10433a30-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a40-spkid0-r0.bin -> cs35l41-dsp1-spk-prot-10433a30-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a40-spkid1-l0.bin -> cs35l41-dsp1-spk-prot-10433a30-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-10433a40-spkid1-r0.bin -> cs35l41-dsp1-spk-prot-10433a30-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a40-spkid0-l0.bin -> cs35l41-dsp1-spk-cali-10433a30-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a40-spkid0-r0.bin -> cs35l41-dsp1-spk-cali-10433a30-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a40-spkid1-l0.bin -> cs35l41-dsp1-spk-cali-10433a30-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-10433a40-spkid1-r0.bin -> cs35l41-dsp1-spk-cali-10433a30-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a50-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a50-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a50-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a50-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a50-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a50-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a50-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a50-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a60-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a60-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a60-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a60-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a60-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a60-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-10433a60-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-10433a60-spkid0-r0.bin +-Link: 
cirrus/cs35l41-dsp1-spk-prot-17aa2316.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2316.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2317.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2317.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-17aa2316-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa2316-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa2316-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-17aa2316-spkid1-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa2316-spkid0-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa2316-spkid0-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa2316-spkid1-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-17aa2316-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2317-spkid0-l0.bin -> cs35l41-dsp1-spk-prot-17aa2316-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2317-spkid0-r0.bin -> cs35l41-dsp1-spk-prot-17aa2316-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2317-spkid1-l0.bin -> cs35l41-dsp1-spk-prot-17aa2316-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2317-spkid1-r0.bin -> cs35l41-dsp1-spk-prot-17aa2316-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2317-spkid0-l0.bin -> cs35l41-dsp1-spk-cali-17aa2316-spkid0-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2317-spkid0-r0.bin -> cs35l41-dsp1-spk-cali-17aa2316-spkid0-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2317-spkid1-l0.bin -> cs35l41-dsp1-spk-cali-17aa2316-spkid1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2317-spkid1-r0.bin -> cs35l41-dsp1-spk-cali-17aa2316-spkid1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2318.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2318.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2319.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2319.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa231a.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa231a.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2318-l0.bin -> cs35l41-dsp1-spk-prot-17aa22f1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2318-r0.bin -> cs35l41-dsp1-spk-prot-17aa22f1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2318-l0.bin -> cs35l41-dsp1-spk-cali-17aa22f1-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2318-r0.bin -> cs35l41-dsp1-spk-cali-17aa22f1-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2319-l0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa2319-r0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2319-l0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa2319-r0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa231a-l0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-17aa231a-r0.bin -> cs35l41-dsp1-spk-prot-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa231a-l0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-l0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-17aa231a-r0.bin -> cs35l41-dsp1-spk-cali-17aa22f2-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8c26.wmfw -> 
cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8c26.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b42.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b42.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b43.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b43.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b44.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b44.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b45.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b45.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b46.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b46.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b47.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b47.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b63.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b63.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b70.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b70.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b72.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b72.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b74.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b74.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b77.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b77.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b8f.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b92.wmfw -> cs35l41/v6.61.1/halo_cspl_RAM_revB2_29.63.1.wmfw +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b42.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b42.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b43.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b43.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b44.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b44.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b45.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b46.bin -> cs35l41-dsp1-spk-prot-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b46.bin -> cs35l41-dsp1-spk-cali-103c8b45.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b47.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b47.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b63-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b63-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b63-r1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b63-r1.bin +-File: 
cirrus/cs35l41-dsp1-spk-prot-103c8b63-l0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b63-l0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b63-l1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b63-l1.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b70.bin -> cs35l41-dsp1-spk-prot-103c8b42.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b70.bin -> cs35l41-dsp1-spk-cali-103c8b42.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b72.bin -> cs35l41-dsp1-spk-prot-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b72.bin -> cs35l41-dsp1-spk-cali-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b74.bin -> cs35l41-dsp1-spk-prot-103c8b47.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b74.bin -> cs35l41-dsp1-spk-cali-103c8b47.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b77.bin -> cs35l41-dsp1-spk-prot-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b77.bin -> cs35l41-dsp1-spk-cali-103c8b45.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b8f-r0.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b8f-r1.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b8f-r0.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b8f-r1.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b8f-l0.bin -> cs35l41-dsp1-spk-prot-103c8b8f-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8b8f-l1.bin -> cs35l41-dsp1-spk-prot-103c8b8f-r1.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b8f-l0.bin -> cs35l41-dsp1-spk-cali-103c8b8f-r0.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8b8f-l1.bin -> cs35l41-dsp1-spk-cali-103c8b8f-r1.bin +-File: cirrus/cs35l41-dsp1-spk-prot-103c8b92.bin +-File: cirrus/cs35l41-dsp1-spk-cali-103c8b92.bin +-Link: cirrus/cs35l41-dsp1-spk-prot-103c8c26.bin -> cs35l41-dsp1-spk-prot-103c8b45.bin +-Link: cirrus/cs35l41-dsp1-spk-cali-103c8c26.bin -> cs35l41-dsp1-spk-cali-103c8b45.bin +- +-License: Redistributable. See LICENSE.cirrus for details. +- +-Use of Cirrus Logic drivers, firmware and other materials is permitted +-only in connection with Cirrus Logic hardware products. +- +-Copyright © 2022 Cirrus Logic, Inc. and Cirrus Logic International +-Semiconductor Ltd. All Rights Reserved. +- +---------------------------------------------------------------------------- +- +-Driver: mtk-sof - MediaTek Sound Open Firmware driver +- +-File: mediatek/sof/sof-mt8186.ri +-File: mediatek/sof/sof-mt8186.ldc +-File: mediatek/sof-tplg/sof-mt8186.tplg +-Version: v0.2.1 +- +-File: mediatek/sof/sof-mt8195.ri +-File: mediatek/sof/sof-mt8195.ldc +-File: mediatek/sof-tplg/sof-mt8195-mt6359-rt1019-rt5682.tplg +-File: mediatek/sof-tplg/sof-mt8195-mt6359-rt1019-rt5682-dts.tplg +-Version: v0.4.1 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- + Driver: nxp-sr1xx - NXP Ultra Wide Band driver + File: nxp/sr150_fw.bin + Version: 35.00.03 +-- +2.40.1 + diff --git a/packages/linux-firmware/0002-linux-firmware-video-Remove-firmware-for-video-broad.patch b/packages/linux-firmware/0002-linux-firmware-video-Remove-firmware-for-video-broad.patch new file mode 100644 index 00000000..ad4b0ea6 --- /dev/null +++ b/packages/linux-firmware/0002-linux-firmware-video-Remove-firmware-for-video-broad.patch @@ -0,0 +1,1630 @@ +From 80d430aa70fec3a2d0fdbe59f3a102c8aeaa564a Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Tue, 25 Jul 2023 09:43:12 +0000 +Subject: [PATCH] linux-firmware: video: Remove firmware for video/broadcast + devices + +Bottlerocket does not configure any special video encoding/decoding +hardware, DVB/DAB tuner, TV cards, and V4L2 hardware. 
We do not need +to ship firmware for devices we do not ship drivers for. The following +list maps the drivers as named in WHENCE to kernel config options to +easily check if we need to ship additional firmware in the future. + +* cpia2 - CONFIG_VIDEO_CPIA2 +* dabusb - CONFIG_USB_DABUSB (not available since 2.6.39) +* vicam - CONFIG_USB_GSPCA_VICAM +* ipu3-imgu - CONFIG_VIDEO_IPU3_IMGU +* cx231xx - CONFIG_VIDEO_CX231XX +* cx23418 - CONFIG_VIDEO_CX18 +* cx23885 - CONFIG_VIDEO_CX23885 +* cx25840 - CONFIG_VIDEO_CX25840 +* mtk-vpu - CONFIG_VIDEO_MEDIATEK_VPU +* ti-vpe - CONFIG_VIDEO_TI_VPE +* tlg2300 - CONFIG_VIDEO_TLG2300 +* s5p-mfc - CONFIG_VIDEO_SAMSUNG_S5P_MFC +* go7007 - CONFIG_VIDEO_GO7007 +* venus - CONFIG_VIDEO_QCOM_VENUS +* meson-vdec - CONFIG_VIDEO_MESON_VDEC +* wave5 - CONFIG_VIDEO_WAVE_VPU (driver not yet included upstream) +* amphion - CONFIG_VIDEO_AMPHION_VPU +* dvb-ttusb-budget - CONFIG_DVB_TTUSB_BUDGET +* dvb-ttpci - CONFIG_DVB_AV7110 +* xc4000 - CONFIG_MEDIA_TUNER_XC4000 +* xc5000 - CONFIG_MEDIA_TUNER_XC5000 +* dib0700 - CONFIG_DVB_USB_DIB0700 +* lsg8gxx - CONFIG_DVB_LGS8GXX +* drxk - CONFIG_DVB_DRXK +* as102 - CONFIG_DVB_AS102 +* it9135 - CONFIG_MEDIA_TUNER_IT913X +* smsmdtv - CONFIG_SMS_SIANO_MDTV +* tegra-vix - CONFIG_DRM_TEGRA +* rk3399-dptx - CONFIG_ROCKCHIP_CDN_DP +* cdns-mhdp - CONFIG_DRM_CDNS_MHDP8546 +* lt9611uxc - CONFIG_DRM_LONTIUM_LT9611UXC + +Signed-off-by: Leonard Foerster +--- + LICENCE.Abilis | 22 -- + LICENCE.cadence | 63 ------ + LICENCE.cnm | 23 --- + LICENCE.go7007 | 457 ------------------------------------------ + LICENCE.it913x | 17 -- + LICENCE.rockchip | 41 ---- + LICENCE.siano | 31 --- + LICENCE.ti-tspa | 46 ----- + LICENCE.xc4000 | 23 --- + LICENCE.xc5000 | 23 --- + LICENCE.xc5000c | 23 --- + LICENSE.Lontium | 2 - + LICENSE.amlogic_vdec | 15 -- + LICENSE.amphion_vpu | 48 ----- + LICENSE.dib0700 | 22 -- + LICENSE.ipu3_firmware | 36 ---- + WHENCE | 400 ------------------------------------ + 17 files changed, 1292 deletions(-) + delete mode 100644 LICENCE.Abilis + delete mode 100644 LICENCE.cadence + delete mode 100644 LICENCE.cnm + delete mode 100644 LICENCE.go7007 + delete mode 100644 LICENCE.it913x + delete mode 100644 LICENCE.rockchip + delete mode 100644 LICENCE.siano + delete mode 100644 LICENCE.ti-tspa + delete mode 100644 LICENCE.xc4000 + delete mode 100644 LICENCE.xc5000 + delete mode 100644 LICENCE.xc5000c + delete mode 100644 LICENSE.Lontium + delete mode 100644 LICENSE.amlogic_vdec + delete mode 100644 LICENSE.amphion_vpu + delete mode 100644 LICENSE.dib0700 + delete mode 100644 LICENSE.ipu3_firmware + +diff --git a/LICENCE.Abilis b/LICENCE.Abilis +deleted file mode 100644 +index 9050d2b..0000000 +--- a/LICENCE.Abilis ++++ /dev/null +@@ -1,22 +0,0 @@ +-Firmware provided by Pierrick Hascoet to Devin +-Heitmueller on January 15, 2010. +- +-The USB firmware files "dvb-as102_data1_st.hex" and "as102_data2_st.hex" for +-Abilis's AS10X, used together with the AS10X USB Kernel driver, is provided +-under the following licensing terms: +- +-Copyright (c) 2010, Abilis Systems Sarl +- +-Permission to use, copy, modify, and/or distribute this software for +-any purpose with or without fee is hereby granted, provided that the +-above copyright notice and this permission notice appear in all +-copies. +- +-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +-WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE +-AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +-DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +-PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +-TORTUOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +-PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENCE.cadence b/LICENCE.cadence +deleted file mode 100644 +index b3564c2..0000000 +--- a/LICENCE.cadence ++++ /dev/null +@@ -1,63 +0,0 @@ +-Copyright (c) 2018, Cadence Design Systems, Inc. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +- +-* Neither the name of Cadence Design Systems, Inc., its products +- nor the names of its suppliers may be used to endorse or promote products +- derived from this Software without specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +- +-This software contains: +- +-HDCP Cipher is licensed under the FreeBSD license. A copy of the FreeBSD +-license can be found at +-https://www.freebsd.org/copyright/freebsd-license.html. +-The source code for HDCP Cipher can is available here: +-http://www3.cs.stonybrook.edu/~rob/hdcp.html +- +-SSL Library is licensed under the Apache License, Version 2.0. +-A copy of the Apache License, Version 2.0 can be found at +-http://www.apache.org/licenses/LICENSE-2.0. +-The original source code for SSL Library can is available here: +-https://tls.mbed.org/download +- +-Fast discrete Fourier and cosine transforms and inverses +-author: Monty +-modifications by: Monty +-last modification date: Jul 1 1996 +- +-/* These Fourier routines were originally based on the Fourier +-routines of the same names from the NETLIB bihar and fftpack +-fortran libraries developed by Paul N. Swarztrauber at the National +-Center for Atmospheric Research in Boulder, CO USA. They have been +-reimplemented in C and optimized in a few ways for OggSquish. */ +- +-/* As the original fortran libraries are public domain, the C Fourier +-routines in this file are hereby released to the public domain as +-well. The C routines here produce output exactly equivalent to the +-original fortran routines. Of particular interest are the facts +-that (like the original fortran), these routines can work on +-arbitrary length vectors that need not be powers of two in +-length. 
*/ +diff --git a/LICENCE.cnm b/LICENCE.cnm +deleted file mode 100644 +index 48d23ea..0000000 +--- a/LICENCE.cnm ++++ /dev/null +@@ -1,23 +0,0 @@ +-Copyright (C) 2021 Chips&Media, Inc. +-All rights reserved. +- +-Redistribution and use in binary form is permitted provided that the following +-conditions are met: +- +-1. Redistributions must reproduce the above copyright notice, this list of +-conditions and the following disclaimer in the documentation and/or other +-materials provided with the distribution. +- +-2. Redistribution and use shall be used only with Texas Instruments Incorporateds +-silicon products. Any other use, reproduction, modification, translation, +-or compilation of the Software is prohibited. +- +-3. No reverse engineering, decompilation, or disassembly is permitted. +- +-TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED +-"AS IS" WITHOUT WARRANTY OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ANY EXPRESS +-OR IMPLIED WARRANTIES OF MERCHANTABILITY, ACCURACY, FITNESS OR SUFFICIENCY FOR A +-PARTICULAR PURPOSE, SATISFACTORY QUALITY, CORRESPONDENCE WITH DESCRIPTION, QUIET +-ENJOYMENT OR NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY RIGHTS. +-CHIPS&MEDIA, INC., ITS AFFILIATES AND THEIR SUPPLIERS DISCLAIM ANY WARRANTY THAT THE +-DELIVERABLES WILL OPERATE WITHOUT INTERRUPTION OR BE ERROR-FREE. +diff --git a/LICENCE.go7007 b/LICENCE.go7007 +deleted file mode 100644 +index 3689f3b..0000000 +--- a/LICENCE.go7007 ++++ /dev/null +@@ -1,457 +0,0 @@ +-The README file from the original package from Micronas appears below. Only +-the part about the firmware redistribution in section 0 is relevant, all +-other sections are completely obsolete. +- +---------------------------------------------------------------------------- +- WIS GO7007SB Public Linux Driver +---------------------------------------------------------------------------- +- +- +-*** Please see the file RELEASE-NOTES for important last-minute updates *** +- +- +- 0. OVERVIEW AND LICENSING/DISCLAIMER +- +- +-This driver kit contains Linux drivers for the WIS GO7007SB multi-format +-video encoder. Only kernel version 2.6.x is supported. The video stream +-is available through the Video4Linux2 API and the audio stream is available +-through the ALSA API (or the OSS emulation layer of the ALSA system). +- +-The files in kernel/ and hotplug/ are licensed under the GNU General Public +-License Version 2 from the Free Software Foundation. A copy of the license +-is included in the file COPYING. +- +-The example applications in apps/ and C header files in include/ are +-licensed under a permissive license included in the source files which +-allows copying, modification and redistribution for any purpose without +-attribution. +- +-The firmware files included in the firmware/ directory may be freely +-redistributed only in conjunction with this document; but modification, +-tampering and reverse engineering are prohibited. +- +-MICRONAS USA, INC., MAKES NO WARRANTIES TO ANY PERSON OR ENTITY WITH +-RESPECT TO THE SOFTWARE OR ANY DERIVATIVES THEREOF OR ANY SERVICES OR +-LICENSES AND DISCLAIMS ALL IMPLIED WARRANTIES, INCLUDING WITHOUT LIMITATION +-WARRANTIES OF MERCHANTABILITY, SUPPORT, AND FITNESS FOR A PARTICULAR +-PURPOSE AND NON-INFRINGEMENT. +- +- +- 1. SYSTEM REQUIREMENTS +- +- +-This driver requires Linux kernel 2.6. Kernel 2.4 is not supported. Using +-kernel 2.6.10 or later is recommended, as earlier kernels are known to have +-unstable USB 2.0 support. 
+- +-A fully built kernel source tree must be available. Typically this will be +-linked from "/lib/modules//build" for convenience. If this +-link does not exist, an extra parameter will need to be passed to the +-`make` command. +- +-All vendor-built kernels should already be configured properly. However, +-for custom-built kernels, the following options need to be enabled in the +-kernel as built-in or modules: +- +- CONFIG_HOTPLUG - Support for hot-pluggable devices +- CONFIG_MODULES - Enable loadable module support +- CONFIG_KMOD - Automatic kernel module loading +- CONFIG_FW_LOADER - Hotplug firmware loading support +- CONFIG_I2C - I2C support +- CONFIG_VIDEO_DEV - Video For Linux +- CONFIG_SOUND - Sound card support +- CONFIG_SND - Advanced Linux Sound Architecture +- CONFIG_USB - Support for Host-side USB +- CONFIG_USB_DEVICEFS - USB device filesystem +- CONFIG_USB_EHCI_HCD - EHCI HCD (USB 2.0) support +- +-Additionally, to use the example application, the following options need to +-be enabled in the ALSA section: +- +- CONFIG_SND_MIXER_OSS - OSS Mixer API +- CONFIG_SND_PCM_OSS - OSS PCM (digital audio) API +- +-The hotplug scripts, along with the fxload utility, must also be installed. +-These scripts can be obtained from . +-Hotplugging is used for loading firmware into the Cypruss EZ-USB chip using +-fxload and for loading firmware into the driver using the firmware agent. +- +- +- 2. COMPILING AND INSTALLING THE DRIVER +- +- +-Most users should be able to compile the driver by simply running: +- +- $ make +- +-in the top-level directory of the driver kit. First the kernel modules +-will be built, followed by the example applications. +- +-If the build system is unable to locate the kernel source tree for the +-currently-running kernel, or if the module should be built for a kernel +-other than the currently-running kernel, an additional parameter will need +-to be passed to make to specify the appropriate kernel source directory: +- +- $ make KERNELSRC=/usr/src/linux-2.6.10-custom3 +- +-Once the compile completes, the driver and firmware files should be +-installed by running: +- +- $ make install +- +-The kernel modules will be placed in "/lib/modules//extra" +-and the firmware files will be placed in the appropriate hotplug firmware +-directory, usually /lib/firmware. In addition, USB maps and scripts will +-be placed in /etc/hotplug/usb to enable fxload to initialize the EZ-USB +-control chip when the device is connected. +- +- +- 3. PAL/SECAM TUNER CONFIGURATION (TV402U-EU only) +- +- +-The PAL model of the Plextor ConvertX TV402U may require additional +-configuration to correctly select the appropriate TV frequency band and +-audio subchannel. +- +-Users with a device other than the Plextor ConvertX TV402U-EU should skip +-this section. +- +-The wide variety of PAL TV systems used in Europe requires that additional +-information about the local TV standards be passed to the driver in order +-to properly tune TV channels. The two necessary parameters are (a) the PAL +-TV band, and (b) the audio subchannel format in use. +- +-In many cases, the appropriate TV band selection is passed to the driver +-from applications. However, in some cases, the application only specifies +-that the driver should use PAL but not the specific information about the +-appropriate TV band. 
To work around this issue, the correct TV band may be +-specified in the "force_band" parameter to the wis-sony-tuner module: +- +- TV band force_band +- ------- ---------- +- PAL B/G B +- PAL I I +- PAL D/K D +- SECAM L L +- +-If the "force_band" parameter is specified, the driver will ignore any TV +-band specified by applications and will always use the band provided in the +-module parameter. +- +-The other parameter that can be specified is the audio subchannel format. +-There are several stereo audio carrier systems in use, including NICAM and +-three varieties of A2. To receive audio broadcast on one of these stereo +-carriers, the "force_mpx_mode" parameter must be specified to the +-wis-sony-tuner module. +- +- TV band Audio subcarrier force_mpx_mode +- ------- ---------------- -------------- +- PAL B/G Mono (default) 1 +- PAL B/G A2 2 +- PAL B/G NICAM 3 +- PAL I Mono (default) 4 +- PAL I NICAM 5 +- PAL D/K Mono (default) 6 +- PAL D/K A2 (1) 7 +- PAL D/K A2 (2) 8 +- PAL D/K A2 (3) 9 +- PAL D/K NICAM 10 +- SECAM L Mono (default) 11 +- SECAM L NICAM 12 +- +-If the "force_mpx_mode" parameter is not specified, the correct mono-only +-mode will be chosen based on the TV band. However, the tuner will not +-receive stereo audio or bilingual broadcasts correctly. +- +-To pass the "force_band" or "force_mpx_mode" parameters to the +-wis-sony-tuner module, the following line must be added to the modprobe +-configuration file, which varies from one Linux distribution to another. +- +- options wis-sony-tuner force_band=B force_mpx_mode=2 +- +-The above example would force the tuner to the PAL B/G TV band and receive +-stereo audio broadcasts on the A2 carrier. +- +-To verify that the configuration has been placed in the correct location, +-execute: +- +- $ modprobe -c | grep wis-sony-tuner +- +-If the configuration line appears, then modprobe will pass the parameters +-correctly the next time the wis-sony-tuner module is loaded into the +-kernel. +- +- +- 4. TESTING THE DRIVER +- +- +-Because few Linux applications are able to correctly capture from +-Video4Linux2 devices with only compressed formats supported, the new driver +-should be tested with the "gorecord" application in the apps/ directory. +- +-First connect a video source to the device, such as a DVD player or VCR. +-This will be captured to a file for testing the driver. If an input source +-is unavailable, a test file can still be captured, but the video will be +-black and the audio will be silent. +- +-This application will auto-detect the V4L2 and ALSA/OSS device names of the +-hardware and will record video and audio to an AVI file for a specified +-number of seconds. For example: +- +- $ apps/gorecord -duration 60 capture.avi +- +-If this application does not successfully record an AVI file, the error +-messages produced by gorecord and recorded in the system log (usually in +-/var/log/messages) should provide information to help resolve the problem. +- +-Supplying no parameters to gorecord will cause it to probe the available +-devices and exit. Use the -help flag for usage information. +- +- +- 5. USING THE DRIVER +- +- +-The V4L2 device implemented by the driver provides a standard compressed +-format API, within the following criteria: +- +- * Applications that only support the original Video4Linux1 API will not +- be able to communicate with this driver at all. +- +- * No raw video modes are supported, so applications like xawtv that +- expect only uncompressed video will not function. 
+- +- * Supported compression formats are: Motion-JPEG, MPEG1, MPEG2 and MPEG4. +- +- * MPEG video formats are delivered as Video Elementary Streams only. +- Program Stream (PS), Transport Stream (TS) and Packetized Elementary +- Stream (PES) formats are not supported. +- +- * Video parameters such as format and input port may not be changed while +- the encoder is active. +- +- * The audio capture device only functions when the video encoder is +- actively capturing video. Attempts to read from the audio device when +- the encoder is inactive will result in an I/O error. +- +- * The native format of the audio device is 48Khz 2-channel 16-bit +- little-endian PCM, delivered through the ALSA system. No audio +- compression is implemented in the hardware. ALSA may convert to other +- uncompressed formats on the fly. +- +-The include/ directory contains a C header file describing non-standard +-features of the GO7007SB encoder, which are described below: +- +- +- GO7007IOC_S_COMP_PARAMS, GO7007IOC_G_COMP_PARAMS +- +- These ioctls are used to negotiate general compression parameters. +- +- To query the current parameters, call the GO7007IOC_G_COMP_PARAMS ioctl +- with a pointer to a struct go7007_comp_params. If the driver is not +- set to MPEG format, the EINVAL error code will be returned. +- +- To change the current parameters, initialize all fields of a struct +- go7007_comp_params and call the GO7007_IOC_S_COMP_PARAMS ioctl with a +- pointer to this structure. The driver will return the current +- parameters with any necessary changes to conform to the limitations of +- the hardware or current compression mode. Any or all fields can be set +- to zero to request a reasonable default value. If the driver is not +- set to MPEG format, the EINVAL error code will be returned. When I/O +- is in progress, the EBUSY error code will be returned. +- +- Fields in struct go7007_comp_params: +- +- __u32 The maximum number of frames in each +- gop_size Group Of Pictures; i.e. the maximum +- number of frames minus one between +- each key frame. +- +- __u32 The maximum number of sequential +- max_b_frames bidirectionally-predicted frames. +- (B-frames are not yet supported.) +- +- enum go7007_aspect_ratio The aspect ratio to be encoded in the +- aspect_ratio meta-data of the compressed format. +- +- Choices are: +- GO7007_ASPECT_RATIO_1_1 +- GO7007_ASPECT_RATIO_4_3_NTSC +- GO7007_ASPECT_RATIO_4_3_PAL +- GO7007_ASPECT_RATIO_16_9_NTSC +- GO7007_ASPECT_RATIO_16_9_PAL +- +- __u32 Bit-wise OR of control flags (below) +- flags +- +- Flags in struct go7007_comp_params: +- +- GO7007_COMP_CLOSED_GOP Only produce self-contained GOPs, used +- to produce streams appropriate for +- random seeking. +- +- GO7007_COMP_OMIT_SEQ_HEADER Omit the stream sequence header. +- +- +- GO7007IOC_S_MPEG_PARAMS, GO7007IOC_G_MPEG_PARAMS +- +- These ioctls are used to negotiate MPEG-specific stream parameters when +- the pixelformat has been set to V4L2_PIX_FMT_MPEG. +- +- To query the current parameters, call the GO7007IOC_G_MPEG_PARAMS ioctl +- with a pointer to a struct go7007_mpeg_params. If the driver is not +- set to MPEG format, the EINVAL error code will be returned. +- +- To change the current parameters, initialize all fields of a struct +- go7007_mpeg_params and call the GO7007_IOC_S_MPEG_PARAMS ioctl with a +- pointer to this structure. The driver will return the current +- parameters with any necessary changes to conform to the limitations of +- the hardware or selected MPEG mode. 
Any or all fields can be set to +- zero to request a reasonable default value. If the driver is not set +- to MPEG format, the EINVAL error code will be returned. When I/O is in +- progress, the EBUSY error code will be returned. +- +- Fields in struct go7007_mpeg_params: +- +- enum go7007_mpeg_video_standard +- mpeg_video_standard The MPEG video standard in which to +- compress the video. +- +- Choices are: +- GO7007_MPEG_VIDEO_MPEG1 +- GO7007_MPEG_VIDEO_MPEG2 +- GO7007_MPEG_VIDEO_MPEG4 +- +- __u32 Bit-wise OR of control flags (below) +- flags +- +- __u32 The profile and level indication to be +- pali stored in the sequence header. This +- is only used as an indicator to the +- decoder, and does not affect the MPEG +- features used in the video stream. +- Not valid for MPEG1. +- +- Choices for MPEG2 are: +- GO7007_MPEG2_PROFILE_MAIN_MAIN +- +- Choices for MPEG4 are: +- GO7007_MPEG4_PROFILE_S_L0 +- GO7007_MPEG4_PROFILE_S_L1 +- GO7007_MPEG4_PROFILE_S_L2 +- GO7007_MPEG4_PROFILE_S_L3 +- GO7007_MPEG4_PROFILE_ARTS_L1 +- GO7007_MPEG4_PROFILE_ARTS_L2 +- GO7007_MPEG4_PROFILE_ARTS_L3 +- GO7007_MPEG4_PROFILE_ARTS_L4 +- GO7007_MPEG4_PROFILE_AS_L0 +- GO7007_MPEG4_PROFILE_AS_L1 +- GO7007_MPEG4_PROFILE_AS_L2 +- GO7007_MPEG4_PROFILE_AS_L3 +- GO7007_MPEG4_PROFILE_AS_L4 +- GO7007_MPEG4_PROFILE_AS_L5 +- +- Flags in struct go7007_mpeg_params: +- +- GO7007_MPEG_FORCE_DVD_MODE Force all compression parameters and +- bitrate control settings to comply +- with DVD MPEG2 stream requirements. +- This overrides most compression and +- bitrate settings! +- +- GO7007_MPEG_OMIT_GOP_HEADER Omit the GOP header. +- +- GO7007_MPEG_REPEAT_SEQHEADER Repeat the MPEG sequence header at +- the start of each GOP. +- +- +- GO7007IOC_S_BITRATE, GO7007IOC_G_BITRATE +- +- These ioctls are used to set and query the target bitrate value for the +- compressed video stream. The bitrate may be selected by storing the +- target bits per second in an int and calling GO7007IOC_S_BITRATE with a +- pointer to the int. The bitrate may be queried by calling +- GO7007IOC_G_BITRATE with a pointer to an int where the current bitrate +- will be stored. +- +- Note that this is the primary means of controlling the video quality +- for all compression modes, including V4L2_PIX_FMT_MJPEG. The +- VIDIOC_S_JPEGCOMP ioctl is not supported. +- +- +----------------------------------------------------------------------------- +- Installing the WIS PCI Voyager Driver +---------------------------------------------------------------------------- +- +-The WIS PCI Voyager driver requires several patches to the Linux 2.6.11.x +-kernel source tree before compiling the driver. These patches update the +-in-kernel SAA7134 driver to the newest development version and patch bugs +-in the TDA8290/TDA8275 tuner driver. +- +-The following patches must be downloaded from Gerd Knorr's website and +-applied in the order listed: +- +- http://dl.bytesex.org/patches/2.6.11-2/i2c-tuner +- http://dl.bytesex.org/patches/2.6.11-2/i2c-tuner2 +- http://dl.bytesex.org/patches/2.6.11-2/v4l2-api-mpeg +- http://dl.bytesex.org/patches/2.6.11-2/saa7134-update +- +-The following patches are included with this SDK and can be applied in any +-order: +- +- patches/2.6.11/saa7134-voyager.diff +- patches/2.6.11/tda8275-newaddr.diff +- patches/2.6.11/tda8290-ntsc.diff +- +-Check to make sure the CONFIG_VIDEO_SAA7134 option is enabled in the kernel +-configuration, and build and install the kernel. 
+- +-After rebooting into the new kernel, the GO7007 driver can be compiled and +-installed: +- +- $ make SAA7134_BUILD=y +- $ make install +- $ modprobe saa7134-go7007 +- +-There will be two V4L video devices associated with the PCI Voyager. The +-first device (most likely /dev/video0) provides access to the raw video +-capture mode of the SAA7133 device and is used to configure the source +-video parameters and tune the TV tuner. This device can be used with xawtv +-or other V4L(2) video software as a standard uncompressed device. +- +-The second device (most likely /dev/video1) provides access to the +-compression functions of the GO7007. It can be tested using the gorecord +-application in the apps/ directory of this SDK: +- +- $ apps/gorecord -vdevice /dev/video1 -noaudio test.avi +- +-Currently the frame resolution is fixed at 720x480 (NTSC) or 720x576 (PAL), +-and the video standard must be specified to both the raw and the compressed +-video devices (xawtv and gorecord, for example). +- +- +--------------------------------------------------------------------------- +-RELEASE NOTES FOR WIS GO7007SB LINUX DRIVER +---------------------------------------------------------------------------- +- +-Last updated: 5 November 2005 +- +- - Release 0.9.7 includes new support for using udev to run fxload. The +- install script should automatically detect whether the old hotplug +- scripts or the new udev rules should be used. To force the use of +- hotplug, run "make install USE_UDEV=n". To force the use of udev, run +- "make install USE_UDEV=y". +- +- - Motion detection is supported but undocumented. Try the `modet` app +- for a demonstration of how to use the facility. +- +- - Using USB2.0 devices such as the TV402U with USB1.1 HCDs or hubs can +- cause buffer overruns and frame drops, even at low framerates, due to +- inconsistency in the bitrate control mechanism. +- +- - On devices with an SAA7115, including the Plextor ConvertX, video height +- values of 96, 128, 160, 192, 256, 320, and 384 do not work in NTSC mode. +- All valid heights up to 512 work correctly in PAL mode. +- +- - The WIS Star Trek and PCI Voyager boards have no support yet for audio +- or the TV tuner. +diff --git a/LICENCE.it913x b/LICENCE.it913x +deleted file mode 100644 +index ec8f56c..0000000 +--- a/LICENCE.it913x ++++ /dev/null +@@ -1,17 +0,0 @@ +-Copyright (c) 2014, ITE Tech. Inc. +- +-The firmware files "dvb-usb-it9135-01.fw" and "dvb-usb-it9135-02.fw" +-are for ITEtech it9135 Ax and Bx chip versions. +- +-Permission to use, copy, modify, and/or distribute this software for +-any purpose with or without fee is hereby granted, provided that the +-above copyright notice and this permission notice appear in all copies. +- +-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE +-FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +-DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +-ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +-SOFTWARE. +diff --git a/LICENCE.rockchip b/LICENCE.rockchip +deleted file mode 100644 +index d23b4c4..0000000 +--- a/LICENCE.rockchip ++++ /dev/null +@@ -1,41 +0,0 @@ +-Copyright (c) 2016, Fuzhou Rockchip Electronics Co.Ltd +-All rights reserved. +- +-Redistribution. 
Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +- +-* Neither the name of Fuzhou Rockchip Electronics Co.Ltd, its products +- nor the names of its suppliers may be used to endorse or promote products +- derived from this Software without specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Fuzhou Rockchip Electronics Co.Ltd grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.siano b/LICENCE.siano +deleted file mode 100644 +index 97e5440..0000000 +--- a/LICENCE.siano ++++ /dev/null +@@ -1,31 +0,0 @@ +-FIRMWARE LICENSE TERMS +- +-Copyright (c) 2005-2014 Siano Mobile Silicon Ltd. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +-following disclaimer in the documentation and/or other materials +-provided with the distribution. +- +-* Neither the name of Siano Mobile Silicon Ltd. nor the names of its +-suppliers may be used to endorse or promote products derived from this +-software without specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this software +-is permitted. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE +diff --git a/LICENCE.ti-tspa b/LICENCE.ti-tspa +deleted file mode 100644 +index 728fc2b..0000000 +--- a/LICENCE.ti-tspa ++++ /dev/null +@@ -1,46 +0,0 @@ +-TI TSPA License +-TECHNOLOGY AND SOFTWARE PUBLICLY AVAILABLE +-SOFTWARE LICENSE +- +-Copyright (c) 2020, Texas Instruments Incorporated. +- +-All rights reserved not granted herein. +- +-Limited License. +- +-Texas Instruments Incorporated grants a world-wide, royalty-free, non-exclusive +-license under copyrights and patents it now or hereafter owns or controls to +-make, have made, use, import, offer to sell and sell ("Utilize") this software, +-but solely to the extent that any such patent is necessary to Utilize the +-software alone. The patent license shall not apply to any combinations which +-include this software. No hardware per se is licensed hereunder. +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +- +-* Redistributions must preserve existing copyright notices and reproduce this +-license (including the above copyright notice and the disclaimer below) in the +-documentation and/or other materials provided with the distribution. +- +-* Neither the name of Texas Instruments Incorporated nor the names of its +-suppliers may be used to endorse or promote products derived from this software +-without specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this software is +-permitted. +- +-* Nothing shall obligate TI to provide you with source code for the software +-licensed and provided to you in object code. +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY TI AND TI’S LICENSORS "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +-EVENT SHALL TI AND TI’S LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.xc4000 b/LICENCE.xc4000 +deleted file mode 100644 +index e3cd261..0000000 +--- a/LICENCE.xc4000 ++++ /dev/null +@@ -1,23 +0,0 @@ +-The following XC4000 firmware file "dvb-fe-xc4000-1.4.1.fw" was +-created based on version 1.4 of "xc4000_firmwares.h". +- +-Firmware provided as part of an XC4000 Linux developers kit by Brian +-Mathews to Devin Heitmueller +- on July 1, 2009. 
+- +-The code was released by Xceive under the following license: +- +-// Copyright (c) 2009, Xceive Corporation +-// +-// Permission to use, copy, modify, and/or distribute this software, only +-// for use with Xceive ICs, for any purpose with or without fee is hereby +-// granted, provided that the above copyright notice and this permission +-// notice appear in all source code copies. +-// +-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENCE.xc5000 b/LICENCE.xc5000 +deleted file mode 100644 +index 0ac8557..0000000 +--- a/LICENCE.xc5000 ++++ /dev/null +@@ -1,23 +0,0 @@ +-The following XC500 firmware file "dvb-fe-xc5000-1.6.114.fw" was +-created based on "xc5000_firmwares_32000Khz.h". +- +-Firmware provided as part of an XC5000 Linux developers kit by Brian +-Mathews to Devin Heitmueller +-on July 1, 2009. +- +-The code was released by Xceive under the following license: +- +-// Copyright (c) 2009, Xceive Corporation +-// +-// Permission to use, copy, modify, and/or distribute this software, only +-// for use with Xceive ICs, for any purpose with or without fee is hereby +-// granted, provided that the above copyright notice and this permission +-// notice appear in all source code copies. +-// +-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENCE.xc5000c b/LICENCE.xc5000c +deleted file mode 100644 +index 23b81e7..0000000 +--- a/LICENCE.xc5000c ++++ /dev/null +@@ -1,23 +0,0 @@ +-The following XC500C firmware file "dvb-fe-xc5000C-4.1.30.7.fw" was created +-based on "Xc5200_firmwares_32000Khz.h". +- +-Firmware provided as part of an XC5000C Linux developers kit by Ramon Cazares +- to Devin Heitmueller dheitmueller@linuxtv.org +-on July 25, 2012. +- +-The code was released by Cresta Technology under the following license: +- +-// Copyright (c) 2012, Cresta Technology Corporation +-// +-// Permission to use, copy, modify, and/or distribute this software, only +-// for use with Cresta Technlogy ICs, for any purpose with or without fee is +-// hereby granted, provided that the above copyright notice and this +-// permission notice appear in all source code copies. +-// +-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +-// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENSE.Lontium b/LICENSE.Lontium +deleted file mode 100644 +index 2989473..0000000 +--- a/LICENSE.Lontium ++++ /dev/null +@@ -1,2 +0,0 @@ +-Lontium Semiconductor Corp. grants permission to use and redistribute aforementioned firmware file for the use with devices containing Lontium chipsets, but not as part of the Linux kernel or in any other form which would require the file itself to be covered by the terms of the GNU General Public License or the GNU Lesser General Public License. +-The firmware file is distributed in the hope that it will be useful, but is provided WITHOUT ANY WARRANTY, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. +diff --git a/LICENSE.amlogic_vdec b/LICENSE.amlogic_vdec +deleted file mode 100644 +index ac48f20..0000000 +--- a/LICENSE.amlogic_vdec ++++ /dev/null +@@ -1,15 +0,0 @@ +---------------------------------------------------------------------- +-Amlogic Co., Inc. grants permission to use and redistribute +-aforementioned firmware files for the use with devices containing +-Amlogic chipsets, but not as part of the Linux kernel or in any other +-form which would require these files themselves to be covered by the +-terms of the GNU General Public License or the GNU Lesser General +-Public License. +- +-These firmware files are distributed in the hope that they will be +-useful, but are provided WITHOUT ANY WARRANTY, INCLUDING BUT NOT +-LIMITED TO IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A +-PARTICULAR PURPOSE. +- +-Amlogic Contact: Arden Jin +---------------------------------------------------------------------- +diff --git a/LICENSE.amphion_vpu b/LICENSE.amphion_vpu +deleted file mode 100644 +index 31840ec..0000000 +--- a/LICENSE.amphion_vpu ++++ /dev/null +@@ -1,48 +0,0 @@ +-Copyright 2015, Amphion Semiconductor Ltd +-Copyright 2021, NXP +-All rights reserved. +- +-Redistribution. Reproduction and redistribution in binary form, without +-modification, for use solely in conjunction with a NXP +-chipset, is permitted provided that the following conditions are met: +- +- . Redistributions must reproduce the above copyright notice and the following +- disclaimer in the documentation and/or other materials provided with the +- distribution. +- +- . Neither the name of NXP nor the names of its suppliers +- may be used to endorse or promote products derived from this Software +- without specific prior written permission. +- +- . No reverse engineering, decompilation, or disassembly of this Software is +- permitted. +- +-Limited patent license. NXP (.Licensor.) grants you +-(.Licensee.) a limited, worldwide, royalty-free, non-exclusive license under +-the Patents to make, have made, use, import, offer to sell and sell the +-Software. No hardware per se is licensed hereunder. +-The term .Patents. as used in this agreement means only those patents or patent +-applications owned solely and exclusively by Licensor as of the date of +-Licensor.s submission of the Software and any patents deriving priority (i.e., +-having a first effective filing date) therefrom. The term .Software. 
as used in +-this agreement means the firmware image submitted by Licensor, under the terms +-of this license, to git://git.kernel.org/pub/scm/linux/kernel/git/firmware/ +-linux-firmware.git. +-Notwithstanding anything to the contrary herein, Licensor does not grant and +-Licensee does not receive, by virtue of this agreement or the Licensor's +-submission of any Software, any license or other rights under any patent or +-patent application owned by any affiliate of Licensor or any other entity +-(other than Licensor), whether expressly, impliedly, by virtue of estoppel or +-exhaustion, or otherwise. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.dib0700 b/LICENSE.dib0700 +deleted file mode 100644 +index fdb6bde..0000000 +--- a/LICENSE.dib0700 ++++ /dev/null +@@ -1,22 +0,0 @@ +-Firmware provided by Patrick Boettcher to Devin +-Heitmueller on October 8, 2009. +- +-The USB firmware file "dvb-usb-dib0700.1.20.fw" for DiBcom's DiB0700, +-used together with the Linux driver module dvb-usb-dib0700, is +-provided under the following licensing terms: +- +-Copyright (c) 2009, DiBcom +- +-Permission to use, copy, modify, and/or distribute this software for +-any purpose with or without fee is hereby granted, provided that the +-above copyright notice and this permission notice appear in all +-copies. +- +-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +-WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +-AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +-DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +-PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +-PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENSE.ipu3_firmware b/LICENSE.ipu3_firmware +deleted file mode 100644 +index 2559884..0000000 +--- a/LICENSE.ipu3_firmware ++++ /dev/null +@@ -1,36 +0,0 @@ +-Copyright (c) 2017, Intel Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. 
Intel Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell (“Utilize”) this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone. The patent license +-shall not apply to any combinations which include this software. No hardware +-per se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/WHENCE b/WHENCE +index 116d04d..d78e45e 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -53,16 +53,6 @@ Found in hex form in the kernel source. + + -------------------------------------------------------------------------- + +-Driver: dvb-ttusb-budget -- Technotrend/Hauppauge Nova-USB devices +- +-File: ttusb-budget/dspbootcode.bin +- +-Licence: Unknown +- +-Found in hex form in the kernel source. +- +--------------------------------------------------------------------------- +- + Driver: keyspan -- USB Keyspan USA-xxx serial device + + File: keyspan/mpr.fw +@@ -236,45 +226,6 @@ Converted from Intel HEX files, used in our binary representation of ihex. + + -------------------------------------------------------------------------- + +-Driver: cpia2 -- cameras based on Vision's CPiA2 +- +-File: cpia2/stv0672_vp4.bin +- +-Licence: Allegedly GPLv2+, but no source visible. Marked: +- Copyright (C) 2001 STMicroelectronics, Inc. +- Contact: steve.miller@st.com +- Description: This file contains patch data for the CPiA2 (stv0672) VP4. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: dabusb -- Digital Audio Broadcasting (DAB) Receiver for USB and Linux +- +-File: dabusb/firmware.fw +-File: dabusb/bitstream.bin +- +-Licence: Distributable +- +- * Copyright (C) 1999 BayCom GmbH +- * +- * Redistribution and use in source and binary forms, with or without +- * modification, are permitted provided that redistributions of source +- * code retain the above copyright notice and this comment without +- * modification. +- +--------------------------------------------------------------------------- +- +-Driver: vicam -- USB 3com HomeConnect (aka vicam) +- +-File: vicam/firmware.fw +- +-Licence: Unknown +- +-Found in hex form in kernel source. 
+- +--------------------------------------------------------------------------- +- + Driver: io_edgeport - USB Inside Out Edgeport Serial Driver + + File: edgeport/boot.fw +@@ -1038,17 +989,6 @@ Also available from http://wireless.kernel.org/en/users/Drivers/iwlwifi#Firmware + + -------------------------------------------------------------------------- + +-Driver: ipu3-imgu - Intel IPU3 (3rd Gen Image Processing Unit) driver +- +-File: intel/irci_irci_ecr-master_20161208_0213_20170112_1500.bin +-Version: irci_irci_ecr-master_20161208_0213_20170112_1500 +-md5sum: 59abc311fce49c5a180b5a8a3917912d +-Link: intel/ipu3-fw.bin -> irci_irci_ecr-master_20161208_0213_20170112_1500.bin +- +-Licence: Redistributable. See LICENSE.ipu3_firmware for details +- +--------------------------------------------------------------------------- +- + Driver: tehuti - Tehuti Networks 10G Ethernet + + File: tehuti/bdx.bin +@@ -1203,36 +1143,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: cx231xx - Conexant Cx23100/101/102 USB broadcast A/V decoder +- +-File: v4l-cx231xx-avcore-01.fw +- +-Driver: cx23418 - Conexant PCI Broadcast A/V with MPEG encoder +- +-File: v4l-cx23418-apu.fw +-File: v4l-cx23418-cpu.fw +-File: v4l-cx23418-dig.fw +- +-Driver: cx23885 - Conexant PCI Express Broadcast A/V decoder +- +-File: v4l-cx23885-avcore-01.fw +- +-Driver: cx23840 - Conexant sideport Broadcast A/V decoder +- +-File: v4l-cx25840.fw +- +-Licence: Redistributable. +- +- Conexant grants permission to use and redistribute these firmware +- files for use with Conexant devices, but not as a part of the Linux +- kernel or in any other form which would require these files themselves +- to be covered by the terms of the GNU General Public License. +- These firmware files are distributed in the hope that they will be +- useful, but WITHOUT ANY WARRANTY; without even the implied warranty +- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +--------------------------------------------------------------------------- +- + Driver: qlogicpti - PTI Qlogic, ISP Driver + + File: qlogic/isp1000.bin +@@ -1316,18 +1226,6 @@ Available from http://ldriver.qlogic.com/firmware/netxen_nic/new/ + + -------------------------------------------------------------------------- + +-Driver: dvb-ttpci -- AV7110 cards +- +-File: av7110/bootcode.bin +-Source: av7110/Boot.S +-Source: av7110/Makefile +- +-Licence: GPLv2 or later. See GPL-2 and GPL-3 for details. +- +-ARM assembly source code from https://linuxtv.org/downloads/firmware/Boot.S +- +--------------------------------------------------------------------------- +- + Driver: rt61pci - Ralink RT2561, RT2561S, RT2661 wireless MACs + + File: rt2561.bin +@@ -1421,35 +1319,6 @@ Provided from the author, Bernd Porr + + -------------------------------------------------------------------------- + +-Driver: xc4000 - Xceive 4000 Tuner driver +- +-File: dvb-fe-xc4000-1.4.1.fw +-Version: 1.4.1 +- +-Licence: Redistributable. See LICENCE.xc4000 for details +- +--------------------------------------------------------------------------- +-Driver: xc5000 - Xceive 5000 Tuner driver +- +-File: dvb-fe-xc5000-1.6.114.fw +-Version: 1.6.114 +- +-File: dvb-fe-xc5000c-4.1.30.7.fw +-Version: 4.1.30.7 +- +-Licence: Redistributable. 
See LICENCE.xc5000 and LICENCE.xc5000c for details +- +--------------------------------------------------------------------------- +- +-Driver: dib0700 - DiBcom dib0700 USB DVB bridge driver +- +-File: dvb-usb-dib0700-1.20.fw +-Version: 1.20 +- +-Licence: Redistributable. See LICENSE.dib0700 for details +- +--------------------------------------------------------------------------- +- + Driver: ath3k - DFU Driver for Atheros bluetooth chipset AR3011 + + File: ath3k-1.fw +@@ -2387,14 +2256,6 @@ Licence: Redistributable, provided by Realtek in their driver + + -------------------------------------------------------------------------- + +-Driver: lgs8gxx - Legend Silicon GB20600 demodulator driver +- +-File: lgs8g75.fw +- +-Licence: Unknown +- +--------------------------------------------------------------------------- +- + Driver: ib_qib - QLogic Infiniband + + File: qlogic/sd7220.fw +@@ -2671,14 +2532,6 @@ Licence: GPLv2. See GPL-2 for details. + + -------------------------------------------------------------------------- + +-Driver: ti-vpe - Texas Instruments V4L2 driver for Video Processing Engine +- +-File: ti/vpdma-1b8.bin +- +-Licence: Redistributable. See LICENCE.ti-tspa for details. +- +--------------------------------------------------------------------------- +- + Driver: wl1251 - Texas Instruments 802.11 WLAN driver for WiLink4 chips + + File: ti-connectivity/wl1251-fw.bin +@@ -2807,23 +2660,6 @@ Licence: Redistributable. See LICENCE.ti-connectivity for details. + + -------------------------------------------------------------------------- + +-Driver: tlg2300 - Telgent 2300 V4L/DVB driver. +- +-File: tlg2300_firmware.bin +- +-Licence: Redistributable. +- +- Telegent System grants permission to use and redistribute these +- firmware files for use with devices containing the chip tlg2300, but +- not as a part of the Linux kernel or in any other form which would +- require these files themselves to be covered by the terms of the GNU +- General Public License. These firmware files are distributed in the +- hope that they will be useful, but WITHOUT ANY WARRANTY; without even +- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +- PURPOSE. +- +--------------------------------------------------------------------------- +- + Driver: r8712u - Realtek 802.11n WLAN driver for RTL8712U + + File: rtlwifi/rtl8712u.bin +@@ -3459,23 +3295,6 @@ License: Redistributable. See LICENCE.atheros_firmware for details + + -------------------------------------------------------------------------- + +-Driver: drxk - Micronas DRX-K demodulator driver +- +-File: dvb-usb-terratec-h5-drxk.fw +- +-Licence: Redistributable. +- +-TERRATEC grants permission to use and redistribute these firmware +-files for use with TERRATEC devices, but not as part of the Linux +-kernel or in any other form which would require these files themselves +-to be covered by the terms of the GNU General Public License. +- +-These firmware files are distributed in the hope that they will be +-useful, but WITHOUT ANY WARRANTY; without even the implied warranty +-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +--------------------------------------------------------------------------- +- + Driver: ene-ub6250 -- ENE UB6250 SD card reader driver + + File: ene-ub6250/sd_init1.bin +@@ -3506,27 +3325,6 @@ Licence: Redistributable. 
See LICENCE.atheros_firmware for details + + -------------------------------------------------------------------------- + +-Driver: s5p-mfc - Samsung MFC video encoder/decoder driver +- +-File: s5p-mfc.fw +-File: s5p-mfc-v6.fw +-File: s5p-mfc-v6-v2.fw +-File: s5p-mfc-v7.fw +-File: s5p-mfc-v8.fw +- +-Licence: Redistributable. +- +-Samsung grants permission to use and redistribute aforementioned firmware +-files for the use with Exynos series devices, but not as part of the Linux +-kernel, or in any other form which would require these files themselves +-to be covered by the terms of the GNU General Public License. +- +-These firmware files are distributed in the hope that they will be +-useful, but WITHOUT ANY WARRANTY; without even the implied warranty +-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +--------------------------------------------------------------------------- +- + Driver: carl9170 -- Atheros AR9170 802.11 draft-n USB driver + + File: carl9170-1.fw +@@ -3770,33 +3568,6 @@ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + -------------------------------------------------------------------------- + +-Driver: go7007 +- +-File: go7007/s2250-1.fw +-File: go7007/s2250-2.fw +-Link: s2250.fw -> go7007/s2250-2.fw +-Link: s2250_loader.fw -> go7007/s2250-1.fw +- +-Licence: +- Sensoray grants permission to use and redistribute these firmware +- files for use with Sensoray devices, but not as a part of the Linux +- kernel or in any other form which would require these files themselves +- to be covered by the terms of the GNU General Public License. +- These firmware files are distributed in the hope that they will be +- useful, but WITHOUT ANY WARRANTY; without even the implied warranty +- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +-File: go7007/go7007fw.bin +-File: go7007/go7007tv.bin +-File: go7007/lr192.fw +-File: go7007/px-m402u.fw +-File: go7007/px-tv402u.fw +-File: go7007/wis-startrek.fw +- +-Licence: Redistributable. See LICENCE.go7007 for details +- +--------------------------------------------------------------------------- +- + Driver: ccp - Platform Security Processor (PSP) device + + File: amd/amd_sev_fam17h_model0xh.sbin +@@ -3967,44 +3738,6 @@ Licence: Redistributable. See LICENCE.r8a779x_usb3 for details. + + -------------------------------------------------------------------------- + +-Driver: as102 - Abilis Systems Single DVB-T Receiver +- +-File: as102_data1_st.hex +-File: as102_data2_st.hex +- +-License: Redistributable. See LICENCE.Abilis for details +- +--------------------------------------------------------------------------- +- +-Driver: it9135 -- ITEtech IT913x DVB-T USB driver +- +-File: dvb-usb-it9135-01.fw +-File: dvb-usb-it9135-02.fw +- +-Licence: Redistributable. See LICENCE.it913x for details +- +--------------------------------------------------------------------------- +- +-Driver: smsmdtv - Siano MDTV Core module +- +-File: cmmb_vega_12mhz.inp +-File: cmmb_venice_12mhz.inp +-File: dvb_nova_12mhz.inp +-File: dvb_nova_12mhz_b0.inp +-File: isdbt_nova_12mhz.inp +-File: isdbt_nova_12mhz_b0.inp +-File: isdbt_rio.inp +-File: sms1xxx-hcw-55xxx-dvbt-02.fw +-File: sms1xxx-hcw-55xxx-isdbt-02.fw +-File: sms1xxx-nova-a-dvbt-01.fw +-File: sms1xxx-nova-b-dvbt-01.fw +-File: sms1xxx-stellar-dvbt-01.fw +-File: tdmb_nova_12mhz.inp +- +-Licence: Redistributable. 
See LICENCE.siano for details +- +--------------------------------------------------------------------------- +- + Driver: xhci-tegra -- NVIDIA Tegra XHCI driver + + File: nvidia/tegra124/xusb.bin +@@ -4023,23 +3756,6 @@ Licence: Redistributable. See LICENCE.nvidia for details + + -------------------------------------------------------------------------- + +-Driver: tegra-vic -- NVIDIA Tegra VIC driver +- +-File: nvidia/tegra124/vic03_ucode.bin +-Link: nvidia/tegra124/vic.bin -> vic03_ucode.bin +- +-File: nvidia/tegra210/vic04_ucode.bin +-Link: nvidia/tegra210/vic.bin -> vic04_ucode.bin +- +-File: nvidia/tegra186/vic04_ucode.bin +-Link: nvidia/tegra186/vic.bin -> vic04_ucode.bin +- +-File: nvidia/tegra194/vic.bin +- +-Licence: Redistributable. See LICENCE.nvidia for details +- +--------------------------------------------------------------------------- +- + Driver: atusb - ATUSB IEEE 802.15.4 transceiver driver + + File: atusb/atusb-0.2.dfu +@@ -4995,17 +4711,6 @@ Licence: Redistributable. See LICENCE.Marvell for details. + + -------------------------------------------------------------------------- + +-Driver: mtk-vpu - MediaTek VPU video processing unit driver +- +-File: mediatek/mt8173/vpu_d.bin +-File: mediatek/mt8173/vpu_p.bin +-Link: vpu_d.bin -> mediatek/mt8173/vpu_d.bin +-Link: vpu_p.bin -> mediatek/mt8173/vpu_p.bin +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- + Driver: mtk_scp - MediaTek SCP System Control Processing Driver + + File: mediatek/mt8183/scp.img +@@ -5035,15 +4740,6 @@ Licence: Redistributable. See LICENCE.mediatek for details. + + -------------------------------------------------------------------------- + +-Driver: rk3399-dptx - ROCKCHIP rk3399 dptx firmware +- +-File: rockchip/dptx.bin +-Version: 3.1 +- +-Licence: Redistributable. See LICENCE.rockchip for details. +- +--------------------------------------------------------------------------- +- + Driver: mt76x0 - MediaTek MT76x0 Wireless MACs + + File: mediatek/mt7610u.bin +@@ -5323,44 +5019,6 @@ Licence: Redistributable. See LICENSE.QualcommAtheros_ath10k for details + + -------------------------------------------------------------------------- + +-Driver: venus - Qualcomm Venus video codec accelerator +- +-File: qcom/venus-1.8/venus.mbn +-Link: qcom/venus-1.8/venus.mdt -> venus.mbn +- +-Version: 1.8-00109 +- +-File: qcom/venus-4.2/venus.mbn +-Link: qcom/venus-4.2/venus.mdt -> venus.mbn +- +-Version: 4.2 +- +-File: qcom/venus-5.2/venus.mbn +-Link: qcom/venus-5.2/venus.mdt -> venus.mbn +- +-Version: 5.2-00023 +- +-File: qcom/venus-5.4/venus.mbn +-Link: qcom/venus-5.4/venus.mdt -> venus.mbn +- +-Version: 5.4-00053 +- +-File: qcom/vpu-1.0/venus.mbn +-Link: qcom/vpu-1.0/venus.mdt -> venus.mbn +- +-Version: VIDEO.VPU.1.0-00087-PROD-1 +- +-File: qcom/vpu-2.0/venus.mbn +- +-Version: VIDEO.VPU.2.0-00049-PROD-1 +- +-Licence: Redistributable. See LICENSE.qcom and qcom/NOTICE.txt for details +- +-Binary files supplied originally from +-https://developer.qualcomm.com/hardware/dragonboard-410c/tools +- +--------------------------------------------------------------------------- +- + Driver: imx-sdma - support for i.MX SDMA driver + + File: imx/sdma/sdma-imx6q.bin +@@ -5582,15 +5240,6 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: cdns-mhdp - Cadence MHDP8546 DP bridge +- +-File: cadence/mhdp8546.bin +-Version: 2.1.0 +- +-Licence: Redistributable. 
See LICENCE.cadence for details +- +--------------------------------------------------------------------------- +- + Driver: fsl-mc bus - NXP Management Complex Bus Driver + + File: dpaa2/mc/mc_10.10.0_ls1088a.itb +@@ -5622,28 +5271,6 @@ Licence: Redistributable. See LICENCE.microchip for details + + -------------------------------------------------------------------------- + +-Driver: meson-vdec - Amlogic video decoder +- +-File: meson/vdec/g12a_h264.bin +-File: meson/vdec/g12a_hevc_mmu.bin +-File: meson/vdec/g12a_vp9.bin +-File: meson/vdec/gxbb_h264.bin +-File: meson/vdec/gxl_h263.bin +-File: meson/vdec/gxl_h264.bin +-File: meson/vdec/gxl_hevc.bin +-File: meson/vdec/gxl_hevc_mmu.bin +-File: meson/vdec/gxl_mjpeg.bin +-File: meson/vdec/gxl_mpeg12.bin +-File: meson/vdec/gxl_mpeg4_5.bin +-File: meson/vdec/gxl_vp9.bin +-File: meson/vdec/gxm_h264.bin +-File: meson/vdec/sm1_hevc_mmu.bin +-File: meson/vdec/sm1_vp9_mmu.bin +- +-Licence: Redistributable. See LICENSE.amlogic_vdec for details. +- +--------------------------------------------------------------------------- +- + Driver: ice - Intel(R) Ethernet Connection E800 Series + + File: intel/ice/ddp/ice-1.3.30.0.pkg +@@ -5685,14 +5312,6 @@ Licence: Redistributable. See LICENCE.Marvell for details. + + ------------------------------------------------ + +-Driver: lt9611uxc - Lontium DSI to HDMI bridge +- +-File: lt9611uxc_fw.bin +- +-License: Redistributable. See LICENSE.Lontium for details. +- +--------------------------------------------------------------------------- +- + Driver: wfx - Silicon Labs Wi-Fi Transceiver + + File: wfx/wfm_wf200_C0.sec +@@ -5713,14 +5332,6 @@ https://github.com/SiliconLabs/wfx-linux-tools + + -------------------------------------------------------------------------- + +-Driver: wave5 - Chips&Media, Inc. video codec driver +- +-File: cnm/wave521c_k3_codec_fw.bin +- +-Licence: Redistributable. See LICENCE.cnm for details. +- +---------------------------------------------------------------------------- +- + Driver: rvu_cptpf - Marvell CPT driver + + File: mrvl/cpt01/ae.out +@@ -5741,17 +5352,6 @@ Licence: Redistributable. See LICENCE.Marvell for details. + + --------------------------------------------------------------------------- + +-Driver: amphion - Amphion VPU(Video Processing Unit) Codec IP driver +- +-File: amphion/vpu/vpu_fw_imx8_dec.bin +-Version: 1.8.8 +-File: amphion/vpu/vpu_fw_imx8_enc.bin +-Version: 1.3.4 +- +-Licence: Redistributable. See LICENSE.amphion_vpu for details +- +---------------------------------------------------------------------------- +- + Driver: nxp-sr1xx - NXP Ultra Wide Band driver + File: nxp/sr150_fw.bin + Version: 35.00.03 +-- +2.40.1 + diff --git a/packages/linux-firmware/0003-linux-firmware-bt-wifi-Remove-firmware-for-Bluetooth.patch b/packages/linux-firmware/0003-linux-firmware-bt-wifi-Remove-firmware-for-Bluetooth.patch new file mode 100644 index 00000000..5d0bb05c --- /dev/null +++ b/packages/linux-firmware/0003-linux-firmware-bt-wifi-Remove-firmware-for-Bluetooth.patch @@ -0,0 +1,3657 @@ +From 8330b130d288943c11658a69c033e97f02a69591 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Tue, 25 Jul 2023 10:29:08 +0000 +Subject: [PATCH] linux-firmware: bt/wifi: Remove firmware for Bluetooth/WiFi + devices + +Bottlerocket does not configure any drivers for Bluetooth or WiFi +devices. Without the drivers the firmware is useless and does not need +to be shipped. 
The following list maps driver names as listed in
+WHENCE to the kernel config options that enable those drivers, so that
+if we do add drivers in the future we can easily check whether we need
+to add firmware binaries.
+
+* ar9170 - CONFIG_CARL9170
+* carl9170 - CONFIG_CARL9170
+* ath9k_htc - CONFIG_ATH9K_HTC
+* ath3k - CONFIG_BT_ATH3K
+* DFU Driver [...] AR3012 - CONFIG_BT_ATH3K
+* Atheros AR300x UART [...] - CONFIG_BT_HCIUART_ATH3K
+* ath6kl - CONFIG_ATH6KL
+* ath10k - CONFIG_ATH10K
+* ath11k - CONFIG_ATH11K
+* ar5523 - CONFIG_AR5523
+* btqca - CONFIG_BT_QCA
+* qca - CONFIG_QCA7000
+* mt7601u - CONFIG_MT7601U
+* btmtk_usb - CONFIG_BT_HCIBTUSB_MTK
+* btmtk - CONFIG_BT_MTK
+* mt76x0 - CONFIG_MT76x0U && CONFIG_MT76x0E
+* mt76x2e - CONFIG_MT76x2E
+* mt76x2u - CONFIG_MT76x2U
+* mt7615e - CONFIG_MT7615E
+* mt7622 - CONFIG_MT7622WMAC
+* mt7663 - CONFIG_MT7663U && CONFIG_MT7663S
+* mt7915e - CONFIG_MT7915E
+* mt7921 - CONFIG_MT7921U && CONFIG_MT7921S && CONFIG_MT7921E
+* mt7922 - CONFIG_MT7921U && CONFIG_MT7921S && CONFIG_MT7921E
+* brcmsmac - CONFIG_BRCMSMAC
+* brcmfmac - CONFIG_BRCMFMAC
+* BCM-0bb4-0306 - CONFIG_BT_BCM
+* wl1251 - CONFIG_WL1251
+* wl12xx - CONFIG_WL12XX
+* wl18xx - CONFIG_WL18XX
+* TI_ST - CONFIG_TI_ST
+* r8152 - CONFIG_USB_RTL8152
+* r8169 - CONFIG_R8169
+* r8712u - CONFIG_R8712U
+* rtl8188ee - CONFIG_RTL8188EE
+* rtl8192e - CONFIG_RTL8192E
+* rtl8192ce - CONFIG_RTL8192CE
+* rtl8192cu - CONFIG_RTL8192CU
+* rtl8192de - CONFIG_RTL8192DE
+* rtl8192ee - CONFIG_RTL8192EE
+* rtl8192se - CONFIG_RTL8192SE
+* rtl8723be - CONFIG_RTL8723BE
+* rtl8723bs - CONFIG_RTL8723BS
+* rtl8723de - CONFIG_RTL8723_COMMON
+* rtl8723e - CONFIG_RTL8723_COMMON
+* rtl8821ae - CONFIG_RTL8821AE
+* rtl8822be - CONFIG_RTW88_8822BE
+* rtl8xxxu - CONFIG_RTL8XXXU
+* rtw88 - CONFIG_RTW88
+* rtw89 - CONFIG_RTW89
+* btusb - CONFIG_BT_HCIBTUSB
+* nxp-sr1xx - CONFIG_NXP_UWB (driver has yet to be accepted upstream)
+* btnxpuart - CONFIG_BT_NXPUART
+* cw1200 - CONFIG_CW1200
+* rsi - CONFIG_RSI_91X
+* wilc1000 - CONFIG_WILC1000
+* qcom_q6v5_pas - CONFIG_QCOM_Q6V5_PAS
+* qcom_q6v5_mss - CONFIG_QCOM_Q6V5_MSS
+* iwlwifi - CONFIG_IWLWIFI
+* rt2800pci - CONFIG_RT2800PCI
+* rt2800usb - CONFIG_RT2800USB
+* rt2860sta - CONFIG_RT2800PCI && CONFIG_RT2800USB
+* rt2870sta - CONFIG_RT2800PCI && CONFIG_RT2800USB
+* rt61pci - CONFIG_RT61PCI
+* rt73usb - CONFIG_RT73USB
+* wfx - CONFIG_WFX
+
+Signed-off-by: Leonard Foerster
+---
+ LICENCE.NXP | 22 -
+ LICENCE.OLPC | 33 -
+ LICENCE.atheros_firmware | 38 -
+ LICENCE.broadcom_bcm43xx | 65 -
+ LICENCE.cw1200 | 35 -
+ LICENCE.cypress | 138 --
+ LICENCE.ibt_firmware | 39 -
+ LICENCE.iwlwifi_firmware | 39 -
+ LICENCE.open-ath9k-htc-firmware | 206 --
+ LICENCE.ralink-firmware.txt | 39 -
+ LICENCE.ralink_a_mediatek_company_firmware | 39 -
+ LICENCE.rtlwifi_firmware.txt | 39 -
+ LICENCE.ti-connectivity | 61 -
+ LICENCE.wl1251 | 59 -
+ LICENSE.QualcommAtheros_ar3k | 47 -
+ LICENSE.QualcommAtheros_ath10k | 47 -
+ LICENSE.atmel | 36 -
+ LICENSE.nxp | 26 -
+ WHENCE | 2279 +-------------------
+ 19 files changed, 27 insertions(+), 3260 deletions(-)
+ delete mode 100644 LICENCE.NXP
+ delete mode 100644 LICENCE.OLPC
+ delete mode 100644 LICENCE.atheros_firmware
+ delete mode 100644 LICENCE.broadcom_bcm43xx
+ delete mode 100644 LICENCE.cw1200
+ delete mode 100644 LICENCE.cypress
+ delete mode 100644 LICENCE.ibt_firmware
+ delete mode 100644 LICENCE.iwlwifi_firmware
+ delete mode 100644 LICENCE.open-ath9k-htc-firmware
+ delete mode 100644 LICENCE.ralink-firmware.txt
+ delete mode 100644 
LICENCE.ralink_a_mediatek_company_firmware + delete mode 100644 LICENCE.rtlwifi_firmware.txt + delete mode 100644 LICENCE.ti-connectivity + delete mode 100644 LICENCE.wl1251 + delete mode 100644 LICENSE.QualcommAtheros_ar3k + delete mode 100644 LICENSE.QualcommAtheros_ath10k + delete mode 100644 LICENSE.atmel + delete mode 100644 LICENSE.nxp + +diff --git a/LICENCE.NXP b/LICENCE.NXP +deleted file mode 100644 +index 96215f1..0000000 +--- a/LICENCE.NXP ++++ /dev/null +@@ -1,22 +0,0 @@ +-Copyright 2019. NXP B.V. All rights reserved. +- +-Redistribution and use in binary form is permitted provided that the following +-conditions are met: +- +-1. Redistributions must reproduce the above copyright notice, this list of +-conditions and the following disclaimer in the documentation and/or other +-materials provided with the distribution. +- +-2. Redistribution and use shall be used only with NXP B.V. silicon products. +-Any other use, reproduction, modification, translation, or compilation of the +-Software is prohibited. +- +-3. No reverse engineering, decompilation, or disassembly is permitted. +- +-TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED +-"AS IS" WITHOUT WARRANTY OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ANY EXPRESS +-OR IMPLIED WARRANTIES OF MERCHANTABILITY, ACCURACY, FITNESS OR SUFFICIENCY FOR A +-PARTICULAR PURPOSE, SATISFACTORY QUALITY, CORRESPONDENCE WITH DESCRIPTION, QUIET +-ENJOYMENT OR NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY RIGHTS. +-NXP B.V., ITS AFFILIATES AND THEIR SUPPLIERS DISCLAIM ANY WARRANTY THAT THE +-DELIVERABLES WILL OPERATE WITHOUT INTERRUPTION OR BE ERROR-FREE. +diff --git a/LICENCE.OLPC b/LICENCE.OLPC +deleted file mode 100644 +index a740952..0000000 +--- a/LICENCE.OLPC ++++ /dev/null +@@ -1,33 +0,0 @@ +-Copyright (c) 2006, One Laptop per Child and Marvell Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Marvell Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +-* You may not use or attempt to use this software in conjunction with +- any product that is offered by a third party as a replacement, +- substitute or alternative to a Marvell Product where a Marvell Product +- is defined as a proprietary wireless LAN embedded client solution of +- Marvell or a Marvell Affiliate. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.atheros_firmware b/LICENCE.atheros_firmware +deleted file mode 100644 +index e0ebdac..0000000 +--- a/LICENCE.atheros_firmware ++++ /dev/null +@@ -1,38 +0,0 @@ +-Copyright (c) 2008-2010, Atheros Communications, Inc. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +- +-* Neither the name of Atheros Communications, Inc. nor the names of +- its suppliers may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this +- software is permitted. +- +-Limited patent license. Atheros Communications, Inc. grants a +-world-wide, royalty-free, non-exclusive license under patents it +-now or hereafter owns or controls to make, have made, use, import, +-offer to sell and sell ("Utilize") this software, but solely to +-the extent that any such patent is necessary to Utilize the software +-in conjunction with an Atheros Chipset. The patent license shall not +-apply to any other combinations which include this software. No +-hardware per se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.broadcom_bcm43xx b/LICENCE.broadcom_bcm43xx +deleted file mode 100644 +index ff26fdd..0000000 +--- a/LICENCE.broadcom_bcm43xx ++++ /dev/null +@@ -1,65 +0,0 @@ +-SOFTWARE LICENSE AGREEMENT +- +-The accompanying software in binary code form (“Software”), is licensed to you, +-or, if you are accepting on behalf of an entity, the entity and its affiliates +-exercising rights hereunder (“Licensee”) subject to the terms of this software +-license agreement (“Agreement”), unless Licensee and Broadcom Corporation +-(“Broadcom”) execute a separate written software license agreement governing +-use of the Software. ANY USE, REPRODUCTION, OR DISTRIBUTION OF THE SOFTWARE +-CONSTITUTES LICENSEE’S ACCEPTANCE OF THIS AGREEMENT. +- +-1. License. 
Subject to the terms and conditions of this Agreement, +-Broadcom hereby grants to Licensee a limited, non-exclusive, non-transferable, +-royalty-free license: (i) to use and integrate the Software with any other +-software; and (ii) to reproduce and distribute the Software complete, +-unmodified, and as provided by Broadcom, solely for use with Broadcom +-proprietary integrated circuit product(s) sold by Broadcom with which the +-Software was designed to be used, or their successors. +- +-2. Restrictions. Licensee shall distribute Software with a copy of this +-Agreement. Licensee shall not remove, efface or obscure any copyright or +-trademark notices from the Software. Reproductions of the Broadcom copyright +-notice shall be included with each copy of the Software, except where such +-Software is embedded in a manner not readily accessible to the end user. +-Licensee shall not: (i) use, license, sell or otherwise distribute the Software +-except as provided in this Agreement; (ii) attempt to modify in any way, +-reverse engineer, decompile or disassemble any portion of the Software; or +-(iii) use the Software or other material in violation of any applicable law or +-regulation, including but not limited to any regulatory agency. This Agreement +-shall automatically terminate upon Licensee’s failure to comply with any of the +-terms of this Agreement. In such event, Licensee will destroy all copies of the +-Software and its component parts. +- +-3. Ownership. The Software is licensed and not sold. Title to and +-ownership of the Software, including all intellectual property rights thereto, +-and any portion thereof remain with Broadcom or its licensors. Licensee hereby +-covenants that it will not assert any claim that the Software created by or for +-Broadcom infringe any intellectual property right owned or controlled by +-Licensee. +- +-4. Disclaimer. THE SOFTWARE IS OFFERED “AS IS,” AND BROADCOM PROVIDES AND +-GRANTS AND LICENSEE RECEIVES NO SUPPORT AND NO WARRANTIES OF ANY KIND, EXPRESS +-OR IMPLIED, BY STATUTE, COMMUNICATION OR CONDUCT WITH LICENSEE, OR OTHERWISE. +-BROADCOM SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, +-FITNESS FOR A SPECIFIC PURPOSE, OR NONINFRINGEMENT CONCERNING THE SOFTWARE OR +-ANY UPGRADES TO OR DOCUMENTATION FOR THE SOFTWARE. WITHOUT LIMITATION OF THE +-ABOVE, BROADCOM GRANTS NO WARRANTY THAT THE SOFTWARE IS ERROR-FREE OR WILL +-OPERATE WITHOUT INTERRUPTION, AND GRANTS NO WARRANTY REGARDING ITS USE OR THE +-RESULTS THEREFROM INCLUDING, WITHOUT LIMITATION, ITS CORRECTNESS, ACCURACY, OR +-RELIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, IN NO EVENT SHALL BROADCOM +-OR ANY OF ITS LICENSORS HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND ON ANY THEORY +-OF LIABILITY, WHETHER FOR BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE) OR +-OTHERWISE, ARISING OUT OF THIS AGREEMENT OR USE, REPRODUCTION, OR DISTRIBUTION +-OF THE SOFTWARE, INCLUDING BUT NOT LIMITED TO LOSS OF DATA AND LOSS OF PROFITS, +-EVEN IF SUCH PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THESE +-LIMITATIONS SHALL APPLY NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY +-LIMITED REMEDY. +- +-5. Export Laws. LICENSEE UNDERSTANDS AND AGREES THAT THE SOFTWARE IS +-SUBJECT TO UNITED STATES AND OTHER APPLICABLE EXPORT-RELATED LAWS AND +-REGULATIONS AND THAT LICENSEE MAY NOT EXPORT, RE-EXPORT OR TRANSFER THE +-SOFTWARE OR ANY DIRECT PRODUCT OF THE SOFTWARE EXCEPT AS PERMITTED UNDER THOSE +-LAWS. 
WITHOUT LIMITING THE FOREGOING, EXPORT, RE-EXPORT, OR TRANSFER OF THE +-SOFTWARE TO CUBA, IRAN, NORTH KOREA, SUDAN, AND SYRIA IS PROHIBITED. +- +diff --git a/LICENCE.cw1200 b/LICENCE.cw1200 +deleted file mode 100644 +index 1016eca..0000000 +--- a/LICENCE.cw1200 ++++ /dev/null +@@ -1,35 +0,0 @@ +-Copyright (c) 2007-2013, ST Microelectronics NV. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without modification, +-are permitted provided that the following conditions are met: +- +-* Redistributions must reproduce the above copyright notice and the following +-disclaimer in the documentation and/or other materials provided with the +-distribution. +- +-* Neither the name of ST Microelectronics NV. nor the names of its suppliers +-may be used to endorse or promote products derived from this software without +-specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this software is +-permitted. +- +-Limited patent license. ST Microelectronics NV. grants a world-wide, royalty-free, +- non-exclusive license under patents it now or hereafter owns or controls to make, +- have made, use, import, offer to sell and sell ("Utilize") this software, but +- solely to the extent that any such patent is necessary to Utilize the software in +-conjunction with an ST Microelectronics chipset. The patent license shall not +-apply to any other combinations which include this software. No hardware per se +-is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ANDCONTRIBUTORS +-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.cypress b/LICENCE.cypress +deleted file mode 100644 +index 070ef66..0000000 +--- a/LICENCE.cypress ++++ /dev/null +@@ -1,138 +0,0 @@ +-### CYPRESS WIRELESS CONNECTIVITY DEVICES +-### DRIVER END USER LICENSE AGREEMENT (SOURCE AND BINARY DISTRIBUTION) +- +-PLEASE READ THIS END USER LICENSE AGREEMENT ("Agreement") CAREFULLY BEFORE +-DOWNLOADING, INSTALLING, OR USING THIS SOFTWARE, ANY ACCOMPANYING +-DOCUMENTATION, OR ANY UPDATES PROVIDED BY CYPRESS ("Software"). BY +-DOWNLOADING, INSTALLING, OR USING THE SOFTWARE, YOU ARE AGREEING TO BE BOUND +-BY THIS AGREEMENT. IF YOU DO NOT AGREE TO ALL OF THE TERMS OF THIS +-AGREEMENT, PROMPTLY RETURN AND DO NOT USE THE SOFTWARE. IF YOU HAVE +-PURCHASED THE SOFTWARE, YOUR RIGHT TO RETURN THE SOFTWARE EXPIRES 30 DAYS +-AFTER YOUR PURCHASE AND APPLIES ONLY TO THE ORIGINAL PURCHASER. +- +-Software Provided in Binary Code Form. This paragraph applies to any Software +-provided in binary code form. 
Subject to the terms and conditions of this +-Agreement, Cypress Semiconductor Corporation ("Cypress") grants you a +-non-exclusive, non-transferable license under its copyright rights in the +-Software to reproduce and distribute the Software in object code form only, +-solely for use in connection with Cypress integrated circuit products +-("Purpose"). +- +-Software Provided in Source Code Form. This paragraph applies to any Software +-provided in source code form ("Cypress Source Code"). Subject to the terms and +-conditions of this Agreement, Cypress grants you a non-exclusive, +-non-transferable license under its copyright rights in the Cypress Source Code +-to reproduce, modify, compile, and distribute the Cypress Source Code (whether +-in source code form or as compiled into binary code form) solely for the +-Purpose. Cypress retains ownership of the Cypress Source Code and any compiled +-version thereof. Subject to Cypress' ownership of the underlying Cypress +-Source Code, you retain ownership of any modifications you make to the +-Cypress Source Code. You agree not to remove any Cypress copyright or other +-notices from the Cypress Source Code and any modifications thereof. Any +-reproduction, modification, translation, compilation, or representation of +-the Cypress Source Code except as permitted in this paragraph is prohibited +-without the express written permission of Cypress. +- +-Free and Open Source Software. Portions of the Software may be licensed under +-free and/or open source licenses such as the GNU General Public License +-("FOSS"). FOSS is subject to the applicable license agreement and not this +-Agreement. If you are entitled to receive the source code from Cypress for any +-FOSS included with the Software, either the source code will be included with +-the Software or you may obtain the source code at no charge from +-. The applicable license terms will +-accompany each source code package. To review the license terms applicable to +-any FOSS for which Cypress is not required to provide you with source code, +-please see the Software's installation directory on your computer. +- +-Proprietary Rights. The Software, including all intellectual property rights +-therein, is and will remain the sole and exclusive property of Cypress or its +-suppliers. Except as otherwise expressly provided in this Agreement, you may +-not: (i) modify, adapt, or create derivative works based upon the Software; +-(ii) copy the Software; (iii) except and only to the extent explicitly +-permitted by applicable law despite this limitation, decompile, translate, +-reverse engineer, disassemble or otherwise reduce the Software to +-human-readable form; or (iv) use the Software other than for the Purpose. +- +-No Support. Cypress may, but is not required to, provide technical support for +-the Software. +- +-Term and Termination. This Agreement is effective until terminated. This +-Agreement and Your license rights will terminate immediately without notice +-from Cypress if you fail to comply with any provision of this Agreement. Upon +-termination, you must destroy all copies of Software in your possession or +-control. Termination of this Agreement will not affect any licenses validly +-granted as of the termination date to any end users of the Software. The +-following paragraphs shall survive any termination of this Agreement: "Free and +-Open Source Software," "Proprietary Rights," "Compliance With Law," +-"Disclaimer," "Limitation of Liability," and "General." 
+- +-Compliance With Law. Each party agrees to comply with all applicable laws, +-rules and regulations in connection with its activities under this Agreement. +-Without limiting the foregoing, the Software may be subject to export control +-laws and regulations of the United States and other countries. You agree to +-comply strictly with all such laws and regulations and acknowledge that you +-have the responsibility to obtain licenses to export, re-export, or import +-the Software. +- +-Disclaimer. TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, CYPRESS MAKES +-NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, WITH REGARD TO THE SOFTWARE, +-INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. Cypress reserves the +-right to make changes to the Software without notice. Cypress does not assume +-any liability arising out of the application or use of Software or any +-product or circuit described in the Software. Cypress does not authorize its +-products for use as critical components in life-support systems where a +-malfunction or failure may reasonably be expected to result in significant +-injury to the user. The inclusion of Cypress' product in a life-support +-system or application implies that the manufacturer of such system or +-application assumes all risk of such use and in doing so indemnifies Cypress +-against all charges. +- +-Limitation of Liability. IN NO EVENT WILL CYPRESS OR ITS SUPPLIERS, +-RESELLERS, OR DISTRIBUTORS BE LIABLE FOR ANY LOST REVENUE, PROFIT, OR DATA, +-OR FOR SPECIAL, INDIRECT, CONSEQUENTIAL, INCIDENTAL, OR PUNITIVE DAMAGES +-HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE +-USE OF OR INABILITY TO USE THE SOFTWARE EVEN IF CYPRESS OR ITS SUPPLIERS, +-RESELLERS, OR DISTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGES. IN NO EVENT SHALL CYPRESS' OR ITS SUPPLIERS' RESELLERS', OR +-DISTRIBUTORS' TOTAL LIABILITY TO YOU, WHETHER IN CONTRACT, TORT (INCLUDING +-NEGLIGENCE), OR OTHERWISE, EXCEED THE PRICE PAID BY YOU FOR THE SOFTWARE. +-THE FOREGOING LIMITATIONS SHALL APPLY EVEN IF THE ABOVE-STATED WARRANTY FAILS +-OF ITS ESSENTIAL PURPOSE. BECAUSE SOME STATES OR JURISDICTIONS DO NOT ALLOW +-LIMITATION OR EXCLUSION OF CONSEQUENTIAL OR INCIDENTAL DAMAGES, THE ABOVE +-LIMITATION MAY NOT APPLY TO YOU. +- +-Restricted Rights. The Software under this Agreement is commercial computer +-software as that term is described in 48 C.F.R. 252.227-7014(a)(1). If +-acquired by or on behalf of a civilian agency, the U.S. Government acquires +-this commercial computer software and/or commercial computer software +-documentation subject to the terms of this Agreement as specified in 48 +-C.F.R. 12.212 (Computer Software) and 12.211 (Technical Data) of the Federal +-Acquisition Regulations ("FAR") and its successors. If acquired by or on +-behalf of any agency within the Department of Defense ("DOD"), the U.S. +-Government acquires this commercial computer software and/or commercial +-computer software documentation subject to the terms of this Agreement as +-specified in 48 C.F.R. 227.7202-3 of the DOD FAR Supplement ("DFAR") and its +-successors. +- +-General. This Agreement will bind and inure to the benefit of each party's +-successors and assigns, provided that you may not assign or transfer this +-Agreement, in whole or in part, without Cypress' written consent. 
This +-Agreement shall be governed by and construed in accordance with the laws of +-the State of California, United States of America, as if performed wholly +-within the state and without giving effect to the principles of conflict of +-law. The parties consent to personal and exclusive jurisdiction of and venue +-in, the state and federal courts within Santa Clara County, California; +-provided however, that nothing in this Agreement will limit Cypress' right to +-bring legal action in any venue in order to protect or enforce its +-intellectual property rights. No failure of either party to exercise or +-enforce any of its rights under this Agreement will act as a waiver of such +-rights. If any portion hereof is found to be void or unenforceable, the +-remaining provisions of this Agreement shall remain in full force and +-effect. This Agreement is the complete and exclusive agreement between the +-parties with respect to the subject matter hereof, superseding and replacing +-any and all prior agreements, communications, and understandings (both +-written and oral) regarding such subject matter. Any notice to Cypress will +-be deemed effective when actually received and must be sent to Cypress +-Semiconductor Corporation, ATTN: Chief Legal Officer, 198 Champion Court, San +-Jose, CA 95134 USA. +diff --git a/LICENCE.ibt_firmware b/LICENCE.ibt_firmware +deleted file mode 100644 +index f878c6a..0000000 +--- a/LICENCE.ibt_firmware ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright © 2014, Intel Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Intel Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.iwlwifi_firmware b/LICENCE.iwlwifi_firmware +deleted file mode 100644 +index 6bdd16d..0000000 +--- a/LICENCE.iwlwifi_firmware ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2006-2021, Intel Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Intel Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.open-ath9k-htc-firmware b/LICENCE.open-ath9k-htc-firmware +deleted file mode 100644 +index 36655b7..0000000 +--- a/LICENCE.open-ath9k-htc-firmware ++++ /dev/null +@@ -1,206 +0,0 @@ +-This is a concatenation of LICENCE.txt and NOTICE.txt from the +-open-ath9k-htc-firmware repository describing licensing terms for the +-firmware image and its sources. +- +-The source code repository is publicly available at +-https://github.com/qca/open-ath9k-htc-firmware . +- +- +-LICENCE.txt +------------ +- +-Files with a Qualcomm Atheros / Atheros licence fall under the following +-licence. Please see NOTICES.TXT for information about other files in this +-repository. 
+- +----- +- +-Copyright (c) 2013 Qualcomm Atheros, Inc. +- +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted (subject to the limitations in the +-disclaimer below) provided that the following conditions are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the +- distribution. +- +- * Neither the name of Qualcomm Atheros nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +-NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +-GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +-HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +----- +- +- +-NOTICE.TXT +----------- +- +-This NOTICE.TXT file contains certain notices of software components included +-with the software that QUALCOMM ATHEROS Incorporated ('Qualcomm Atheros') is +-required to provide you. Notwithstanding anything in the notices in this file, +-your use of these software components together with the Qualcomm Atheros +-software (Qualcomm Atheros software hereinafter referred to as 'Software') is +-subject to the terms of your license from Qualcomm Atheros. Compliance with +-all copyright laws and software license agreements included in the notice +-section of this file are the responsibility of the user. Except as may be +-granted by separate express written agreement, this file provides no license +-to any Qualcomm Atheros patents, trademarks, copyrights, or other intellectual +-property. +- +-Copyright (c) 2013 QUALCOMM ATHEROS Incorporated. All rights reserved. +- +-QUALCOMM ATHEROS is a registered trademark and registered service mark of +-QUALCOMM ATHEROS Incorporated. All other trademarks and service marks are +-the property of their respective owners. +- +-NOTICES: +- +-/* +- * Copyright (c) 2005-2012 Atheros Communications Inc. +- * +- * Permission to use, copy, modify, and/or distribute this software for any +- * purpose with or without fee is hereby granted, provided that the above +- * copyright notice and this permission notice appear in all copies. +- * +- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +- * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +- */ +- +-/* +- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting +- * Copyright (c) 2002-2005 Atheros Communications, Inc. +- * Copyright (c) 2008-2010, Atheros Communications Inc. +- * +- * Redistribution and use in source and binary forms are permitted +- * provided that the following conditions are met: +- * 1. The materials contained herein are unmodified and are used +- * unmodified. +- * 2. Redistributions of source code must retain the above copyright +- * notice, this list of conditions and the following NO +- * ''WARRANTY'' disclaimer below (''Disclaimer''), without +- * modification. +- * 3. Redistributions in binary form must reproduce at minimum a +- * disclaimer similar to the Disclaimer below and any redistribution +- * must be conditioned upon including a substantially similar +- * Disclaimer requirement for further binary redistribution. +- * 4. Neither the names of the above-listed copyright holders nor the +- * names of any contributors may be used to endorse or promote +- * product derived from this software without specific prior written +- * permission. +- * +- * NO WARRANTY +- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- * ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, +- * MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +- * IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE +- * FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +- * SUCH DAMAGES. +- */ +- +----- +- +-The following files are from ECoS with a GPLv2 licence with modification +-and linking caveats. Please see the licence below for more information: +- +-target_firmware/magpie_fw_dev/build/magpie_1_1/sboot/cmnos/printf/src/cmnos_printf.c +-target_firmware/magpie_fw_dev/target/cmnos/cmnos_printf.c +-target_firmware/magpie_fw_dev/target/cmnos/k2_fw_cmnos_printf.c +- +-//####ECOSGPLCOPYRIGHTBEGIN#### +-// ------------------------------------------- +-// This file is part of eCos, the Embedded Configurable Operating System. +-// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. +-// Copyright (C) 2002 Gary Thomas +-// +-// eCos is free software; you can redistribute it and/or modify it under +-// the terms of the GNU General Public License as published by the Free +-// Software Foundation; either version 2 or (at your option) any later version. +-// +-// eCos is distributed in the hope that it will be useful, but WITHOUT ANY +-// WARRANTY; without even the implied warranty of MERCHANTABILITY or +-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-// for more details. 
+-// +-// You should have received a copy of the GNU General Public License along +-// with eCos; if not, write to the Free Software Foundation, Inc., +-// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +-// +-// As a special exception, if other files instantiate templates or use macros +-// or inline functions from this file, or you compile this file and link it +-// with other works to produce a work based on this file, this file does not +-// by itself cause the resulting work to be covered by the GNU General Public +-// License. However the source code for this file must still be made available +-// in accordance with section (3) of the GNU General Public License. +-// +-// This exception does not invalidate any other reasons why a work based on +-// this file might be covered by the GNU General Public License. +-// +-// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. +-// at http://sources.redhat.com/ecos/ecos-license/ +-// ------------------------------------------- +-//####ECOSGPLCOPYRIGHTEND#### +- +----- +- +-Some of the source code is sourced from Tensilica, Inc. +- +-Although most of the files fall under the MIT licence, some of the source +-files generated as part of the system development have a proprietary +-Tensilica licence. +- +-With permission from Tensilica, Inc, these files have been relicenced +-under the following licence: +- +-/* +- * Copyright (c) 2013 Tensilica Inc. +- * +- * Permission is hereby granted, free of charge, to any person obtaining +- * a copy of this software and associated documentation files (the +- * "Software"), to deal in the Software without restriction, including +- * without limitation the rights to use, copy, modify, merge, publish, +- * distribute, sublicense, and/or sell copies of the Software, and to +- * permit persons to whom the Software is furnished to do so, subject to +- * the following conditions: +- * +- * The above copyright notice and this permission notice shall be included +- * in all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +- */ +diff --git a/LICENCE.ralink-firmware.txt b/LICENCE.ralink-firmware.txt +deleted file mode 100644 +index 18dd038..0000000 +--- a/LICENCE.ralink-firmware.txt ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2007, Ralink Technology Corporation +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Ralink Technology Corporation nor the names of its +- suppliers may be used to endorse or promote products derived from this +- software without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. 
Ralink Technology Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.ralink_a_mediatek_company_firmware b/LICENCE.ralink_a_mediatek_company_firmware +deleted file mode 100644 +index fef16b6..0000000 +--- a/LICENCE.ralink_a_mediatek_company_firmware ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2013, Ralink, A MediaTek Company +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Ralink Technology Corporation nor the names of its +- suppliers may be used to endorse or promote products derived from this +- software without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Ralink Technology Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.rtlwifi_firmware.txt b/LICENCE.rtlwifi_firmware.txt +deleted file mode 100644 +index d70921f..0000000 +--- a/LICENCE.rtlwifi_firmware.txt ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2010, Realtek Semiconductor Corporation +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Realtek Semiconductor Corporation nor the names of its +- suppliers may be used to endorse or promote products derived from this +- software without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Realtek Semiconductor Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone, or in +-combination with an operating system licensed under an approved Open +-Source license as listed by the Open Source Initiative at +-http://opensource.org/licenses. The patent license shall not apply to +-any other combinations which include this software. No hardware per +-se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.ti-connectivity b/LICENCE.ti-connectivity +deleted file mode 100644 +index 22f617f..0000000 +--- a/LICENCE.ti-connectivity ++++ /dev/null +@@ -1,61 +0,0 @@ +-Copyright (c) 2016 Texas Instruments Incorporated +- +-All rights reserved not granted herein. +- +-Limited License. +- +-Texas Instruments Incorporated grants a world-wide, royalty-free, non-exclusive +-license under copyrights and patents it now or hereafter owns or controls to +-make, have made, use, import, offer to sell and sell ("Utilize") this software +-subject to the terms herein. 
With respect to the foregoing patent license, such +-license is granted solely to the extent that any such patent is necessary to +-Utilize the software alone. The patent license shall not apply to any +-combinations which include this software, other than combinations with devices +-manufactured by or for TI (“TI Devices”). No hardware patent is licensed +-hereunder. +- +-Redistributions must preserve existing copyright notices and reproduce this +-license (including the above copyright notice and the disclaimer and +-(if applicable) source code license limitations below) in the documentation +-and/or other materials provided with the distribution +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +- +- * No reverse engineering, decompilation, or disassembly of this +- software is permitted with respect to any software provided in binary +- form. +- +- * any redistribution and use are licensed by TI for use only with TI +- Devices. +- +- * Nothing shall obligate TI to provide you with source code for the +- software licensed and provided to you in object code. +- +-If software source code is provided to you, modification and redistribution of +-the source code are permitted provided that the following conditions are met: +- +- * any redistribution and use of the source code, including any +- resulting derivative works, are licensed by TI for use only with TI +- Devices. +- +- * any redistribution and use of any object code compiled from the +- source code and any resulting derivative works, are licensed by TI +- for use only with TI Devices. +- +-Neither the name of Texas Instruments Incorporated nor the names of its +-suppliers may be used to endorse or promote products derived from this +-software without specific prior written permission. +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY TI AND TI’S LICENSORS "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +-EVENT SHALL TI AND TI’S LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.wl1251 b/LICENCE.wl1251 +deleted file mode 100644 +index bd0f5f1..0000000 +--- a/LICENCE.wl1251 ++++ /dev/null +@@ -1,59 +0,0 @@ +-Copyright (c) 2000 – 2013 Texas Instruments Incorporated +- +-All rights reserved not granted herein. +- +-Limited License. +- +-Texas Instruments Incorporated grants a world-wide, royalty-free, non-exclusive +-license under copyrights and patents it now or hereafter owns or controls to +-make, have made, use, import, offer to sell and sell ("Utilize") this software +-subject to the terms herein. With respect to the foregoing patent license, +-such license is granted solely to the extent that any such patent is necessary +-to Utilize the software alone. The patent license shall not apply to any +-combinations which include this software, other than combinations with devices +-manufactured by or for TI (“TI Devices”). No hardware patent is licensed +-hereunder. 
+- +-Redistributions must preserve existing copyright notices and reproduce this +-license (including the above copyright notice and the disclaimer and (if +-applicable) source code license limitations below) in the documentation and/or +-other materials provided with the distribution +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +- +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted with respect to any software provided in binary form. +- +-* any redistribution and use are licensed by TI for use only with TI +- Devices. +- +-* Nothing shall obligate TI to provide you with source code for the +- software licensed and provided to you in object code. +- +-If software source code is provided to you, modification and redistribution of +-the source code are permitted provided that the following conditions are met: +- +-* any redistribution and use of the source code, including any resulting +- derivative works, are licensed by TI for use only with TI Devices. +- +-* any redistribution and use of any object code compiled from the source +- code and any resulting derivative works, are licensed by TI for use +- only with TI Devices. +- +-Neither the name of Texas Instruments Incorporated nor the names of its +-suppliers may be used to endorse or promote products derived from this software +-without specific prior written permission. +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY TI AND TI’S LICENSORS "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +-EVENT SHALL TI AND TI’S LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.QualcommAtheros_ar3k b/LICENSE.QualcommAtheros_ar3k +deleted file mode 100644 +index 7fae632..0000000 +--- a/LICENSE.QualcommAtheros_ar3k ++++ /dev/null +@@ -1,47 +0,0 @@ +-Copyright (c) 2015, Qualcomm Atheros, Inc. +-All rights reserved. +- +-Redistribution. Reproduction and redistribution in binary form, without +-modification, for use solely in conjunction with a Qualcomm Atheros, Inc. +-chipset, is permitted provided that the following conditions are met: +- +- • Redistributions must reproduce the above copyright notice and the following +- disclaimer in the documentation and/or other materials provided with the +- distribution. +- +- • Neither the name of Qualcomm Atheros, Inc. nor the names of its suppliers +- may be used to endorse or promote products derived from this Software +- without specific prior written permission. +- +- • No reverse engineering, decompilation, or disassembly of this Software is +- permitted. +- +-Limited patent license. Qualcomm Atheros, Inc. (“Licensor”) grants you +-(“Licensee”) a limited, worldwide, royalty-free, non-exclusive license under +-the Patents to make, have made, use, import, offer to sell and sell the +-Software. No hardware per se is licensed hereunder. 
+-The term “Patents” as used in this agreement means only those patents or patent +-applications owned solely and exclusively by Licensor as of the date of +-Licensor’s submission of the Software and any patents deriving priority (i.e., +-having a first effective filing date) therefrom. The term “Software” as used in +-this agreement means the firmware image submitted by Licensor, under the terms +-of this license, to git://git.kernel.org/pub/scm/linux/kernel/git/firmware/ +-linux-firmware.git. +-Notwithstanding anything to the contrary herein, Licensor does not grant and +-Licensee does not receive, by virtue of this agreement or the Licensor’s +-submission of any Software, any license or other rights under any patent or +-patent application owned by any affiliate of Licensor or any other entity +-(other than Licensor), whether expressly, impliedly, by virtue of estoppel or +-exhaustion, or otherwise. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.QualcommAtheros_ath10k b/LICENSE.QualcommAtheros_ath10k +deleted file mode 100644 +index c68935c..0000000 +--- a/LICENSE.QualcommAtheros_ath10k ++++ /dev/null +@@ -1,47 +0,0 @@ +-Copyright (c) 2015-2017, Qualcomm Atheros, Inc. +-All rights reserved. +- +-Redistribution. Reproduction and redistribution in binary form, without +-modification, for use solely in conjunction with a Qualcomm Atheros, Inc. +-chipset, is permitted provided that the following conditions are met: +- +- • Redistributions must reproduce the above copyright notice and the following +- disclaimer in the documentation and/or other materials provided with the +- distribution. +- +- • Neither the name of Qualcomm Atheros, Inc. nor the names of its suppliers +- may be used to endorse or promote products derived from this Software +- without specific prior written permission. +- +- • No reverse engineering, decompilation, or disassembly of this Software is +- permitted. +- +-Limited patent license. Qualcomm Atheros, Inc. (“Licensor”) grants you +-(“Licensee”) a limited, worldwide, royalty-free, non-exclusive license under +-the Patents to make, have made, use, import, offer to sell and sell the +-Software. No hardware per se is licensed hereunder. +-The term “Patents” as used in this agreement means only those patents or patent +-applications owned solely and exclusively by Licensor as of the date of +-Licensor’s submission of the Software and any patents deriving priority (i.e., +-having a first effective filing date) therefrom. The term “Software” as used in +-this agreement means the firmware image submitted by Licensor, under the terms +-of this license, to git://git.kernel.org/pub/scm/linux/kernel/git/firmware/ +-linux-firmware.git. 
+-Notwithstanding anything to the contrary herein, Licensor does not grant and +-Licensee does not receive, by virtue of this agreement or the Licensor’s +-submission of any Software, any license or other rights under any patent or +-patent application owned by any affiliate of Licensor or any other entity +-(other than Licensor), whether expressly, impliedly, by virtue of estoppel or +-exhaustion, or otherwise. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.atmel b/LICENSE.atmel +deleted file mode 100644 +index 5feb313..0000000 +--- a/LICENSE.atmel ++++ /dev/null +@@ -1,36 +0,0 @@ +-Copyright (C) 2015 Atmel Corporation. All rights reserved. +- +-REDISTRIBUTION: Permission is hereby granted by Atmel Corporation (Atmel), free +-of any license fees, to any person obtaining a copy of this firmware (the +-"Software"), to install, reproduce, copy and distribute copies, in binary form, +-in hexadecimal or equivalent formats, of the Software and to permit persons to +-whom the Software is provided to do the same, subject to the following +-conditions: +- +-* Any redistribution of the Software must reproduce the above copyright notice, +- this license notice, and the following disclaimers and notices in the +- documentation and/or other materials provided with the Software. +- +-* Neither the name of Atmel Corporation, its products nor the names of its +- suppliers may be used to endorse or promote products derived from this +- Software without specific prior written permission. +- +-* All matters arising out of or in connection with this License and/or Software +- shall be governed by California law and the parties agree to the exclusive +- jurisdiction of the Californian courts to decide all disputes arising. +- +-* The licensee shall defend and indemnify Atmel against any and all claims, +- costs, losses and damages (including reasonable legal fees) incurred by tme +- arising out of any claim relating to the Software due to the licensee’s use or +- sub-licensing of the Software +- +-DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE +-DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+diff --git a/LICENSE.nxp b/LICENSE.nxp +deleted file mode 100644 +index bfd2c70..0000000 +--- a/LICENSE.nxp ++++ /dev/null +@@ -1,26 +0,0 @@ +-LA_OPT_BINARY_FIRMWARE_ONLY rev2 June 2020 +- +-Copyright © 2018 NXP. All rights reserved. +- +-Software License Agreement (“Agreement”) +- +-ANY USE, REPRODUCTION, OR DISTRIBUTION OF THE ACCOMPANYING BINARY SOFTWARE CONSTITUTES LICENSEE'S ACCEPTANCE OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. +- +-Licensed Software. “Binary Software” means the software in binary form supplied directly by NXP pursuant to this Agreement. Subject to the terms and conditions of this Agreement, NXP USA, Inc. ("Licensor"), grants to you (“Licensee”) a worldwide, non-exclusive, and royalty-free copyright license to reproduce and distribute the Binary Software in its complete and unmodified binary form as provided by Licensor, for use solely in conjunction with a programmable processing unit supplied directly or indirectly from Licensor. +- +-Restrictions. Licensee must reproduce the Licensor copyright notice above with each binary copy of the Binary Software or in the accompanying documentation. Licensee must not reverse engineer, decompile, disassemble or modify in any way the Binary Software. Licensee must not use the Binary Software in violation of any applicable law or regulation. This Agreement shall automatically terminate upon Licensee's breach of any term or condition of this Agreement in which case, Licensee shall destroy all copies of the Binary Software. Neither the name of Licensor nor the names of its suppliers may be used to endorse or promote products derived from this Binary Software without specific prior written permission. +-Disclaimer. TO THE MAXIMUM EXTENT PERMITTED BY LAW, LICENSOR EXPRESSLY DISCLAIMS ANY WARRANTY FOR THE BINARY SOFTWARE. THE BINARY SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, LICENSOR DOES NOT WARRANT THAT THE BINARY SOFTWARE IS ERROR-FREE OR WILL OPERATE WITHOUT INTERRUPTION, AND LICENSOR GRANTS NO WARRANTY REGARDING ITS USE OR THE RESULTS THEREFROM, INCLUDING ITS CORRECTNESS, ACCURACY, OR RELIABILITY. +- +-Limitation of Liability. IN NO EVENT WILL LICENSOR, OR ANY OF LICENSOR'S LICENSORS HAVE ANY LIABILITY HEREUNDER FOR ANY INDIRECT, SPECIAL, OR +-CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER FOR BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, INCLUDING DAMAGES FOR LOSS OF PROFITS, OR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS, EVEN IF SUCH PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. LICENSOR’S TOTAL LIABILITY FOR ALL COSTS, DAMAGES, CLAIMS, OR LOSSES WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE BINARY SOFTWARE SUPPLIED UNDER THIS AGREEMENT IS LIMITED TO THE AGGREGATE AMOUNT PAID BY LICENSEE TO LICENSOR IN CONNECTION WITH THE BINARY SOFTWARE TO WHICH LOSSES OR DAMAGES ARE CLAIMED. +- +-Trade Compliance. Licensee shall comply with all applicable export and import control laws and regulations including but not limited to the US Export Administration Regulation (including restrictions on certain military end uses and military end users as specified in Section 15 C.F.R. 
§ 744.21 and prohibited party lists issued by other federal governments), Catch-all regulations and all national and international embargoes. Licensee further agrees that it will not knowingly transfer, divert, export or re-export, directly or indirectly, any product, software, including software source code, or technology restricted by such regulations or by other applicable national regulations, received from Licensor under this Agreement, or any direct product of such software or technical data to any person, firm, entity, country or destination to which such transfer, diversion, export or re-export is restricted or prohibited, without obtaining prior written authorization from the applicable competent government authorities to the extent required by those laws. Licensee acknowledge that the “restricted encryption software” that is subject to the US Export Administration Regulations (EAR), is not intended for use by a government end user, as defined in part 772 of the EAR. This provision shall survive termination or expiration of this Agreement. +- +-Assignment. Licensee may not assign this Agreement without the prior written consent of Licensor. Licensor may assign this Agreement without Licensee’s consent. +- +-Governing Law. This Agreement will be governed by, construed, and enforced in accordance with the laws of the State of Texas, USA, without regard to conflicts of laws principles, will apply to all matters relating to this Agreement or the Binary Software, and Licensee agrees that any litigation will be subject to the exclusive jurisdiction of the state or federal courts Texas, USA. The United Nations Convention on Contracts for the International Sale of Goods will not apply to this Agreement. +-Restrictions, Disclaimer, Limitation of Liability, Trade Compliance, Assignment, and Governing Law shall survive termination or expiration of this Agreement. +- +- +- +- +diff --git a/WHENCE b/WHENCE +index d78e45e..edf6f75 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -8,15 +8,6 @@ kernel. + + -------------------------------------------------------------------------- + +-Driver: BCM-0bb4-0306 Cypress Bluetooth firmware for HTC Vive +- +-File: brcm/BCM-0bb4-0306.hcd +-Link: brcm/BCM-0a5c-6410.hcd -> BCM-0bb4-0306.hcd +- +-Licence: Redistributable. See LICENCE.cypress for details. +- +--------------------------------------------------------------------------- +- + Driver: advansys - AdvanSys SCSI + + File: advansys/mcode.bin +@@ -306,36 +297,6 @@ Licence: Redistributable. See LICENCE.agere for details + + -------------------------------------------------------------------------- + +-Driver: ar9170 - Atheros 802.11n "otus" USB +- +-File: ar9170-1.fw +-File: ar9170-2.fw +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ath9k_htc - Atheros HTC devices (USB) +- +-File: ar9271.fw +-File: ar7010.fw +-File: ar7010_1_1.fw +-File: htc_9271.fw +-Version: 1.3.1 +-File: htc_7010.fw +-Version: 1.3.1 +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +-File: ath9k_htc/htc_7010-1.4.0.fw +-Version: 1.4.0 +-File: ath9k_htc/htc_9271-1.4.0.fw +-Version: 1.4.0 +- +-Licence: Free software. 
See LICENCE.open-ath9k-htc-firmware for details +- +--------------------------------------------------------------------------- +- + Driver: cassini - Sun Cassini + + File: sun/cassini.bin +@@ -516,479 +477,6 @@ Found in hex form in kernel source, with the following notice: + + -------------------------------------------------------------------------- + +-Driver: libertas - Marvell Libertas fullmac-type 802.11b/g cards +- +-File: libertas/cf8381.bin +-File: libertas/cf8381_helper.bin +-File: libertas/cf8385.bin +-File: libertas/cf8385_helper.bin +-File: libertas/gspi8682.bin +-File: libertas/gspi8682_helper.bin +-File: libertas/gspi8686_v9.bin +-File: libertas/gspi8686_v9_helper.bin +-File: libertas/gspi8688.bin +-File: libertas/gspi8688_helper.bin +-File: libertas/sd8385.bin +-File: libertas/sd8385_helper.bin +-File: libertas/sd8682.bin +-File: libertas/sd8682_helper.bin +-File: libertas/sd8686_v8.bin +-File: libertas/sd8686_v8_helper.bin +-File: libertas/sd8686_v9.bin +-File: libertas/sd8686_v9_helper.bin +-File: libertas/usb8388_v5.bin +-File: libertas/usb8388_v9.bin +-File: libertas/usb8682.bin +-File: mrvl/sd8688.bin +-Link: libertas/sd8688.bin -> ../mrvl/sd8688.bin +-File: mrvl/sd8688_helper.bin +-Link: libertas/sd8688_helper.bin -> ../mrvl/sd8688_helper.bin +- +-Licence: Redistributable. See LICENCE.Marvell for details. Extracted from +-Linux driver tarballs downloaded from Marvell's "Extranet" with permission. +- +--------------------------------------------------------------------------- +- +-Driver: libertas - Marvell Libertas 802.11b/g cards, OLPC firmware +- +-File: libertas/lbtf_sdio.bin +-Version: 9.0.7.p4 +- +-File: lbtf_usb.bin +-Version: 5.132.3.p1 +- +-File: libertas/usb8388_olpc.bin +-Version: 5.110.22.p23 +- +-Licence: Redistributable. See LICENCE.OLPC for details. +- +-Available from http://dev.laptop.org/pub/firmware/libertas/ +- +--------------------------------------------------------------------------- +- +-Driver: mwl8k - Marvell Libertas softmac-type 802.11b/g/n cards +- +-File: mwl8k/fmimage_8687.fw +-File: mwl8k/helper_8687.fw +-File: mwl8k/fmimage_8366.fw +-File: mwl8k/fmimage_8366_ap-1.fw +-File: mwl8k/fmimage_8366_ap-2.fw +-File: mwl8k/fmimage_8366_ap-3.fw +-Version: 5.2.8.16 +-File: mwl8k/helper_8366.fw +- +-File: mwl8k/fmimage_8764_ap-1.fw +-Version: 7.4.0.9 +- +-Licence: Redistributable. See LICENCE.Marvell for details. 8687 images +-downloaded from Marvell's "Extranet" with permission. 8366 images contributed +-directly by Marvell. 
+- +--------------------------------------------------------------------------- +- +-Driver: mwifiex - Marvell Wi-Fi fullmac-type 802.11n/ac cards +- +-File: mrvl/sd8787_uapsta.bin +-Version: W14.68.35.p66 +- +-File: mrvl/usb8766_uapsta.bin +-Version: 14.68.22.p16 +- +-File: mrvl/sd8797_uapsta.bin +-Version: W14.68.29.p59 +- +-File: mrvl/usb8797_uapsta.bin +-Version: W14.68.29.p60 +- +-File: mrvl/sd8897_uapsta.bin +-Version: W15.68.19.17 +- +-File: mrvl/usb8897_uapsta.bin +-Version: 15.68.4.p103 +- +-File: mrvl/pcie8897_uapsta.bin +-Version: W15.68.19.p21 +- +-File: mrvl/sd8887_uapsta.bin +-Version: W15.68.7.p189 +- +-File: mrvl/sd8801_uapsta.bin +-Version: W14.68.36.p204 +- +-File: mrvl/usb8801_uapsta.bin +-Version: W14.68.36.p138 +- +-File: mrvl/pcieuart8997_combo_v4.bin +-Version: W16.68.1.p179 +- +-File: mrvl/pcieusb8997_combo_v4.bin +-Version: W16.68.1.p195 +- +-File: mrvl/pcie8997_wlan_v4.bin +-Version: W16.68.1.p195 +- +-File: mrvl/usbusb8997_combo_v4.bin +-Version: W16.68.1.p183 +- +-File: mrvl/sdsd8997_combo_v4.bin +-Version: W16.68.1.p179 +- +-File: mrvl/sdsd8977_combo_v2.bin +-Version: W16.68.1.p195 +- +-Licence: Redistributable. See LICENCE.NXP for details. +-Originates from https://github.com/NXP/mwifiex-firmware.git +- +--------------------------------------------------------------------------- +- +- +-Driver: iwlwifi - Intel Wireless Wifi +- +-File: iwlwifi-3945-2.ucode +-Version: 15.32.2.9 +- +-File: iwlwifi-4965-2.ucode +-Version: 228.61.2.24 +- +-File: iwlwifi-5000-5.ucode +-Version: 8.83.5.1 +- +-File: iwlwifi-5150-2.ucode +-Version: 8.24.2.2 +- +-File: iwlwifi-1000-5.ucode +-Version: 39.31.5.1 +- +-File: iwlwifi-6000-4.ucode +-Version: 9.221.4.1 +- +-File: iwlwifi-6050-5.ucode +-Version: 41.28.5.1 +- +-File: iwlwifi-6000g2a-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-6000g2b-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-135-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-100-5.ucode +-Version: 39.31.5.1 +- +-File: iwlwifi-105-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-2030-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-2000-6.ucode +-Version: 18.168.6.1 +- +-File: iwlwifi-7260-17.ucode +-Version: 17.bfb58538.0 +- +-File: iwlwifi-3160-17.ucode +-Version: 17.bfb58538.0 +- +-File: iwlwifi-7265-17.ucode +-Version: 17.bfb58538.0 +- +-File: iwlwifi-7265D-29.ucode +-Version: 29.f2390aa8.0 +- +-File: iwlwifi-3168-29.ucode +-Version: 29.0bd893f3.0 +- +-File: iwlwifi-8000C-34.ucode +-Version: 34.610288.0 +- +-File: iwlwifi-8000C-36.ucode +-Version: 36.ca7b901d.0 +- +-File: iwlwifi-8265-34.ucode +-Version: 34.610288.0 +- +-File: iwlwifi-8265-36.ucode +-Version: 36.ca7b901d.0 +- +-File: iwlwifi-9000-pu-b0-jf-b0-34.ucode +-Version: 34.ba501b11.0 +- +-File: iwlwifi-9000-pu-b0-jf-b0-38.ucode +-Version: 38.755cfdd8.0 +- +-File: iwlwifi-9000-pu-b0-jf-b0-46.ucode +-Version: 46.ff18e32a.0 +- +-File: iwlwifi-9260-th-b0-jf-b0-34.ucode +-Version: 34.ba501b11.0 +- +-File: iwlwifi-9260-th-b0-jf-b0-38.ucode +-Version: 38.755cfdd8.0 +- +-File: iwlwifi-9260-th-b0-jf-b0-46.ucode +-Version: 46.ff18e32a.0 +- +-File: iwlwifi-cc-a0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-Qu-b0-hr-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-Qu-b0-jf-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-Qu-c0-hr-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-Qu-c0-jf-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-50.ucode +-Version: 50.3e391d3e.0 +- +-File: iwlwifi-cc-a0-59.ucode 
+-Version: 59.601f3a66.0 +- +-File: iwlwifi-Qu-b0-hr-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-Qu-b0-jf-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-Qu-c0-hr-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-Qu-c0-jf-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-so-a0-gf-a0.pnvm +- +-File: iwlwifi-so-a0-gf4-a0.pnvm +- +-File: iwlwifi-ty-a0-gf-a0-59.ucode +-Version: 59.601f3a66.0 +- +-File: iwlwifi-cc-a0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-Qu-b0-hr-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-Qu-b0-jf-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-Qu-c0-hr-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-Qu-c0-jf-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-ty-a0-gf-a0-66.ucode +-Version: 66.f1c864e0.0 +- +-File: iwlwifi-cc-a0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-Qu-b0-hr-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-Qu-b0-jf-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-Qu-c0-hr-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-Qu-c0-jf-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-ty-a0-gf-a0-72.ucode +-Version: 72.a764baac.0 +- +-File: iwlwifi-so-a0-gf4-a0-72.ucode +-Version: 72.a764baac.0 +- +-File: iwlwifi-so-a0-gf-a0-72.ucode +-Version: 72.a764baac.0 +- +-File: iwlwifi-so-a0-hr-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-so-a0-jf-b0-72.ucode +-Version: 72.daa05125.0 +- +-File: iwlwifi-cc-a0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-Qu-b0-hr-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-Qu-b0-jf-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-Qu-c0-hr-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-Qu-c0-jf-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-ty-a0-gf-a0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-so-a0-gf4-a0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-so-a0-gf-a0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-so-a0-hr-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-so-a0-jf-b0-73.ucode +-Version: 73.35c0a2c6.0 +- +-File: iwlwifi-cc-a0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-Qu-b0-hr-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-Qu-b0-jf-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-Qu-c0-hr-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-Qu-c0-jf-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-ty-a0-gf-a0-74.ucode +-Version: 74.fe17486e.0 +- +-File: iwlwifi-so-a0-gf4-a0-74.ucode +-Version: 74.fe17486e.0 +- +-File: iwlwifi-so-a0-gf-a0-74.ucode +-Version: 74.fe17486e.0 +- +-File: iwlwifi-so-a0-hr-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-so-a0-jf-b0-74.ucode +-Version: 74.a5e9588b.0 +- +-File: iwlwifi-cc-a0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-Qu-b0-hr-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: 
iwlwifi-Qu-b0-jf-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-Qu-c0-hr-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-Qu-c0-jf-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-QuZ-a0-hr-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-QuZ-a0-jf-b0-77.ucode +-Version: 74.206b0184.0 +- +-File: iwlwifi-ty-a0-gf-a0-77.ucode +-Version: 74.f92b5fed.0 +- +-File: iwlwifi-ty-a0-gf-a0-78.ucode +-Version: 75.3bfdc55f.0 +- +-File: iwlwifi-ty-a0-gf-a0-79.ucode +-Version: 76.27f1c37b.0 +- +-File: iwlwifi-ty-a0-gf-a0-81.ucode +-Version: 78.31fc9ae6.0 +- +-File: iwlwifi-so-a0-gf4-a0-77.ucode +-Version: 74.f92b5fed.0 +- +-File: iwlwifi-so-a0-gf4-a0-78.ucode +-Version: 75.3bfdc55f.0 +- +-File: iwlwifi-so-a0-gf4-a0-79.ucode +-Version: 76.27f1c37b.0 +- +-File: iwlwifi-so-a0-gf4-a0-81.ucode +-Version: 78.31fc9ae6.0 +- +-File: iwlwifi-so-a0-gf-a0-77.ucode +-Version: 74.f92b5fed.0 +- +-File: iwlwifi-so-a0-gf-a0-78.ucode +-Version: 74.3bfdc55f.0 +- +-File: iwlwifi-so-a0-gf-a0-79.ucode +-Version: 75.27f1c37b.0 +- +-File: iwlwifi-so-a0-gf-a0-81.ucode +-Version: 78.31fc9ae6.0 +- +-File: iwlwifi-so-a0-hr-b0-77.ucode +-Version: 74.f92b5fed.0 +- +-File: iwlwifi-so-a0-hr-b0-79.ucode +-Version: 75.27f1c37b.0 +- +-File: iwlwifi-so-a0-hr-b0-81.ucode +-Version: 78.31fc9ae6.0 +- +-File: iwlwifi-so-a0-jf-b0-77.ucode +-Version: 74.f92b5fed.0 +- +-File: iwlwifi-ty-a0-gf-a0.pnvm +- +-Licence: Redistributable. See LICENCE.iwlwifi_firmware for details +- +-Also available from http://wireless.kernel.org/en/users/Drivers/iwlwifi#Firmware +- +--------------------------------------------------------------------------- +- + Driver: tehuti - Tehuti Networks 10G Ethernet + + File: tehuti/bdx.bin +@@ -1226,86 +714,6 @@ Available from http://ldriver.qlogic.com/firmware/netxen_nic/new/ + + -------------------------------------------------------------------------- + +-Driver: rt61pci - Ralink RT2561, RT2561S, RT2661 wireless MACs +- +-File: rt2561.bin +-File: rt2561s.bin +-File: rt2661.bin +- +-Licence: Redistributable. See LICENCE.ralink-firmware.txt for details +- +-Downloaded from http://www.ralinktech.com/ralink/Home/Support/Linux.html +- +--------------------------------------------------------------------------- +- +-Driver: rt73usb - Ralink RT2571W, RT2671 wireless MACs +- +-File: rt73.bin +- +-Licence: Redistributable. See LICENCE.ralink-firmware.txt for details +- +-Downloaded from http://www.ralinktech.com/ralink/Home/Support/Linux.html +- +---------------------------------------------------------------------------- +- +-Driver: mt7601u - MediaTek MT7601U Wireless MACs +- +-File: mediatek/mt7601u.bin +-Version: 34 +-Link: mt7601u.bin -> mediatek/mt7601u.bin +- +-Licence: Redistributable. See LICENCE.ralink_a_mediatek_company_firmware for details +- +-Downloaded from http://www.mediatek.com/en/downloads/ +- +--------------------------------------------------------------------------- +- +-Driver: rt2800pci - Ralink RT2860, RT2890, RT3090, RT3290, RT5390 wireless MACs +- +-File: rt2860.bin +-Version: 40 +- +-File: rt3290.bin +-Version: 37 +- +-Licence: Redistributable. See LICENCE.ralink-firmware.txt for details +- +-Binary file supplied originally by Shiang Tu , latest +-from http://www.mediatek.com/en/downloads1/downloads/ +- +--------------------------------------------------------------------------- +- +-Driver: rt2860sta - Ralink RT3090 wireless MACs +- +-Link: rt3090.bin -> rt2860.bin +- +-Licence: Redistributable. 
See LICENCE.ralink-firmware.txt for details +- +--------------------------------------------------------------------------- +- +-Driver: rt2800usb - Ralink RT2870, RT3070, RT3071, RT3072, RT5370 wireless MACs +- +-File: rt2870.bin +-Version: 36 +- +-Licence: Redistributable. See LICENCE.ralink-firmware.txt for details +- +-Binary file supplied originally by Shiang Tu , latest +-from http://www.mediatek.com/en/downloads1/downloads/ +- +--------------------------------------------------------------------------- +- +-Driver: rt2870sta - Ralink RT2870, RT3070, RT3071 wireless MACs +- +-Link: rt3070.bin -> rt2870.bin +-File: rt3071.bin +- +-Licence: Redistributable. See LICENCE.ralink-firmware.txt for details +- +-rt3071.bin is a copy of bytes 4096-8191 of rt2870.bin for compatibility. +- +--------------------------------------------------------------------------- +- + Driver: usbdux/usbduxfast/usbduxsigma - usbdux data acquisition cards + + File: usbdux_firmware.bin +@@ -1319,17 +727,6 @@ Provided from the author, Bernd Porr + + -------------------------------------------------------------------------- + +-Driver: ath3k - DFU Driver for Atheros bluetooth chipset AR3011 +- +-File: ath3k-1.fw +-Version: 1.0 +- +-Fix EEPROM radio table issue and change PID to 3005 +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- + Driver: mga - Matrox G200/G400/G550 + + File: matrox/g200_warp.fw +@@ -2245,17 +1642,6 @@ Licence: Redistributable. + + -------------------------------------------------------------------------- + +-Driver: rtl8192e - Realtek 8192 PCI wireless driver +- +-File: RTL8192E/boot.img +-File: RTL8192E/data.img +-File: RTL8192E/main.img +- +-Licence: Redistributable, provided by Realtek in their driver +- source download. +- +--------------------------------------------------------------------------- +- + Driver: ib_qib - QLogic Infiniband + + File: qlogic/sd7220.fw +@@ -2388,1167 +1774,51 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: brcmsmac - Broadcom 802.11n softmac wireless LAN driver. ++Driver: vt6656 - VIA VT6656 USB wireless driver + +-File: brcm/bcm43xx-0.fw +-File: brcm/bcm43xx_hdr-0.fw +-Version: 610.812 ++File: vntwusb.fw + +-Licence: Redistributable. See LICENCE.broadcom_bcm43xx for details. ++Licence: Redistributable. See LICENCE.via_vt6656 for details. + + -------------------------------------------------------------------------- + +-Driver: brcmfmac - Broadcom 802.11n fullmac wireless LAN driver. +- +-File: brcm/bcm4329-fullmac-4.bin +-File: brcm/brcmfmac43236b.bin +-File: brcm/brcmfmac4329-sdio.bin +-File: brcm/brcmfmac4330-sdio.bin +-File: brcm/brcmfmac4334-sdio.bin +-File: brcm/brcmfmac4335-sdio.bin +-File: brcm/brcmfmac43241b0-sdio.bin +-File: brcm/brcmfmac43241b4-sdio.bin +-File: brcm/brcmfmac43241b5-sdio.bin +-File: brcm/brcmfmac43242a.bin +-File: brcm/brcmfmac43143.bin +-File: brcm/brcmfmac43143-sdio.bin +-File: brcm/brcmfmac43430a0-sdio.bin +-File: brcm/brcmfmac4350c2-pcie.bin +-File: brcm/brcmfmac4350-pcie.bin +-File: brcm/brcmfmac43569.bin +-File: brcm/brcmfmac4358-pcie.bin +-File: brcm/brcmfmac43602-pcie.bin +-File: brcm/brcmfmac43602-pcie.ap.bin +-File: brcm/brcmfmac4366b-pcie.bin +-File: brcm/brcmfmac4366c-pcie.bin +-File: brcm/brcmfmac4371-pcie.bin +- +-Licence: Redistributable. See LICENCE.broadcom_bcm43xx for details. 
+- +-File: brcm/brcmfmac4373.bin +-File: cypress/cyfmac43012-sdio.bin +-Link: brcm/brcmfmac43012-sdio.bin -> ../cypress/cyfmac43012-sdio.bin +-File: cypress/cyfmac43012-sdio.clm_blob +-Link: brcm/brcmfmac43012-sdio.clm_blob -> ../cypress/cyfmac43012-sdio.clm_blob +-File: cypress/cyfmac43340-sdio.bin +-Link: brcm/brcmfmac43340-sdio.bin -> ../cypress/cyfmac43340-sdio.bin +-File: cypress/cyfmac43362-sdio.bin +-Link: brcm/brcmfmac43362-sdio.bin -> ../cypress/cyfmac43362-sdio.bin +-File: cypress/cyfmac4339-sdio.bin +-Link: brcm/brcmfmac4339-sdio.bin -> ../cypress/cyfmac4339-sdio.bin +-File: cypress/cyfmac43430-sdio.bin +-Link: brcm/brcmfmac43430-sdio.bin -> ../cypress/cyfmac43430-sdio.bin +-File: cypress/cyfmac43430-sdio.clm_blob +-Link: brcm/brcmfmac43430-sdio.clm_blob -> ../cypress/cyfmac43430-sdio.clm_blob +-File: cypress/cyfmac43455-sdio.bin +-Link: brcm/brcmfmac43455-sdio.bin -> ../cypress/cyfmac43455-sdio.bin +-File: cypress/cyfmac43455-sdio.clm_blob +-Link: brcm/brcmfmac43455-sdio.clm_blob -> ../cypress/cyfmac43455-sdio.clm_blob +-File: cypress/cyfmac4354-sdio.bin +-Link: brcm/brcmfmac4354-sdio.bin -> ../cypress/cyfmac4354-sdio.bin +-File: cypress/cyfmac4354-sdio.clm_blob +-Link: brcm/brcmfmac4354-sdio.clm_blob -> ../cypress/cyfmac4354-sdio.clm_blob +-File: cypress/cyfmac4356-pcie.bin +-Link: brcm/brcmfmac4356-pcie.bin -> ../cypress/cyfmac4356-pcie.bin +-File: cypress/cyfmac4356-pcie.clm_blob +-Link: brcm/brcmfmac4356-pcie.clm_blob -> ../cypress/cyfmac4356-pcie.clm_blob +-File: cypress/cyfmac4356-sdio.bin +-Link: brcm/brcmfmac4356-sdio.bin -> ../cypress/cyfmac4356-sdio.bin +-File: cypress/cyfmac4356-sdio.clm_blob +-Link: brcm/brcmfmac4356-sdio.clm_blob -> ../cypress/cyfmac4356-sdio.clm_blob +-File: cypress/cyfmac43570-pcie.bin +-Link: brcm/brcmfmac43570-pcie.bin -> ../cypress/cyfmac43570-pcie.bin +-File: cypress/cyfmac43570-pcie.clm_blob +-Link: brcm/brcmfmac43570-pcie.clm_blob -> ../cypress/cyfmac43570-pcie.clm_blob +-File: cypress/cyfmac4373-sdio.bin +-Link: brcm/brcmfmac4373-sdio.bin -> ../cypress/cyfmac4373-sdio.bin +-File: cypress/cyfmac4373-sdio.clm_blob +-Link: brcm/brcmfmac4373-sdio.clm_blob -> ../cypress/cyfmac4373-sdio.clm_blob +-File: cypress/cyfmac54591-pcie.bin +-Link: brcm/brcmfmac54591-pcie.bin -> ../cypress/cyfmac54591-pcie.bin +-File: cypress/cyfmac54591-pcie.clm_blob +-Link: brcm/brcmfmac54591-pcie.clm_blob -> ../cypress/cyfmac54591-pcie.clm_blob +- +-Licence: Redistributable. See LICENCE.cypress for details. 
+- +-File: "brcm/brcmfmac43241b4-sdio.Advantech-MICA-071.txt" +-File: "brcm/brcmfmac43241b4-sdio.Intel Corp.-VALLEYVIEW C0 PLATFORM.txt" +-File: "brcm/brcmfmac4330-sdio.Prowise-PT301.txt" +-File: "brcm/brcmfmac43340-sdio.ASUSTeK COMPUTER INC.-TF103CE.txt" +-File: "brcm/brcmfmac43340-sdio.meegopad-t08.txt" +-File: "brcm/brcmfmac43340-sdio.pov-tab-p1006w-data.txt" +-File: "brcm/brcmfmac43340-sdio.predia-basic.txt" +-File: "brcm/brcmfmac43362-sdio.WC121.txt" +-File: "brcm/brcmfmac43362-sdio.cubietech,cubietruck.txt" +-Link: brcm/brcmfmac43362-sdio.kobo,aura.txt -> brcmfmac43362-sdio.WC121.txt +-Link: brcm/brcmfmac43362-sdio.kobo,tolino-shine2hd.txt -> brcmfmac43362-sdio.WC121.txt +-Link: brcm/brcmfmac43362-sdio.lemaker,bananapro.txt -> brcmfmac43362-sdio.cubietech,cubietruck.txt +-File: "brcm/brcmfmac43430a0-sdio.ilife-S806.txt" +-File: "brcm/brcmfmac43430a0-sdio.jumper-ezpad-mini3.txt" +-File: "brcm/brcmfmac43430a0-sdio.ONDA-V80 PLUS.txt" +-File: "brcm/brcmfmac43430-sdio.AP6212.txt" +-Link: brcm/brcmfmac43430-sdio.sinovoip,bpi-m2-plus.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.sinovoip,bpi-m2-zero.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.sinovoip,bpi-m2-ultra.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.sinovoip,bpi-m3.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.friendlyarm,nanopi-r1.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.starfive,visionfive-v1.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.beagle,beaglev-starlight-jh7100-a1.txt -> brcmfmac43430-sdio.AP6212.txt +-Link: brcm/brcmfmac43430-sdio.beagle,beaglev-starlight-jh7100-r0.txt -> brcmfmac43430-sdio.AP6212.txt +-File: "brcm/brcmfmac43430-sdio.Hampoo-D2D3_Vi8A1.txt" +-File: "brcm/brcmfmac43430-sdio.MUR1DX.txt" +-File: "brcm/brcmfmac43430-sdio.raspberrypi,3-model-b.txt" +-Link: brcm/brcmfmac43430-sdio.raspberrypi,model-zero-w.txt -> brcmfmac43430-sdio.raspberrypi,3-model-b.txt +-Link: brcm/brcmfmac43430-sdio.raspberrypi,model-zero-2-w.txt -> brcmfmac43430-sdio.raspberrypi,3-model-b.txt +-File: "brcm/brcmfmac43455-sdio.acepc-t8.txt" +-File: "brcm/brcmfmac43455-sdio.raspberrypi,3-model-b-plus.txt" +-Link: brcm/brcmfmac43455-sdio.raspberrypi,3-model-a-plus.txt -> brcmfmac43455-sdio.raspberrypi,3-model-b-plus.txt +-File: "brcm/brcmfmac43455-sdio.raspberrypi,4-model-b.txt" +-Link: brcm/brcmfmac43455-sdio.Raspberry\ Pi\ Foundation-Raspberry\ Pi\ 4\ Model\ B.txt -> brcmfmac43455-sdio.raspberrypi,4-model-b.txt +-Link: brcm/brcmfmac43455-sdio.Raspberry\ Pi\ Foundation-Raspberry\ Pi\ Compute\ Module\ 4.txt -> brcmfmac43455-sdio.raspberrypi,4-model-b.txt +-File: "brcm/brcmfmac43455-sdio.MINIX-NEO Z83-4.txt" +-File: "brcm/brcmfmac4356-pcie.gpd-win-pocket.txt" +-File: "brcm/brcmfmac4356-pcie.Intel Corporation-CHERRYVIEW D1 PLATFORM.txt" +-File: "brcm/brcmfmac4356-pcie.Xiaomi Inc-Mipad2.txt" +-File: brcm/brcmfmac4356-sdio.AP6356S.txt +-Link: brcm/brcmfmac4356-sdio.firefly,firefly-rk3399.txt -> brcmfmac4356-sdio.AP6356S.txt +-Link: brcm/brcmfmac4356-sdio.khadas,vim2.txt -> brcmfmac4356-sdio.AP6356S.txt +-Link: brcm/brcmfmac4356-sdio.vamrs,rock960.txt -> brcmfmac4356-sdio.AP6356S.txt +-File: brcm/brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.beagle,am5729-beagleboneai.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,pinebook-pro.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,pinenote-v1.1.txt -> 
brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,pinenote-v1.2.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,pinephone-pro.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,quartz64-a.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,quartz64-b.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,rockpro64-v2.0.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,rockpro64-v2.1.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,soquartz-model-a.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,soquartz-cm4io.txt -> brcmfmac43455-sdio.AW-CM256SM.txt +-Link: brcm/brcmfmac43455-sdio.pine64,soquartz-blade.txt -> brcmfmac43455-sdio.AW-CM256SM.txt ++Driver: myri10ge - Myri10GE 10GbE NIC driver + +-Licence: GPLv2. See GPL-2 for details. ++File: myri10ge_eth_z8e.dat ++File: myri10ge_ethp_z8e.dat ++File: myri10ge_rss_eth_z8e.dat ++File: myri10ge_rss_ethp_z8e.dat ++File: myri10ge_eth_big_z8e.dat ++File: myri10ge_ethp_big_z8e.dat ++File: myri10ge_rss_eth_big_z8e.dat ++File: myri10ge_rss_ethp_big_z8e.dat ++Version: 1.4.57 ++ ++License: Redistributable. See LICENCE.myri10ge_firmware for details. + + -------------------------------------------------------------------------- ++Driver: ene-ub6250 -- ENE UB6250 SD card reader driver + +-Driver: wl1251 - Texas Instruments 802.11 WLAN driver for WiLink4 chips ++File: ene-ub6250/sd_init1.bin ++File: ene-ub6250/sd_init2.bin ++File: ene-ub6250/sd_rdwr.bin ++File: ene-ub6250/ms_init.bin ++File: ene-ub6250/msp_rdwr.bin ++File: ene-ub6250/ms_rdwr.bin + +-File: ti-connectivity/wl1251-fw.bin +-Version: 4.0.4.3.7 ++Licence: Redistributable. See LICENCE.ene_firmware for details. + +-File: ti-connectivity/wl1251-nvs.bin ++-------------------------------------------------------------------------- + +-Licence: Redistributable. See LICENCE.wl1251 for details. ++Driver: isci -- Intel C600 SAS controller driver + +-The published NVS files are for testing only. Every device needs to +-have a unique NVS which is properly calibrated for best results. +- +-The driver expects to find the firmwares under a ti-connectivity subdirectory. +-So if your system looks for firmwares in /lib/firmware, the firmwares for +-wl12xx chips must be located in /lib/firmware/ti-connectivity/. 
+- +--------------------------------------------------------------------------- +- +-Driver: wl12xx - Texas Instruments 802.11 WLAN driver for WiLink6/7 chips +- +-File: ti-connectivity/wl1271-fw.bin +-Version: 6.1.0.50.350 (STA-only) +-File: ti-connectivity/wl1271-fw-2.bin +-Version: 6.1.5.50.74 (STA-only) +-File: ti-connectivity/wl1271-fw-ap.bin +-Version: 6.2.1.0.54 (AP-only) +-File: ti-connectivity/wl127x-fw-3.bin +-Version: 6.3.0.0.77 +-File: ti-connectivity/wl127x-fw-plt-3.bin +-Version: 6.3.0.0.77 (PLT-only) +-File: ti-connectivity/wl127x-fw-4-sr.bin +-Version: 6.3.5.0.98 (Single-role) +-File: ti-connectivity/wl127x-fw-4-mr.bin +-Version: 6.5.2.0.15 (Multi-role) +-File: ti-connectivity/wl127x-fw-4-plt.bin +-Version: 6.3.5.0.98 (PLT-only) +-File: ti-connectivity/wl127x-fw-5-sr.bin +-Version: 6.3.10.0.142 (Single-role) +-File: ti-connectivity/wl127x-fw-5-mr.bin +-Version: 6.5.7.0.50 (Multi-role) +-File: ti-connectivity/wl127x-fw-5-plt.bin +-Version: 6.3.10.0.142 (PLT-only) +- +-File: ti-connectivity/wl128x-fw.bin +-Version: 7.1.5.50.74 (STA-only) +-File: ti-connectivity/wl128x-fw-ap.bin +-Version: 7.2.1.0.54 (AP-only) +-File: ti-connectivity/wl128x-fw-3.bin +-Version: 7.3.0.0.77 +-File: ti-connectivity/wl128x-fw-plt-3.bin +-Version: 7.3.0.0.77 +-File: ti-connectivity/wl128x-fw-4-sr.bin +-Version: 7.3.5.0.98 (Single-role) +-File: ti-connectivity/wl128x-fw-4-mr.bin +-Version: 7.5.2.0.15 (Multi-role) +-File: ti-connectivity/wl128x-fw-4-plt.bin +-Version: 7.3.5.0.98 (PLT) +-File: ti-connectivity/wl128x-fw-5-sr.bin +-Version: 7.3.10.0.142 (Single-role) +-File: ti-connectivity/wl128x-fw-5-mr.bin +-Version: 7.5.7.0.50 (Multi-role) +-File: ti-connectivity/wl128x-fw-5-plt.bin +-Version: 7.3.10.2.142 (PLT-only) +- +-File: ti-connectivity/wl127x-nvs.bin +-File: ti-connectivity/wl128x-nvs.bin +-Link: ti-connectivity/wl12xx-nvs.bin -> wl127x-nvs.bin +-Link: ti-connectivity/wl1271-nvs.bin -> wl127x-nvs.bin +- +-Licence: Redistributable. See LICENCE.ti-connectivity for details. +- +-The NVS file includes two parts: +- - radio calibration +- - HW configuration parameters (aka. INI values) +- +-The published NVS files are for testing only. Every device needs to +-hava a unique NVS which is properly calibrated for best results. You +-can find more information about NVS generation for your device here: +- +-http://wireless.kernel.org/en/users/Drivers/wl12xx/calibrator +- +-If you're using a wl127x based device, use a symbolic link called +-wl1271-nvs.bin that links to the wl127x-nvs.bin file. If you are +-using wl128x, link to wl128x-nvs.bin instead. +- +-The driver expects to find the firmwares under a ti-connectivity +-subdirectory. So if your system looks for firmwares in /lib/firmware, +-the firmwares for wl12xx chips must be located in +-/lib/firmware/ti-connectivity/. +- +--------------------------------------------------------------------------- +- +-Driver: wl18xx - Texas Instruments 802.11 WLAN driver for WiLink8 chips +- +-File: ti-connectivity/wl18xx-fw.bin +-Version: 8.2.0.0.100 +-File: ti-connectivity/wl18xx-fw-2.bin +-Version: 8.5.0.0.55 +-File: ti-connectivity/wl18xx-fw-3.bin +-Version: 8.8.0.0.13 +-File: ti-connectivity/wl18xx-fw-4.bin +-Version: 8.9.0.0.79 +- +-Licence: Redistributable. See LICENCE.ti-connectivity for details. +- +-The driver expects to find the firmwares under a ti-connectivity +-subdirectory. So if your system looks for firmwares in /lib/firmware, +-the firmwares for wl18xx chips must be located in +-/lib/firmware/ti-connectivity/. 
+- +--------------------------------------------------------------------------- +- +-Driver: TI_ST - Texas Instruments bluetooth driver +- +-File: ti-connectivity/TIInit_6.2.31.bts +-Version: 2.44 (TI_P31.123) +-File: ti-connectivity/TIInit_6.6.15.bts +-Version: 2.14 (TI_P6_15.93) +-File: ti-connectivity/TIInit_7.2.31.bts +- +-Licence: Redistributable. See LICENCE.ti-connectivity for details. +- +- TIInit_7.2.31.bts version 7.2.31 +- +- In order to use that file copy it to /lib/firmware/ti-connectivity. +- +--------------------------------------------------------------------------- +- +-Driver: r8712u - Realtek 802.11n WLAN driver for RTL8712U +- +-File: rtlwifi/rtl8712u.bin +-Info: From Vendor's rtl8712_8188_8191_8192SU_usb_linux_v7_0.20100831 +- Reverted rtl8188C_8192C_8192D_usb_linux_v3.4.2_3727.20120404 +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8192ce - Realtek 802.11n WLAN driver for RTL8192CE +- +-File: rtlwifi/rtl8192cfw.bin +-File: rtlwifi/rtl8192cfwU.bin +-File: rtlwifi/rtl8192cfwU_B.bin +-Info: From Vendor's realtek/rtlwifi_linux_mac80211_0019.0320.2014V628 driver +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8192cu - Realtek 802.11n WLAN driver for RTL8192CU +- +-File: rtlwifi/rtl8192cufw.bin +-File: rtlwifi/rtl8192cufw_A.bin +-File: rtlwifi/rtl8192cufw_B.bin +-File: rtlwifi/rtl8192cufw_TMSC.bin +-Info: From Vendor's rtl8188C_8192C_usb_linux_v4.0.1_6911.20130308 driver +- All files extracted from driver/hal/rtl8192c/usb/Hal8192CUHWImg.c +- Relevant variables (CONFIG_BT_COEXISTENCE not set): +- - rtlwifi/rtl8192cufw_A.bin: Rtl8192CUFwUMCACutImgArray +- - rtlwifi/rtl8192cufw_B.bin: Rtl8192CUFwUMCBCutImgArray +- - rtlwifi/rtl8192cufw_TMSC.bin: Rtl8192CUFwTSMCImgArray +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8192se - Realtek 802.11n WLAN driver for RTL8192SE +- +-Info: updated from rtl_92ce_92se_92de_linux_mac80211_0004.0816.2011 driver version +-File: rtlwifi/rtl8192sefw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8192de - Realtek 802.11n WLAN driver for RTL8192DE +- +-Info: Updated from Realtek version rtl_92ce_92se_92de_8723ae_linux_mac80211_0007.0809.2012 +-File: rtlwifi/rtl8192defw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8723e - Realtek 802.11n WLAN driver for RTL8723E +- +-Info: Taken from Realtek version rtl_92ce_92se_92de_8723ae_linux_mac80211_0007.0809.2012 +-File: rtlwifi/rtl8723fw.bin +-File: rtlwifi/rtl8723fw_B.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8723be - Realtek 802.11n WLAN driver for RTL8723BE +- +-Info: From Vendor's realtek/rtlwifi_linux_mac80211_0019.0320.2014V628 driver +-File: rtlwifi/rtl8723befw.bin +-Info: Update to version 36 - Sent by Realtek +-File: rtlwifi/rtl8723befw_36.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. 
+- +--------------------------------------------------------------------------- +- +-Driver: rtl8723de - Realtek 802.11ac WLAN driver for RTL8723DE +- +-Info: Supplied by Vendor at https://github.com/pkshih/rtlwifi_rtl8723de +-File: rtlwifi/rtl8723defw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8188ee - Realtek 802.11n WLAN driver for RTL8188EE +- +-Info: Taken from Realtek version rtl_92ce_92se_92de_8723ae_88ee_linux_mac80211_0010.0109.2013 +-File: rtlwifi/rtl8188efw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8821ae - Realtek 802.11n WLAN driver for RTL8812AE +- +-Info: From Vendor's realtek/rtlwifi_linux_mac80211_0019.0320.2014V628 driver +-File: rtlwifi/rtl8812aefw.bin +-File: rtlwifi/rtl8812aefw_wowlan.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8821ae - Realtek 802.11n WLAN driver for RTL8821AE +- +-Info: From Vendor's realtek/rtlwifi_linux_mac80211_0019.0320.2014V628 driver +-File: rtlwifi/rtl8821aefw.bin +-File: rtlwifi/rtl8821aefw_wowlan.bin +-Info: Update to version 29 - Sent by Realtek +-File: rtlwifi/rtl8821aefw_29.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8822be - Realtek 802.11n WLAN driver for RTL8822BE +- +-Info: Sent to Larry Finger by Realtek engineer Ping-Ke Shih +-File: rtlwifi/rtl8822befw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtw88 - Realtek 802.11ac WLAN driver for RTL8822BE and RTL8822CE +- +-Info: Sent to Larry Finger by Realtek engineer Yan-Hsuan Chuang +-File: rtw88/rtw8822b_fw.bin +-File: rtw88/rtw8822c_fw.bin +-File: rtw88/rtw8822c_wow_fw.bin +-File: rtw88/README +-File: rtw88/rtw8723d_fw.bin +-File: rtw88/rtw8821c_fw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +- These firmware should be put under /lib/firmware/rtw88/ +- And note that the rtw88 driver is able to support wake-on-wireless LAN +- for RTL8822C devices, after kernel v5.6+. So, make sure the firmware +- rtw88/rtw8822c_wow_fw.bin is also packed, otherwise the firmware load +- fail could be a problem. +- Although RTL8723D devices are 802.11n device, they are also supported +- by rtw88 because the hardware arch is similar. +- +--------------------------------------------------------------------------- +- +-Driver: rtw89 - Realtek 802.11ax WLAN driver for RTL8851B/RTL8852A/RTL8852B/RTL8852C +- +-File: rtw89/rtw8851b_fw.bin +-File: rtw89/rtw8852a_fw.bin +-File: rtw89/rtw8852b_fw.bin +-File: rtw89/rtw8852b_fw-1.bin +-File: rtw89/rtw8852c_fw.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8192ee - Realtek 802.11n WLAN driver for RTL8192EE +- +-Info: Initial version taken from Realtek version +- rtl_92ce_92se_92de_8723ae_88ee_8723be_92ee_linux_mac80211_0017.1224.2013 +- Updated Jan. 14, 2015 with file added by Realtek to +- http://github.com/lwfinger/rtlwifi_new.git. 
+- Same firmware rtl8192eu_nic.bin so just link them +-Link: rtlwifi/rtl8192eefw.bin -> rtl8192eu_nic.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8723bs - Realtek 802.11n WLAN driver for RTL8723BS +- +-Info: Firmware files extracted from data statements in Realtek driver +- v4.3.5.5_12290.20140916_BTCOEX20140507-4E40. +-File: rtlwifi/rtl8723bs_bt.bin +-Link: rtlwifi/rtl8723bs_nic.bin -> rtl8723bu_nic.bin +-Link: rtlwifi/rtl8723bs_ap_wowlan.bin -> rtl8723bu_ap_wowlan.bin +-Link: rtlwifi/rtl8723bs_wowlan.bin -> rtl8723bu_wowlan.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: rtl8xxxu - Realtek 802.11n WLAN driver for RTL8XXX USB devices +- +-Info: rtl8723au taken from Realtek driver +- rtl8723A_WiFi_linux_v4.1.3_6044.20121224 +- Firmware is embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8723aufw_A.bin +-File: rtlwifi/rtl8723aufw_B.bin +-File: rtlwifi/rtl8723aufw_B_NoBT.bin +- +-Info: rtl8723bu taken from Realtek driver +- rtl8723BU_WiFi_linux_v4.3.16_14189.20150519_BTCOEX20150119-5844 +- Firmware is embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8723bu_nic.bin +-File: rtlwifi/rtl8723bu_wowlan.bin +-File: rtlwifi/rtl8723bu_ap_wowlan.bin +- +-Info: rtl8192eu taken from Realtek driver +- rtl8192EU_WiFi_linux_v5.11.2.1-18-g8e7df912b.20210527_COEX20171113-0047 +- Firmware is embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8192eu_nic.bin +-Version: 35.7 +-File: rtlwifi/rtl8192eu_wowlan.bin +-Version: 35.7 +-File: rtlwifi/rtl8192eu_ap_wowlan.bin +-Version: 18.0 +- +-Info: rtl8188fu taken from Realtek driver +- RTL8188FU_Linux_v4.3.23.6_20964.20170110 +- Firmware was embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8188fufw.bin +- +-File: rtlwifi/rtl8710bufw_SMIC.bin +-Version: 16.0 +-File: rtlwifi/rtl8710bufw_UMC.bin +-Version: 16.0 +- +-Info: rtl8188eu taken from Realtek driver version +- v5.2.2.4_25483.20171222. +- Firmware is embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8188eufw.bin +-Version: 28.0 +- +-Info: rtl8192fu taken from Realtek driver version +- v5.8.6.2_35538.20191028_COEX20190910-0d02. +- Firmware is embedded in the driver as data statements. This info +- has been extracted into a binary file. +-File: rtlwifi/rtl8192fufw.bin +-Version: 6.0 +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: r8169 - RealTek 8169/8168/8101 ethernet driver. 
+- +-File: rtl_nic/rtl8168d-1.fw +-File: rtl_nic/rtl8168d-2.fw +-File: rtl_nic/rtl8105e-1.fw +-File: rtl_nic/rtl8168e-1.fw +-File: rtl_nic/rtl8168e-2.fw +- +-File: rtl_nic/rtl8168e-3.fw +-Version: 0.0.4 +- +-File: rtl_nic/rtl8168f-1.fw +-Version: 0.0.5 +- +-File: rtl_nic/rtl8168f-2.fw +-Version: 0.0.4 +- +-File: rtl_nic/rtl8411-1.fw +-Version: 0.0.3 +- +-File: rtl_nic/rtl8411-2.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8402-1.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8106e-1.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8106e-2.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8168g-1.fw +-Version: 0.0.3 +- +-File: rtl_nic/rtl8168g-2.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8168g-3.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8168h-1.fw +-Version: 0.0.2 +- +-File: rtl_nic/rtl8168h-2.fw +-Version: 0.0.2 +- +-File: rtl_nic/rtl8168fp-3.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8107e-1.fw +-Version: 0.0.2 +- +-File: rtl_nic/rtl8107e-2.fw +-Version: 0.0.2 +- +-File: rtl_nic/rtl8125a-3.fw +-Version: 0.0.1 +- +-File: rtl_nic/rtl8125b-1.fw +-Version: 0.0.2 +- +-File: rtl_nic/rtl8125b-2.fw +-Version: 0.0.2 +- +-Licence: +- * Copyright © 2011-2013, Realtek Semiconductor Corporation +- * +- * Permission is hereby granted for the distribution of this firmware +- * data in hexadecimal or equivalent format, provided this copyright +- * notice is accompanying it. +- +--------------------------------------------------------------------------- +- +-Driver: r8152 - Realtek RTL8152/RTL8153 Based USB Ethernet Adapters +- +-File: rtl_nic/rtl8153a-2.fw +-File: rtl_nic/rtl8153a-3.fw +-File: rtl_nic/rtl8153a-4.fw +-File: rtl_nic/rtl8153b-2.fw +-File: rtl_nic/rtl8153c-1.fw +-File: rtl_nic/rtl8156a-2.fw +-File: rtl_nic/rtl8156b-2.fw +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +--------------------------------------------------------------------------- +- +-Driver: vt6656 - VIA VT6656 USB wireless driver +- +-File: vntwusb.fw +- +-Licence: Redistributable. See LICENCE.via_vt6656 for details. +- +--------------------------------------------------------------------------- +- +-Driver: DFU Driver for Atheros bluetooth chipset AR3012 +- +-File: ar3k/AthrBT_0x01020001.dfu +-File: ar3k/ramps_0x01020001_26.dfu +-File: ar3k/AthrBT_0x01020200.dfu +-File: ar3k/ramps_0x01020200_26.dfu +-File: ar3k/ramps_0x01020200_40.dfu +-File: ar3k/AthrBT_0x31010000.dfu +-File: ar3k/ramps_0x31010000_40.dfu +-File: ar3k/AthrBT_0x11020000.dfu +-File: ar3k/ramps_0x11020000_40.dfu +-File: ar3k/ramps_0x01020201_26.dfu +-File: ar3k/ramps_0x01020201_40.dfu +-File: ar3k/AthrBT_0x41020000.dfu +-File: ar3k/ramps_0x41020000_40.dfu +-File: ar3k/AthrBT_0x11020100.dfu +-File: ar3k/ramps_0x11020100_40.dfu +-File: ar3k/AthrBT_0x31010100.dfu +-File: ar3k/ramps_0x31010100_40.dfu +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: DFU Driver for Atheros bluetooth chipset AR3012 +- +-File: ar3k/AthrBT_0x01020201.dfu +-File: ar3k/1020201coex/ramps_0x01020201_26_HighPriority.dfu +- +-Licence: Redistributable. 
See LICENSE.QualcommAtheros_ar3k for details +- +--------------------------------------------------------------------------- +- +-Driver:Atheros AR300x UART HCI Bluetooth Chip driver +- +-File: ar3k/1020201/PS_ASIC.pst +-File: ar3k/1020201/RamPatch.txt +-File: ar3k/1020200/ar3kbdaddr.pst +-File: ar3k/1020200/PS_ASIC.pst +-File: ar3k/1020200/RamPatch.txt +-File: ar3k/30101/ar3kbdaddr.pst +-File: ar3k/30101/PS_ASIC.pst +-File: ar3k/30101/RamPatch.txt +-File: ar3k/30000/ar3kbdaddr.pst +-File: ar3k/30000/PS_ASIC.pst +-File: ar3k/30000/RamPatch.txt +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ath6kl - Atheros support for AR6003 +- +-File: ath6k/AR6004/hw1.3/fw-3.bin +-File: ath6k/AR6004/hw1.3/bdata.bin +-File: ath6k/AR6004/hw1.2/fw-2.bin +-File: ath6k/AR6004/hw1.2/bdata.bin +-File: ath6k/AR6003/hw1.0/otp.bin.z77 +-File: ath6k/AR6003/hw1.0/bdata.SD31.bin +-File: ath6k/AR6003/hw1.0/bdata.SD32.bin +-File: ath6k/AR6003/hw1.0/data.patch.bin +-File: ath6k/AR6003/hw1.0/bdata.WB31.bin +-File: ath6k/AR6003/hw1.0/athwlan.bin.z77 +-File: ath6k/AR6003/hw2.1.1/fw-2.bin +-File: ath6k/AR6003/hw2.1.1/fw-3.bin +-File: ath6k/AR6003/hw2.1.1/otp.bin +-File: ath6k/AR6003/hw2.1.1/athwlan.bin +-File: ath6k/AR6003/hw2.1.1/endpointping.bin +-File: ath6k/AR6003/hw2.1.1/bdata.SD31.bin +-File: ath6k/AR6003/hw2.1.1/bdata.SD32.bin +-File: ath6k/AR6003/hw2.1.1/data.patch.bin +-File: ath6k/AR6003/hw2.1.1/bdata.WB31.bin +-File: ath6k/AR6003/hw2.0/otp.bin.z77 +-File: ath6k/AR6003/hw2.0/bdata.SD31.bin +-File: ath6k/AR6003/hw2.0/bdata.SD32.bin +-File: ath6k/AR6003/hw2.0/data.patch.bin +-File: ath6k/AR6003/hw2.0/bdata.WB31.bin +-File: ath6k/AR6003/hw2.0/athwlan.bin.z77 +-File: ath6k/AR6002/eeprom.data +-File: ath6k/AR6002/eeprom.bin +-File: ath6k/AR6002/athwlan.bin.z77 +-File: ath6k/AR6002/data.patch.hw2_0.bin +- +-Licence: Redistributable. 
See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ath10k - Qualcomm Atheros support for QCA988x family of chips +- +-File: ath10k/QCA988X/hw2.0/board.bin +-File: ath10k/QCA988X/hw2.0/firmware-4.bin +-Version: 10.2.4.45 +-File: ath10k/QCA988X/hw2.0/notice_ath10k_firmware-4.txt +-File: ath10k/QCA988X/hw2.0/firmware-5.bin +-Version: 10.2.4-1.0-00047 +-File: ath10k/QCA988X/hw2.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA6174/hw2.1/board.bin +-File: ath10k/QCA6174/hw2.1/board-2.bin +-File: ath10k/QCA6174/hw2.1/firmware-5.bin +-Version: SW_RM.1.1.1-00157-QCARMSWPZ-1 +-File: ath10k/QCA6174/hw2.1/notice_ath10k_firmware-5.txt +-File: ath10k/QCA6174/hw3.0/board.bin +-File: ath10k/QCA6174/hw3.0/board-2.bin +-File: ath10k/QCA6174/hw3.0/firmware-4.bin +-Version: WLAN.RM.2.0-00180-QCARMSWPZ-1 +-File: ath10k/QCA6174/hw3.0/notice_ath10k_firmware-4.txt +-File: ath10k/QCA6174/hw3.0/firmware-6.bin +-Version: WLAN.RM.4.4.1-00288-QCARMSWPZ-1 +-File: ath10k/QCA6174/hw3.0/notice_ath10k_firmware-6.txt +-File: ath10k/QCA6174/hw3.0/firmware-sdio-6.bin +-Version: WLAN.RMH.4.4.1-00174 +-File: ath10k/QCA6174/hw3.0/notice_ath10k_firmware-sdio-6.txt +-File: ath10k/QCA9377/hw1.0/board.bin +-File: ath10k/QCA9377/hw1.0/board-2.bin +-File: ath10k/QCA9377/hw1.0/firmware-5.bin +-Version: WLAN.TF.1.0-00002-QCATFSWPZ-5 +-File: ath10k/QCA9377/hw1.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA9377/hw1.0/firmware-sdio-5.bin +-Version: WLAN.TF.1.1.1-00061-QCATFSWPZ-1 +-File: ath10k/QCA9377/hw1.0/notice_ath10k_firmware-sdio-5.txt +-File: ath10k/QCA99X0/hw2.0/board-2.bin +-File: ath10k/QCA99X0/hw2.0/firmware-5.bin +-Version: 10.4.1.00030-1 +-File: ath10k/QCA99X0/hw2.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA4019/hw1.0/board-2.bin +-File: ath10k/QCA4019/hw1.0/firmware-5.bin +-Version: 10.4-3.6-00140 +-File: ath10k/QCA4019/hw1.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA9887/hw1.0/board.bin +-File: ath10k/QCA9887/hw1.0/firmware-5.bin +-Version: 10.2.4-1.0-00047 +-File: ath10k/QCA9887/hw1.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA9888/hw2.0/board-2.bin +-File: ath10k/QCA9888/hw2.0/firmware-5.bin +-Version: 10.4-3.9.0.2-00157 +-File: ath10k/QCA9888/hw2.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA9984/hw1.0/board-2.bin +-File: ath10k/QCA9984/hw1.0/firmware-5.bin +-Version: 10.4-3.9.0.2-00157 +-File: ath10k/QCA9984/hw1.0/notice_ath10k_firmware-5.txt +-File: ath10k/QCA9377/hw1.0/firmware-6.bin +-Version: WLAN.TF.2.1-00021-QCARMSWP-1 +-File: ath10k/QCA9377/hw1.0/notice_ath10k_firmware-6.txt +-File: ath10k/WCN3990/hw1.0/board-2.bin +-File: ath10k/WCN3990/hw1.0/firmware-5.bin +-File: ath10k/WCN3990/hw1.0/wlanmdsp.mbn +-Link: qcom/sdm845/wlanmdsp.mbn -> ../../ath10k/WCN3990/hw1.0/wlanmdsp.mbn +-Version: WLAN.HL.2.0-01387-QCAHLSWMTPLZ-1 +-File: ath10k/WCN3990/hw1.0/notice.txt_wlanmdsp +- +-Licence: Redistributable. 
See LICENSE.QualcommAtheros_ath10k for details +- +--------------------------------------------------------------------------- +- +-Driver: ath11k - Qualcomm Technologies 802.11ax chipset support +- +-File: ath11k/IPQ6018/hw1.0/board-2.bin +-File: ath11k/IPQ6018/hw1.0/m3_fw.b00 +-File: ath11k/IPQ6018/hw1.0/m3_fw.b01 +-File: ath11k/IPQ6018/hw1.0/m3_fw.b02 +-File: ath11k/IPQ6018/hw1.0/m3_fw.flist +-File: ath11k/IPQ6018/hw1.0/m3_fw.mdt +-File: ath11k/IPQ6018/hw1.0/q6_fw.b00 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b01 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b02 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b03 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b04 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b05 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b07 +-File: ath11k/IPQ6018/hw1.0/q6_fw.b08 +-File: ath11k/IPQ6018/hw1.0/q6_fw.flist +-File: ath11k/IPQ6018/hw1.0/q6_fw.mdt +-Version: WLAN.HK.2.7.0.1-01744-QCAHKSWPL_SILICONZ-1 +-File: ath11k/IPQ6018/hw1.0/Notice.txt +-File: ath11k/IPQ8074/hw2.0/board-2.bin +-File: ath11k/IPQ8074/hw2.0/m3_fw.b00 +-File: ath11k/IPQ8074/hw2.0/m3_fw.b01 +-File: ath11k/IPQ8074/hw2.0/m3_fw.b02 +-File: ath11k/IPQ8074/hw2.0/m3_fw.flist +-File: ath11k/IPQ8074/hw2.0/m3_fw.mdt +-File: ath11k/IPQ8074/hw2.0/q6_fw.b00 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b01 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b02 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b03 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b04 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b05 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b07 +-File: ath11k/IPQ8074/hw2.0/q6_fw.b08 +-File: ath11k/IPQ8074/hw2.0/q6_fw.flist +-File: ath11k/IPQ8074/hw2.0/q6_fw.mdt +-Version: WLAN.HK.2.7.0.1-01744-QCAHKSWPL_SILICONZ-1 +-File: ath11k/IPQ8074/hw2.0/Notice.txt +-File: ath11k/QCA6390/hw2.0/board-2.bin +-File: ath11k/QCA6390/hw2.0/amss.bin +-File: ath11k/QCA6390/hw2.0/m3.bin +-Version: WLAN.HST.1.0.1-05266-QCAHSTSWPLZ_V2_TO_X86-1 +-File: ath11k/QCA6390/hw2.0/Notice.txt +-File: ath11k/WCN6855/hw2.0/regdb.bin +-File: ath11k/WCN6855/hw2.0/board-2.bin +-File: ath11k/WCN6855/hw2.0/amss.bin +-File: ath11k/WCN6855/hw2.0/m3.bin +-Version: WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3.6510.23 +-File: ath11k/WCN6855/hw2.0/Notice.txt +-Link: ath11k/WCN6855/hw2.1/regdb.bin -> ../hw2.0/regdb.bin +-Link: ath11k/WCN6855/hw2.1/board-2.bin -> ../hw2.0/board-2.bin +-Link: ath11k/WCN6855/hw2.1/amss.bin -> ../hw2.0/amss.bin +-Link: ath11k/WCN6855/hw2.1/m3.bin -> ../hw2.0/m3.bin +-File: ath11k/QCN9074/hw1.0/board-2.bin +-File: ath11k/QCN9074/hw1.0/amss.bin +-File: ath11k/QCN9074/hw1.0/m3.bin +-Version: WLAN.HK.2.7.0.1-01744-QCAHKSWPL_SILICONZ-1 +-File: ath11k/QCN9074/hw1.0/Notice.txt +-File: ath11k/WCN6750/hw1.0/board-2.bin +-File: ath11k/WCN6750/hw1.0/wpss.b00 +-File: ath11k/WCN6750/hw1.0/wpss.b01 +-File: ath11k/WCN6750/hw1.0/wpss.b02 +-File: ath11k/WCN6750/hw1.0/wpss.b03 +-File: ath11k/WCN6750/hw1.0/wpss.b04 +-File: ath11k/WCN6750/hw1.0/wpss.b05 +-File: ath11k/WCN6750/hw1.0/wpss.b06 +-File: ath11k/WCN6750/hw1.0/wpss.b07 +-File: ath11k/WCN6750/hw1.0/wpss.b08 +-File: ath11k/WCN6750/hw1.0/wpss.mdt +-Version: WLAN.MSL.1.0.1-01160-QCAMSLSWPLZ-1 +-File: ath11k/WCN6750/hw1.0/Notice.txt +-File: ath11k/IPQ5018/hw1.0/board-2.bin +-File: ath11k/IPQ5018/hw1.0/m3_fw.b00 +-File: ath11k/IPQ5018/hw1.0/m3_fw.b01 +-File: ath11k/IPQ5018/hw1.0/m3_fw.b02 +-File: ath11k/IPQ5018/hw1.0/m3_fw.flist +-File: ath11k/IPQ5018/hw1.0/m3_fw.mdt +-File: ath11k/IPQ5018/hw1.0/q6_fw.b00 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b01 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b02 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b03 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b04 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b05 
+-File: ath11k/IPQ5018/hw1.0/q6_fw.b07 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b08 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b09 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b10 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b11 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b13 +-File: ath11k/IPQ5018/hw1.0/q6_fw.b14 +-File: ath11k/IPQ5018/hw1.0/q6_fw.flist +-File: ath11k/IPQ5018/hw1.0/q6_fw.mdt +-Version: WLAN.HK.2.6.0.1-00861-QCAHKSWPL_SILICONZ-1 +-File: ath11k/IPQ5018/hw1.0/Notice.txt +- +-Licence: Redistributable. See LICENSE.QualcommAtheros_ath10k for details +- +--------------------------------------------------------------------------- +- +-Driver: myri10ge - Myri10GE 10GbE NIC driver +- +-File: myri10ge_eth_z8e.dat +-File: myri10ge_ethp_z8e.dat +-File: myri10ge_rss_eth_z8e.dat +-File: myri10ge_rss_ethp_z8e.dat +-File: myri10ge_eth_big_z8e.dat +-File: myri10ge_ethp_big_z8e.dat +-File: myri10ge_rss_eth_big_z8e.dat +-File: myri10ge_rss_ethp_big_z8e.dat +-Version: 1.4.57 +- +-License: Redistributable. See LICENCE.myri10ge_firmware for details. +- +--------------------------------------------------------------------------- +- +-Driver: ath6kl - Atheros support for AR6003 WiFi-Bluetooth combo module +- +-File: ath6k/AR6003.1/hw2.1.1/athwlan.bin +-File: ath6k/AR6003.1/hw2.1.1/bdata.SD31.bin +-File: ath6k/AR6003.1/hw2.1.1/bdata.SD32.bin +-File: ath6k/AR6003.1/hw2.1.1/bdata.WB31.bin +-File: ath6k/AR6003.1/hw2.1.1/data.patch.bin +-File: ath6k/AR6003.1/hw2.1.1/endpointping.bin +-File: ath6k/AR6003.1/hw2.1.1/otp.bin +- +-License: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ath6kl - Atheros support for AR3001 WiFi-Bluetooth combo module +- +-File: ar3k/30101coex/ar3kbdaddr.pst +-File: ar3k/30101coex/PS_ASIC_aclLowPri.pst +-File: ar3k/30101coex/PS_ASIC_aclHighPri.pst +-File: ar3k/30101coex/PS_ASIC.pst +-File: ar3k/30101coex/RamPatch.txt +- +-License: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ene-ub6250 -- ENE UB6250 SD card reader driver +- +-File: ene-ub6250/sd_init1.bin +-File: ene-ub6250/sd_init2.bin +-File: ene-ub6250/sd_rdwr.bin +-File: ene-ub6250/ms_init.bin +-File: ene-ub6250/msp_rdwr.bin +-File: ene-ub6250/ms_rdwr.bin +- +-Licence: Redistributable. See LICENCE.ene_firmware for details. +- +--------------------------------------------------------------------------- +- +-Driver: isci -- Intel C600 SAS controller driver +- +-File: isci/isci_firmware.bin +-Source: isci/ ++File: isci/isci_firmware.bin ++Source: isci/ + + Licence: GPLv2. See GPL-2 for details. + + -------------------------------------------------------------------------- + +-Driver: ar5523 -- Atheros AR5523 based USB Wifi dongles +- +-File: ar5523.bin +- +-Licence: Redistributable. See LICENCE.atheros_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: carl9170 -- Atheros AR9170 802.11 draft-n USB driver +- +-File: carl9170-1.fw +-Version: 1.9.6 +-Source: carl9170fw/ +- +-Downloaded from http://linuxwireless.org/en/users/Drivers/carl9170 +- +-Licence: GPLv2. Some build scripts use the New BSD (3-clause) licence.. See GPL-2 for details. 
+- +--------------------------------------------------------------------------- +- +-Driver: btusb - Bluetooth USB driver +- +-File: intel/ibt-hw-37.7.bseq +-Version: 1316.02.00 +-File: intel/ibt-hw-37.7.10-fw-1.80.2.3.d.bseq +-Version: BT_WilkinsPeak_B3_REL_87_0001 +-File: intel/ibt-hw-37.7.10-fw-1.0.2.3.d.bseq +-Version: BT_WilkinsPeak_B3_REL_87_0001 +-File: intel/ibt-hw-37.7.10-fw-1.80.1.2d.d.bseq +-Version: BT_WilkinsPeak_B5_REL_42_0001 +-File: intel/ibt-hw-37.7.10-fw-1.0.1.2d.d.bseq +-Version: BT_WilkinsPeak_B5_REL_42_0001 +-File: intel/ibt-hw-37.8.bseq +-Version: 1339_02.00 +-File: intel/ibt-hw-37.8.10-fw-1.10.2.27.d.bseq +-Version: BT_StonePeak_C0_REL_59_0001 +-File: intel/ibt-hw-37.8.10-fw-1.10.3.11.e.bseq +-Version: BT_StonePeak_D0_REL_50_0002 +-File: intel/ibt-hw-37.8.10-fw-22.50.19.14.f.bseq +-Version: BT_StonePeak_D1_REL_67_1278 +-File: intel/ibt-11-5.ddc +-Version: LnP/SfP_REL1294 +-File: intel/ibt-11-5.sfi +-Version: BT_LightningPeak_REL0487 +-File: intel/ibt-12-16.ddc +-Version: BT_WindStormPeak_REL1299 +-File: intel/ibt-12-16.sfi +-Version: BT_WindStormPeak_REL1299 +-File: intel/ibt-17-16-1.sfi +-Version: BT_JeffersonPeak_B0_B0_REL20332 +-File: intel/ibt-17-16-1.ddc +-Version: BT_JeffersonPeak_B0_B0_REL20332 +-File: intel/ibt-17-2.sfi +-Version: BT_JeffersonPeak_B0_B0_REL20332 +-File: intel/ibt-17-2.ddc +-Version: BT_JeffersonPeak_B0_B0_REL20332 +-File: intel/ibt-17-0-1.sfi +-Version: BT_JeffersonPeak_A0_B0_REL0201 +-File: intel/ibt-17-0-1.ddc +-Version: BT_JeffersonPeak_A0_B0_REL0201 +-File: intel/ibt-17-1.sfi +-Version: BT_JeffersonPeak_A0_B0_REL0201 +-File: intel/ibt-17-1.ddc +-Version: BT_JeffersonPeak_A0_B0_REL0201 +-File: intel/ibt-18-16-1.sfi +-Version: BT_ThunderPeak_B0_B0_REL20182 +-File: intel/ibt-18-16-1.ddc +-Version: BT_ThunderPeak_B0_B0_REL20182 +-File: intel/ibt-18-2.sfi +-Version: BT_ThunderPeak_B0_B0_REL20182 +-File: intel/ibt-18-2.ddc +-Version: BT_ThunderPeak_B0_B0_REL20182 +-File: intel/ibt-18-0-1.sfi +-Version: BT_ThunderPeak_A0_B0_REL0201 +-File: intel/ibt-18-0-1.ddc +-Version: BT_ThunderPeak_A0_B0_REL0201 +-File: intel/ibt-18-1.sfi +-Version: BT_ThunderPeak_A0_B0_REL0201 +-File: intel/ibt-18-1.ddc +-Version: BT_ThunderPeak_A0_B0_REL0201 +-File:intel/ibt-20-0-3.sfi +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-20-0-3.ddc +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-20-1-3.sfi +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-20-1-3.ddc +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-20-1-4.sfi +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-20-1-4.ddc +-Version: BT_CyclonePeak_A0_REL53392 +-File:intel/ibt-19-0-0.sfi +-Version: BT_Quasar_REL53392 +-File:intel/ibt-19-0-0.ddc +-Version: BT_Quasar_REL53392 +-File:intel/ibt-19-0-1.sfi +-Version: BT_Quasar_REL53392 +-File:intel/ibt-19-0-1.ddc +-Version: BT_Quasar_REL53392 +-File:intel/ibt-19-0-3.sfi +-Version: BT_Quasar_REL53263 +-File:intel/ibt-19-0-3.ddc +-Version: BT_Quasar_REL53263 +-File:intel/ibt-19-0-4.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-0-4.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-16-4.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-16-4.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-32-1.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-32-1.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-32-0.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-32-0.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-32-4.sfi +-Version: BT_HarrisonPeak_REL53392 
+-File:intel/ibt-19-32-4.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-240-1.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-240-1.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-240-4.sfi +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-19-240-4.ddc +-Version: BT_HarrisonPeak_REL53392 +-File:intel/ibt-0041-0041.sfi +-Version: BT_TyphoonPeak_REL62562 +-File:intel/ibt-0041-0041.ddc +-Version: BT_TyphoonPeak_REL62562 +-File:intel/ibt-0040-0041.sfi +-Version: BT_Solar_GfP2_REL62562 +-File:intel/ibt-0040-0041.ddc +-Version: BT_Solar_GfP2_REL62562 +-File:intel/ibt-1040-0041.sfi +-Version: BT_SolarF_GfP2_REL62562 +-File:intel/ibt-1040-0041.ddc +-Version: BT_SolarF_GfP2_REL62562 +- +-File:intel/ibt-0040-1020.sfi +-Version: BT_Solar_JfP1_REL59564 +-File:intel/ibt-0040-1020.ddc +-Version: BT_Solar_JfP1_REL59564 +-File:intel/ibt-1040-1020.sfi +-Version: BT_SolarF_JfP1_REL59564 +-File:intel/ibt-1040-1020.ddc +-Version: BT_SolarF_JfP1_REL59564 +- +-File:intel/ibt-0040-2120.sfi +-Version: BT_Solar_JfP2_REL52159 +-File:intel/ibt-0040-2120.ddc +-Version: BT_Solar_JfP2_REL52159 +-File:intel/ibt-1040-2120.sfi +-Version: BT_SolarF_JfP2_REL59564 +-File:intel/ibt-1040-2120.ddc +-Version: BT_SolarF_JfP2_REL59564 +- +-File:intel/ibt-0040-4150.sfi +-Version: BT_Solar_JnP2_REL62562 +-File:intel/ibt-0040-4150.ddc +-Version: BT_Solar_JnP2_REL62562 +-File:intel/ibt-1040-4150.sfi +-Version: BT_SolarF_JnP2_REL62562 +-File:intel/ibt-1040-4150.ddc +-Version: BT_SolarF_JnP2_REL62562 +- +-Licence: Redistributable. See LICENCE.ibt_firmware for details +- +-File: rtl_bt/rtl8192ee_fw.bin +-File: rtl_bt/rtl8192eu_fw.bin +-File: rtl_bt/rtl8723a_fw.bin +-File: rtl_bt/rtl8723b_fw.bin +-File: rtl_bt/rtl8723bs_fw.bin +-File: rtl_bt/rtl8723bs_config-OBDA8723.bin +-Link: rtl_bt/rtl8723bs_config-OBDA0623.bin -> rtl8723bs_config-OBDA8723.bin +-File: rtl_bt/rtl8761a_fw.bin +-File: rtl_bt/rtl8761b_fw.bin +-File: rtl_bt/rtl8761b_config.bin +-File: rtl_bt/rtl8761bu_fw.bin +-File: rtl_bt/rtl8761bu_config.bin +-File: rtl_bt/rtl8812ae_fw.bin +-File: rtl_bt/rtl8821a_fw.bin +-Link: rtl_bt/rtl8821a_config.bin -> rtl8821c_config.bin +-File: rtl_bt/rtl8822b_fw.bin +-File: rtl_bt/rtl8822b_config.bin +-File: rtl_bt/rtl8723d_fw.bin +-File: rtl_bt/rtl8723d_config.bin +-File: rtl_bt/rtl8821c_fw.bin +-File: rtl_bt/rtl8821c_config.bin +-File: rtl_bt/rtl8821cs_fw.bin +-File: rtl_bt/rtl8821cs_config.bin +-File: rtl_bt/rtl8822cu_fw.bin +-File: rtl_bt/rtl8822cu_config.bin +-File: rtl_bt/rtl8822cs_fw.bin +-File: rtl_bt/rtl8822cs_config.bin +-File: rtl_bt/rtl8852au_fw.bin +-File: rtl_bt/rtl8852au_config.bin +-File: rtl_bt/rtl8852bu_fw.bin +-File: rtl_bt/rtl8852bu_config.bin +-File: rtl_bt/rtl8852cu_fw.bin +-File: rtl_bt/rtl8852cu_config.bin +-File: rtl_bt/rtl8851bu_fw.bin +-File: rtl_bt/rtl8851bu_config.bin +- +-Licence: Redistributable. See LICENCE.rtlwifi_firmware.txt for details. +- +-Found in vendor driver, linux_bt_usb_2.11.20140423_8723be.rar +-From https://github.com/troy-tan/driver_store +-Files rtl_bt/rtl8822b_* came directly from Realtek. These files are +-updated on April 14, 2017. +- +-Found in vendor driver, 20200806_LINUX_BT_DRIVER_RTL8761B_COEX_v0202.zip +-File rtl_bt/rtl8761b_config.bin +-File rtl_bt/rtl8761bu_config.bin +- +--------------------------------------------------------------------------- +- +-Driver: btmtk_usb - Bluetooth USB driver +- +-File: mediatek/mt7650.bin +-Link: mt7650.bin -> mediatek/mt7650.bin +- +-Licence: Redistributable. 
See LICENCE.ralink_a_mediatek_company_firmware for details +- +--------------------------------------------------------------------------- +- + Driver: rp2 -- Comtrol RocketPort 2 serial driver + + File: rp2.fw +@@ -3632,56 +1902,6 @@ License: Redistributable. See LICENCE.moxa for details + + -------------------------------------------------------------------------- + +-Driver: cw1200 - ST-E CW1100/CW1200 WLAN driver +- +-File: wsm_22.bin +-Version: WSM395 +-Licence: Redistributable. See LICENCE.cw1200 for details. +- +-File: sdd_sagrad_1091_1098.bin +- +-License: +- Copyright (c) 2011-2013 Sagrad, Inc. +- +- This SDD ("Static Dynamic Data") file is licensed strictly for use with +- the Sagrad WiFi modules (such as the SG901-1091/1098) that utilize the +- cw1200 driver. There is no warranty expressed or implied about its +- fitness for any purpose. +- +- Permission is hereby granted for the distribution of this SDD file as +- part of Linux or other Open Source operating system kernel in text or +- binary form as required. +- +- (Please note that the actual device firmware is separately licensed) +- +--------------------------------------------------------------------------- +- +-Driver: BFA/BNA - QLogic BR-series Adapter FC/FCOE drivers +- +-File: cbfw-3.2.5.1.bin +-File: ctfw-3.2.5.1.bin +-File: ct2fw-3.2.5.1.bin +- +-Licence: +- +-This file contains firmware data derived from proprietary unpublished +-source code. +-Copyright (c) 2013-2014 Brocade Communications Systems, Inc. +-Copyright (c) 2014-2015 QLogic Corporation. +- +-Permission is hereby granted for the distribution of this firmware data +-in hexadecimal or equivalent format, provided this copyright notice is +-accompanying it. +- +-QLogic grants permission to use and redistribute these firmware files +-for use with QLogic BR-series devices, but not as a part of the Linux +-kernel or in any other form which would require these files themselves +-to be covered by the terms of the GNU General Public License. +-These firmware files are distributed in the hope that they will be +-useful, but WITHOUT ANY WARRANTY; without even the implied warranty +-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +--------------------------------------------------------------------------- + Driver: qat - Intel(R) QAT crypto accelerator + + File: qat_895xcc.bin +@@ -3698,36 +1918,6 @@ Licence: Redistributable. See LICENCE.qat_firmware for details + + -------------------------------------------------------------------------- + +-Driver: rsi -- Redpine Signals Inc 91x driver +- +-File: rsi_91x.fw +- +-File: rsi/rs9113_wlan_qspi.rps +-Version: 1.6.1 +- +-File: rsi/rs9113_wlan_bt_dual_mode.rps +-Version: 1.6.1 +- +-File: rsi/rs9113_ap_bt_dual_mode.rps +-Version: 1.6.1 +- +-File: rsi/rs9116_wlan.rps +-Version: 1.0.5b +- +-File: rsi/rs9116_wlan_bt_classic.rps +-Version: 1.0.5b +- +-Licence: +- * Firmware is: +- * Derived from proprietary unpublished source code, +- * Copyright (C) 2019 Redpine Signals Inc. +- * +- * Permission is hereby granted for the distribution of this firmware +- * as part of Linux or other Open Source operating system kernel +- * provided this copyright notice is accompanying it. +- +--------------------------------------------------------------------------- +- + Driver: xhci-rcar -- Renesas R-Car Gen2/3 USB 3.0 host controller driver + + File: r8a779x_usb3_v1.dlmem +@@ -3770,86 +1960,6 @@ Licence: GPLv2 or later. See GPL-2 and GPL-3 for details. 
+ + -------------------------------------------------------------------------- + +-Driver: btqca - Qualcomm Atheros Bluetooth support for QCA61x4 chips +- +-File: qca/nvm_usb_00000201.bin +-File: qca/nvm_usb_00000200.bin +-File: qca/nvm_usb_00000300.bin +-File: qca/nvm_usb_00000302.bin +-File: qca/nvm_00130300.bin +-File: qca/nvm_00130302.bin +-File: qca/nvm_00230302.bin +-File: qca/rampatch_usb_00000200.bin +-File: qca/rampatch_usb_00000201.bin +-File: qca/rampatch_usb_00000300.bin +-File: qca/rampatch_usb_00000302.bin +-File: qca/rampatch_00130300.bin +-File: qca/rampatch_00130302.bin +-File: qca/rampatch_00230302.bin +-File: qca/nvm_00440302.bin +-File: qca/rampatch_00440302.bin +-File: qca/nvm_00440302_eu.bin +-File: qca/nvm_00440302_i2s_eu.bin +-File: qca/nvm_usb_00000302_eu.bin +-File: qca/htbtfw20.tlv +-File: qca/htnv20.bin +-File: qca/rampatch_usb_00130200.bin +-File: qca/nvm_usb_00130200.bin +-File: qca/nvm_usb_00130200_0104.bin +-File: qca/nvm_usb_00130200_0105.bin +-File: qca/nvm_usb_00130200_0106.bin +-File: qca/nvm_usb_00130200_0107.bin +-File: qca/nvm_usb_00130200_0109.bin +-File: qca/nvm_usb_00130200_0110.bin +-File: qca/rampatch_usb_00130201.bin +-File: qca/nvm_usb_00130201.bin +-File: qca/nvm_usb_00130201_010a.bin +-File: qca/nvm_usb_00130201_010b.bin +-File: qca/nvm_usb_00130201_0303.bin +-File: qca/nvm_usb_00130201_gf.bin +-File: qca/nvm_usb_00130201_gf_010a.bin +-File: qca/nvm_usb_00130201_gf_010b.bin +-File: qca/nvm_usb_00130201_gf_0303.bin +-File: qca/rampatch_usb_00190200.bin +-File: qca/nvm_usb_00190200.bin +- +-Licence: Redistributable. See LICENSE.QualcommAtheros_ath10k and qca/NOTICE.txt for details +- +--------------------------------------------------------------------------- +- +-Driver: qca - Qualcomm Atheros Bluetooth support for WCN399x chips +- +-File: qca/crbtfw21.tlv +-File: qca/crnv21.bin +-File: qca/crbtfw32.tlv +-File: qca/crnv32.bin +-File: qca/crnv32u.bin +- +-Driver: qca - Qualcomm Atheros Bluetooth support for WCN6750 chips +- +-File: qca/msbtfw11.mbn +-File: qca/msbtfw11.tlv +-File: qca/msnv11.bin +-File: qca/msnv11.b0a +-File: qca/msnv11.b09 +- +-Licence: Redistributable. See LICENSE.QualcommAtheros_ath10k and qca/NOTICE.txt for details +- +-Driver: qca - Qualcomm Atheros Bluetooth support for QCA2066 chips +- +-File: qca/hpbtfw21.tlv +-File: qca/hpnv21.bin +-File: qca/hpnv21g.bin +-File: qca/hpnv21.301 +-File: qca/hpnv21.302 +-File: qca/hpnv21g.301 +-File: qca/hpnv21g.302 +- +- +-Licence: Redistributable. See LICENSE.QualcommAtheros_ath10k and qca/NOTICE.txt for details +- +--------------------------------------------------------------------------- +- + Driver: liquidio -- Cavium LiquidIO driver + + File: liquidio/lio_23xx_nic.bin +@@ -4663,19 +2773,6 @@ Licence: Redistributable. See LICENCE.nvidia for details + + -------------------------------------------------------------------------- + +-Driver: wilc1000 - Atmel 802.11n WLAN driver for WILC1000 +- +-File: atmel/wilc1000_fw.bin +-File: atmel/wilc1000_ap_fw.bin +-File: atmel/wilc1000_p2p_fw.bin +-File: atmel/wilc1000_wifi_firmware.bin +-File: atmel/wilc1000_wifi_firmware-1.bin +-Version: 16.0 +- +-License: Redistributable. See LICENSE.atmel for details +- +--------------------------------------------------------------------------- +- + Driver: hfi1 - Intel OPA Gen 1 adapter + + File: hfi1_dc8051.fw +@@ -4699,18 +2796,6 @@ Licence: Redistributable. See LICENCE.ti-keystone for details. 
+ + -------------------------------------------------------------------------- + +-Driver: mwlwifi - Marvell mac80211 driver for 80211ac cards. +- +-File: mwlwifi/88W8864.bin +-Version: 7.2.8.6 +- +-File: mwlwifi/88W8897.bin +-Version: 8.2.0.10 +- +-Licence: Redistributable. See LICENCE.Marvell for details. +- +--------------------------------------------------------------------------- +- + Driver: mtk_scp - MediaTek SCP System Control Processing Driver + + File: mediatek/mt8183/scp.img +@@ -4726,195 +2811,6 @@ Licence: Redistributable. See LICENCE.mediatek for details. + + -------------------------------------------------------------------------- + +-Driver: btmtk - MediaTek Bluetooth Driver +- +-File: mediatek/mt7622pr2h.bin +-Version: 20180621204904 +-File: mediatek/mt7668pr2h.bin +-Version: 20180517181834 +-# Note: explicitly commented out, since it's duplicated further down +-# File: mediatek/mt7663pr2h.bin +-# Version: 7663e2ccn04-2006030247 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt76x0 - MediaTek MT76x0 Wireless MACs +- +-File: mediatek/mt7610u.bin +-File: mediatek/mt7610e.bin +-Version: 2.6 +-File: mediatek/mt7650e.bin +-Version: 1.0.07-b370 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +---------------------------------------------------------------------------- +- +-Driver: mt76x2e - MediaTek MT76x2 Wireless MACs +- +-File: mediatek/mt7662.bin +-Version: 1.9 +-Link: mt7662.bin -> mediatek/mt7662.bin +- +-File: mediatek/mt7662_rom_patch.bin +-Version: 0.0.2_P69 +-Link: mt7662_rom_patch.bin -> mediatek/mt7662_rom_patch.bin +- +-Licence: Redistributable. See LICENCE.ralink_a_mediatek_company_firmware for details +- +---------------------------------------------------------------------------- +- +-Driver: mt76x2u - MediaTek MT76x2u Wireless MACs +- +-File: mediatek/mt7662u.bin +-Version: 1.5 +- +-File: mediatek/mt7662u_rom_patch.bin +-Version: 0.0.2_P48 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7615e - MediaTek MT7615e Wireless MACs +- +-File: mediatek/mt7615_n9.bin +-Version: 20200814 +-File: mediatek/mt7615_cr4.bin +-Version: 20190114 +-File: mediatek/mt7615_rom_patch.bin +-Version: 20190114 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7622 - MediaTek MT7622 Wireless MACs +- +-File: mediatek/mt7622_n9.bin +-Version: 20200630 +-File: mediatek/mt7622_rom_patch.bin +-Version: 20190114 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7663 - MediaTek MT7663 Wireless MACs +- +-File: mediatek/mt7663pr2h.bin +-Version: 7663e2ccn04-2006030247 +-File: mediatek/mt7663_n9_v3.bin +-Version: v3.1.1 +- +-File: mediatek/mt7663pr2h_rebb.bin +-Version: 7663e2-1802-19091404338b809 +-File: mediatek/mt7663_n9_rebb.bin +-Version: 7663mp1827-20190914043434 +- +-Licence: Redistributable. See LICENCE.mediatek for details. 
+- +--------------------------------------------------------------------------- +- +-Driver: mt7915e - MediaTek Wireless MACs for MT7915/MT7916/MT7986/MT7981 +- +-File: mediatek/mt7915_wm.bin +-Version: 20220929104145 +-File: mediatek/mt7915_wa.bin +-Version: 20220929104205 +-File: mediatek/mt7915_rom_patch.bin +-Version: 20220929104113a +-File: mediatek/mt7915_eeprom.bin +-Version: 20200821 +-File: mediatek/mt7915_eeprom_dbdc.bin +-Version: 20200821 +- +-File: mediatek/mt7916_wm.bin +-Version: 20230202145005 +-File: mediatek/mt7916_wa.bin +-Version: 20230202143332 +-File: mediatek/mt7916_rom_patch.bin +-Version: 20230202144915a +-File: mediatek/mt7916_eeprom.bin +-Version: 20211130 +- +-File: mediatek/mt7986_wm.bin +-Version: 20221012174725 +-File: mediatek/mt7986_wm_mt7975.bin +-Version: 20221012174805 +-File: mediatek/mt7986_wa.bin +-Version: 20221012174937 +-File: mediatek/mt7986_rom_patch.bin +-Version: 20221012174648a +-File: mediatek/mt7986_rom_patch_mt7975.bin +-Version: 20221012174743a +-File: mediatek/mt7986_wo_0.bin +-Version: 20221012175005 +-File: mediatek/mt7986_wo_1.bin +-Version: 20221012175032 +-File: mediatek/mt7986_eeprom_mt7976.bin +-Version: 20211105 +-File: mediatek/mt7986_eeprom_mt7976_dbdc.bin +-Version: 20220223 +-File: mediatek/mt7986_eeprom_mt7976_dual.bin +-Version: 20211115 +-File: mediatek/mt7986_eeprom_mt7975_dual.bin +-Version: 20220208 +- +-File: mediatek/mt7981_wm.bin +-Version: 20221208201806 +-File: mediatek/mt7981_wa.bin +-Version: 20221208202048 +-File: mediatek/mt7981_rom_patch.bin +-Version: 20221208201745a +-File: mediatek/mt7981_wo.bin +-Version: 20221208202138 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7921 - MediaTek MT7921 Wireless MACs +- +-File: mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin +-Version: 20230526130917a +-File: mediatek/WIFI_RAM_CODE_MT7961_1.bin +-Version: 20230526130958 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7921 - MediaTek MT7921 bluetooth chipset +- +-File: mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin +-Version: 20230526131214 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7922 - MediaTek MT7922 Wireless MACs +- +-File: mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin +-Version: 20230530123154a +-File: mediatek/WIFI_RAM_CODE_MT7922_1.bin +-Version: 20230530123236 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: mt7922 - MediaTek MT7922 bluetooth chipset +- +-File: mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin +-Version: 20230530123531 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- + Driver: nfp - Netronome Flow Processor + + Link: netronome/nic_AMDA0081-0001_1x40.nffw -> nic/nic_AMDA0081-0001_1x40.nffw +@@ -5009,16 +2905,6 @@ Licence: Redistributable. See LICENCE.Netronome for details + + -------------------------------------------------------------------------- + +-Driver: wil6210 - Qualcomm Atheros support for 11ad family of chips +- +-File: wil6210.fw +-File: wil6210.brd +-Version: 5.2.0.18 +- +-Licence: Redistributable. 
See LICENSE.QualcommAtheros_ath10k for details +- +--------------------------------------------------------------------------- +- + Driver: imx-sdma - support for i.MX SDMA driver + + File: imx/sdma/sdma-imx6q.bin +@@ -5077,69 +2963,6 @@ https://github.com/genesi/linux-legacy/tree/master/drivers/mxc/amd-gpu + + -------------------------------------------------------------------------- + +-Driver: qcom_q6v5_pas - Qualcomm remoteproc firmware +- +-File: qcom/apq8016/mba.mbn +-File: qcom/apq8016/modem.mbn +-File: qcom/apq8016/wcnss.mbn +-File: qcom/apq8016/WCNSS_qcom_wlan_nv_sbc.bin +-File: qcom/apq8096/adsp.mbn +-File: qcom/apq8096/adspr.jsn +-File: qcom/apq8096/adspua.jsn +-File: qcom/apq8096/mba.mbn +-File: qcom/apq8096/modem.mbn +-File: qcom/apq8096/modemr.jsn +-File: qcom/sdm845/adsp.mbn +-File: qcom/sdm845/adspr.jsn +-File: qcom/sdm845/adspua.jsn +-File: qcom/sdm845/cdsp.mbn +-File: qcom/sdm845/cdspr.jsn +-File: qcom/sm8250/adsp.mbn +-File: qcom/sm8250/adspr.jsn +-File: qcom/sm8250/adspua.jsn +-File: qcom/sm8250/cdsp.mbn +-File: qcom/sm8250/cdspr.jsn +-File: qcom/sc8280xp/LENOVO/21BX/adspr.jsn +-File: qcom/sc8280xp/LENOVO/21BX/adspua.jsn +-File: qcom/sc8280xp/LENOVO/21BX/battmgr.jsn +-File: qcom/sc8280xp/LENOVO/21BX/cdspr.jsn +-File: qcom/sc8280xp/LENOVO/21BX/qcadsp8280.mbn +-File: qcom/sc8280xp/LENOVO/21BX/qccdsp8280.mbn +-File: qcom/sc8280xp/LENOVO/21BX/qcslpi8280.mbn +-Link: qcom/LENOVO/21BX -> ../sc8280xp/LENOVO/21BX +- +-Licence: Redistributable. See LICENSE.qcom and qcom/NOTICE.txt for details +- +-Binary files supplied originally from +-http://releases.linaro.org/96boards/dragonboard410c/qualcomm/firmware/linux-board-support-package-r1036.1.zip +-http://releases.linaro.org/96boards/dragonboard845c/qualcomm/firmware/RB3_firmware_20221121000000-v5.zip +-http://releases.linaro.org/96boards/rb5/qualcomm/firmware/RB5_firmware_20210331-v4.zip +- +-adsp.mbn has been converted from 20-adsp_split/firmware/adsp.* using +-https://github.com/andersson/pil-squasher +- +-cdsp.mbn has been converted from 21-cdsp_split/firmware/cdsp.* using +-https://github.com/andersson/pil-squasher +- +--------------------------------------------------------------------------- +- +-Driver: qcom_q6v5_mss - Qualcomm modem subsystem firmware +- +-File: qcom/sdm845/mba.mbn +-File: qcom/sdm845/modem_nm.mbn +-File: qcom/sdm845/modemuw.jsn +-Link: qcom/sdm845/modem.mbn -> modem_nm.mbn +- +-Licence: Redistributable. See LICENSE.qcom and qcom/NOTICE.txt for details +- +-Binary files supplied originally from +-http://releases.linaro.org/96boards/dragonboard845c/qualcomm/firmware/RB3_firmware_20221121000000-v5.zip +- +-modem.mbn has been converted from 28-modem/modem.* using +-https://github.com/andersson/pil-squasher +- +--------------------------------------------------------------------------- +- + Driver: mlxsw_spectrum - Mellanox Spectrum switch + + File: mellanox/mlxsw_spectrum-13.1420.122.mfa2 +@@ -5312,26 +3135,6 @@ Licence: Redistributable. See LICENCE.Marvell for details. + + ------------------------------------------------ + +-Driver: wfx - Silicon Labs Wi-Fi Transceiver +- +-File: wfx/wfm_wf200_C0.sec +-Version: 3.14 +- +-File: wfx/brd4001a.pds +-File: wfx/brd8022a.pds +-File: wfx/brd8023a.pds +- +-Licence: Redistributable. See wfx/LICENCE.wf200 for details. 
+- +-The firmware itself originates from https://github.com/SiliconLabs/wfx-firmware +- +-The *.pds files come from https://github.com/SiliconLabs/wfx-pds +- +-They have been processed with the tool "pds_compress" available on +-https://github.com/SiliconLabs/wfx-linux-tools +- +--------------------------------------------------------------------------- +- + Driver: rvu_cptpf - Marvell CPT driver + + File: mrvl/cpt01/ae.out +@@ -5351,31 +3154,3 @@ Version: v1.21 + Licence: Redistributable. See LICENCE.Marvell for details. + + --------------------------------------------------------------------------- +- +-Driver: nxp-sr1xx - NXP Ultra Wide Band driver +-File: nxp/sr150_fw.bin +-Version: 35.00.03 +- +-Licence: Redistributable. See LICENSE.nxp for details +-Originates from https://github.com/NXP/uwb-NXPUWB-FW.git +--------------------------------------------------------------------------- +- +-Driver: btnxpuart - NXP BT UART driver +- +-File: nxp/uartuart8997_bt_v4.bin +-File: nxp/uartiw416_bt_v0.bin +-File: nxp/helper_uart_3000000.bin +-Version: 16.92.21.p81 +- +-File: nxp/uartuart8987_bt.bin +-Version: 16.92.21.p76.5 +- +-File: nxp/uartuart9098_bt_v1.bin +-Version: 17.92.1.p136.24 +- +-File: nxp/uartspi_n61x_v1.bin.se +-Version: 18.99.1.p154.40 +- +-Licence: Redistributable. See LICENSE.nxp for details +- +--------------------------------------------------------------------------- +-- +2.40.1 + diff --git a/packages/linux-firmware/0004-linux-firmware-scsi-Remove-firmware-for-SCSI-devices.patch b/packages/linux-firmware/0004-linux-firmware-scsi-Remove-firmware-for-SCSI-devices.patch new file mode 100644 index 00000000..17de9c06 --- /dev/null +++ b/packages/linux-firmware/0004-linux-firmware-scsi-Remove-firmware-for-SCSI-devices.patch @@ -0,0 +1,191 @@ +From 8d9ded48714bc49d16a2250e1b477244c66d16a9 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Tue, 25 Jul 2023 12:12:03 +0000 +Subject: [PATCH] linux-firmware: scsi: Remove firmware for SCSI devices + +Bottlerocket does not configure drivers for most SCSI devices for any of +its kernels. Without the driver support, there is no point in providing +firmware for these devices. The list below maps driver names as +specified in WHENCE and maps them to kernel config options to enable us +to easily add firmware when necessitated by driver addition. + +* advansys - CONFIG_SCSI_ADVANSYS +* qla1280 - CONFIG_SCSI_QLOGIC_1280 +* qlogicpti - CONFIG_SCSI_QLOGICPTI +* isci - CONFIG_SCSI_ISCI +* BFA/BNA - CONFIG_BNA && CONFIG_SCSI_BFA +* qla2xxx - CONFIG_TCM_QLA2XXX + +Signed-off-by: Leonard Foerster +--- + LICENCE.qla1280 | 23 ------------------ + LICENCE.qla2xxx | 31 ------------------------ + WHENCE | 63 ------------------------------------------------- + 3 files changed, 117 deletions(-) + delete mode 100644 LICENCE.qla1280 + delete mode 100644 LICENCE.qla2xxx + +diff --git a/LICENCE.qla1280 b/LICENCE.qla1280 +deleted file mode 100644 +index 00cd353..0000000 +--- a/LICENCE.qla1280 ++++ /dev/null +@@ -1,23 +0,0 @@ +-Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000 QLogic, Inc. +-All rights reserved. +- +-Redistribution and use in source and binary forms are permitted provided +-that the following conditions are met: +-1. Redistribution of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. 
Redistribution in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +-3. The name of the author may not be used to endorse or promote products +- derived from this software without specific prior written permission +- +-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.qla2xxx b/LICENCE.qla2xxx +deleted file mode 100644 +index 6b3d8ff..0000000 +--- a/LICENCE.qla2xxx ++++ /dev/null +@@ -1,31 +0,0 @@ +-Copyright (c) 2003-2017 QLogic Corporation +-QLogic Linux Fibre Channel Adapter Firmware +- +-Redistribution and use in binary form, without modification, for use in conjunction +-with QLogic authorized products is permitted provided that the following conditions +-are met: +- +-1. Redistribution in binary form must reproduce the above copyright notice, this +- list of conditions and the following disclaimer in the documentation and/or +- other materials provided with the distribution. +-2. The name of QLogic Corporation may not be used to endorse or promote products +- derived from this software without specific prior written permission. +-3. Reverse engineering, decompilation, or disassembly of this firmware is not +- permitted. +- +-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,THIS PROGRAM IS +-PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR +-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +-GOODS OR SERVICES; LOSS OF USE,DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +-LIABILITY,OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT CREATE OR GIVE +-GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR OTHERWISE IN ANY INTELLECTUAL +-PROPERTY RIGHTS (PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY +-RIGHT) EMBODIED IN ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN +-COMBINATION WITH THIS PROGRAM. +diff --git a/WHENCE b/WHENCE +index edf6f75..bf5204f 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -8,29 +8,6 @@ kernel. + + -------------------------------------------------------------------------- + +-Driver: advansys - AdvanSys SCSI +- +-File: advansys/mcode.bin +-File: advansys/3550.bin +-File: advansys/38C0800.bin +-File: advansys/38C1600.bin +- +-Licence: BSD, no source available. 
+- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: qla1280 - Qlogic QLA 1240/1x80/1x160 SCSI support +- +-File: qlogic/1040.bin +-File: qlogic/1280.bin +-File: qlogic/12160.bin +- +-Licence: Redistributable. See LICENCE.qla1280 for details +- +--------------------------------------------------------------------------- +- + Driver: kaweth -- USB KLSI KL5USB101-based Ethernet device + + File: kaweth/new_code.bin +@@ -265,27 +242,6 @@ http://www.zdomain.com/a56.html + + -------------------------------------------------------------------------- + +-Driver: qla2xxx - QLogic QLA2XXX Fibre Channel +- +-File: ql2100_fw.bin +-Version: 1.19.38 TP +-File: ql2200_fw.bin +-Version: 2.02.08 TP +-File: ql2300_fw.bin +-Version: 3.03.28 IPX +-File: ql2322_fw.bin +-Version: 3.03.28 IPX +-File: ql2400_fw.bin +-Version: 8.07.00 MID +-File: ql2500_fw.bin +-Version: 8.07.00 MIDQ +- +-Licence: Redistributable. See LICENCE.qla2xxx for details +- +-Available from http://ldriver.qlogic.com/firmware/ +- +--------------------------------------------------------------------------- +- + Driver: orinoco - Agere/Prism/Symbol Orinoco support + + File: agere_sta_fw.bin +@@ -631,16 +587,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: qlogicpti - PTI Qlogic, ISP Driver +- +-File: qlogic/isp1000.bin +- +-Licence: Unknown +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: myri_sbus - MyriCOM Gigabit Ethernet + + File: myricom/lanai.bin +@@ -1810,15 +1756,6 @@ Licence: Redistributable. See LICENCE.ene_firmware for details. + + -------------------------------------------------------------------------- + +-Driver: isci -- Intel C600 SAS controller driver +- +-File: isci/isci_firmware.bin +-Source: isci/ +- +-Licence: GPLv2. See GPL-2 for details. +- +--------------------------------------------------------------------------- +- + Driver: rp2 -- Comtrol RocketPort 2 serial driver + + File: rp2.fw +-- +2.40.1 + diff --git a/packages/linux-firmware/0005-linux-firmware-usb-remove-firmware-for-USB-Serial-PC.patch b/packages/linux-firmware/0005-linux-firmware-usb-remove-firmware-for-USB-Serial-PC.patch new file mode 100644 index 00000000..c4cf7552 --- /dev/null +++ b/packages/linux-firmware/0005-linux-firmware-usb-remove-firmware-for-USB-Serial-PC.patch @@ -0,0 +1,901 @@ +From abd8555549e48367b9edb9baaa61b40afcfd6231 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Tue, 25 Jul 2023 14:27:14 +0000 +Subject: [PATCH] linux-firmware: usb: remove firmware for USB/Serial/PCMCIA + devices + +Bottlerocket does not configure most drivers for USB,PCMCIA, and serial devices +for any of its kernels. Without those drivers, the firmware for these +devices is not of use and does not need to be shipped by us. + +The following list maps the driver names as specified in WHENCE to +kernel config options enabling drivers. That way we are able to find the +firmware for any driver we may be enabling in the future. 
+ +* kaweth - CONFIG_USB_KAWETH +* keyspan - CONFIG_SERIAL_KEYSPAN +* keyspan_pda - CONFIG_SERIAL_KEYSPAN_PDA +* emi26 - CONFIG_USB_EMI26 +* emi62 - CONFIG_USB_EMI62 +* ti_usb_3410_5052 - CONFIG_USB_SERIAL_TI +* whiteheat - CONFIG_USB_SERIAL_WHITEHEAT +* io_edgeport - CONFIG_USB_SERIAL_EDGEPORT +* io_ti - CONFIG_USB_SERIAL_EDGEPORT_TI +* orinoco - CONFIG_ORINOCO_USB +* usbdux - CONFIG_COMEDI_USBDUX +* usbduxfast - CONFIG_COMEDI_USBDUXFAST +* usbduxsigma - CONFIG_COMEDI_USBDUXSIGMA +* s2255drv - CONFIG_USB_S2255 +* ueagle-atm - CONFIG_USB_EAGLEATM +* vt6656 - CONFIG_VT6656 +* ene-ub6250 - CONFIG_USB_STORAGE_ENE_UB6250 +* mxu11x0 - CONFIG_USB_SERIAL_MXUPORT11 +* mxuport - CONFIG_USB_SERIAL_MXUPORT +* xhci-rcar - CONFIG_USB_XHCI_RCAR +* xhci-tegra - CONFIG_USB_XHCI_TEGRA +* atusb - CONFIG_IEEE802154_ATUSB +* pcnet_cs - CONFIG_PCMCIA_PCNET +* 3c589_cs - CONFIG_PCMCIA_3C589 +* 3c574_cs - CONFIG_PCMCIA_3C574 +* serial_cs - CONFIG_SERIAL_8250_CS +* smc91c92_cs - CONFIG_PCMCIA_SMC91C92 +* rp2 - CONFIG_SERIAL_RP2 + +Signed-off-by: Leonard Foerster +--- + LICENCE.agere | 77 ------ + LICENCE.ene_firmware | 14 - + LICENCE.kaweth | 28 -- + LICENCE.moxa | 16 -- + LICENCE.r8a779x_usb3 | 26 -- + LICENCE.ueagle-atm4-firmware | 39 --- + LICENCE.via_vt6656 | 25 -- + WHENCE | 495 ----------------------------------- + 8 files changed, 720 deletions(-) + delete mode 100644 LICENCE.agere + delete mode 100644 LICENCE.ene_firmware + delete mode 100644 LICENCE.kaweth + delete mode 100644 LICENCE.moxa + delete mode 100644 LICENCE.r8a779x_usb3 + delete mode 100644 LICENCE.ueagle-atm4-firmware + delete mode 100644 LICENCE.via_vt6656 + +diff --git a/LICENCE.agere b/LICENCE.agere +deleted file mode 100644 +index c11466c..0000000 +--- a/LICENCE.agere ++++ /dev/null +@@ -1,77 +0,0 @@ +-agere_sta_fw.bin -- 9.48 Hermes I +-agere_ap_fw.bin -- 9.48 Hermes I +- +-The above firmware images were compiled from the Agere linux driver +-wl_lkm_718_release.tar.gz, and dumped. The driver is coverred by the +-following copyright and software license. +- +- * SOFTWARE LICENSE +- * +- * This software is provided subject to the following terms and conditions, +- * which you should read carefully before using the software. Using this +- * software indicates your acceptance of these terms and conditions. If you do +- * not agree with these terms and conditions, do not use the software. +- * +- * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved +- * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved +- * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved +- * All rights reserved. +- * +- * Redistribution and use in source or binary forms, with or without +- * modifications, are permitted provided that the following conditions are met: +- * +- * . Redistributions of source code must retain the above copyright notice, this +- * list of conditions and the following Disclaimer as comments in the code as +- * well as in the documentation and/or other materials provided with the +- * distribution. +- * +- * . Redistributions in binary form must reproduce the above copyright notice, +- * this list of conditions and the following Disclaimer in the documentation +- * and/or other materials provided with the distribution. +- * +- * . Neither the name of Agere Systems Inc. nor the names of the contributors +- * may be used to endorse or promote products derived from this software +- * without specific prior written permission. 
+- * +- * Disclaimer +- * +- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF +- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY +- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN +- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY +- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT +- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +- * DAMAGE. +- +-The following statement from Agere clarifies the status of the firmware +- +---- +-I would like to confirm that the two drivers; Linux LKM Wireless Driver +-Source Code, Version 7.18 and Linux LKM Wireless Driver Source Code, +-Version 7.22 comply with Open Source BSD License. Therefore the source +-code can be distributed in unmodified or modified form consistent with +-the terms of the license. +- +-The Linux driver architecture was based on two modules, the MSF (Module +-specific functions) and the HCF (Hardware Control Functions). Included +-in the HCF is run-time firmware (binary format) which is downloaded into +-the RAM of the Hermes 1/2/2.5 WMAC. +- +-This hex coded firmware is not based on any open source software and +-hence it is not subject to any Open Source License. The firmware was +-developed by Agere and runs on the DISC processor embedded within the +-Hermes 1/2/2.5 Wireless MAC devices. +- +-Hope this helps. +- +-Sincerely, +- +-Viren Pathare +-Intellectual Property Licensing Manager +-Agere +---- +diff --git a/LICENCE.ene_firmware b/LICENCE.ene_firmware +deleted file mode 100644 +index 08f2b01..0000000 +--- a/LICENCE.ene_firmware ++++ /dev/null +@@ -1,14 +0,0 @@ +-copyright (c) 2011, ENE TECHNOLOGY INC. +- +-Permission to use, copy, modify, and/or distribute this software for any purpose +-with or without fee is hereby granted, provided that the above copyright notice +-and this permission notice appear in all copies. +- +-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +-WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT +-SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR +-CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +-NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +-WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +diff --git a/LICENCE.kaweth b/LICENCE.kaweth +deleted file mode 100644 +index 75a59c0..0000000 +--- a/LICENCE.kaweth ++++ /dev/null +@@ -1,28 +0,0 @@ +-Copyright 1999 Kawasaki LSI. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions +-are met: +-1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +-2. 
Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +-3. All advertising materials mentioning features or use of this software +- must display the following acknowledgement: +- This product includes software developed by Kawasaki LSI. +-4. Neither the name of the company nor the names of its contributors +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY KAWASAKI LSI ``AS IS'' AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL KAWASAKI LSI BE LIABLE FOR ANY DIRECT, +-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.moxa b/LICENCE.moxa +deleted file mode 100644 +index 120017b..0000000 +--- a/LICENCE.moxa ++++ /dev/null +@@ -1,16 +0,0 @@ +-The software accompanying this license statement (the “Software”) +-is the property of Moxa Inc. (the “Moxa”), and is protected by +-United States and International Copyright Laws and International +-treaty provisions. No ownership rights are granted by this +-Agreement or possession of the Software. Therefore, you must treat +-the Licensed Software like any other copyrighted material. Your +-rights and obligations in its use are described as follows: +- +-1. You may freely redistribute this software under this license. +-2. You may freely download and use this software on Moxa's device. +-3. You may not modify or attempt to reverse engineer the software, or +- make any attempt to change or even examine the source code of the +- software. +-4. You may not re-license or sub-license the software to any person or +- business, using any other license. +-5. Moxa(r) is worldwide registered trademark. +diff --git a/LICENCE.r8a779x_usb3 b/LICENCE.r8a779x_usb3 +deleted file mode 100644 +index e2afcc9..0000000 +--- a/LICENCE.r8a779x_usb3 ++++ /dev/null +@@ -1,26 +0,0 @@ +-Copyright (c) 2014, Renesas Electronics Corporation +-All rights reserved. +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +- +-1. Redistribution in binary form must reproduce the above copyright notice, +- this list of conditions and the following disclaimer in the documentation +- and/or other materials provided with the distribution. +-2. The name of Renesas Electronics Corporation may not be used to endorse or +- promote products derived from this software without specific prior written +- permission. +-3. Reverse engineering, decompilation, or disassembly of this software is +- not permitted. +- +-THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS ELECTRONICS CORPORATION DISCLAIMS +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, AND +-NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
IN NO EVENT SHALL RENESAS ELECTRONICS +-CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +-OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.ueagle-atm4-firmware b/LICENCE.ueagle-atm4-firmware +deleted file mode 100644 +index 333675d..0000000 +--- a/LICENCE.ueagle-atm4-firmware ++++ /dev/null +@@ -1,39 +0,0 @@ +-This license applies to eagle4 firmware & DSPcode +-namely, the files eagleIV.fw DSP4p.bin* +- +-| Copyright (2006) Ikanos Communications, Inc. +-| +-| Redistribution and use in source and binary forms, with or without +-| modification, are permitted provided that the following +-| conditions are met: +-| +-| * Redistribution of source code must retain the above copyright +-| notice, this list of conditions and the following disclaimer. +-| +-| * Redistribution in binary form must reproduce the above +-| copyright notice, this list of conditions and the following +-| disclaimer in the documentation and/or other materials provided +-| with the distribution. +-| +-| * The name of Ikanos Corporation may not be used to endorse +-| or promote products derived from this source code without specific +-| prior written consent of Ikanos Corporation. +-| +-| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +-| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-| USER ACKNOWLEDGES AND AGREES THAT THE PURCHASE OR USE OF THIS SOFTWARE WILL +-| NOT CREATE OR GIVE GROUNDS FOR A +-| LICENSE BY IMPLICATION, ESTOPPEL, OR OTHERWISE IN ANY INTELLECTUAL +-| PROPERTY RIGHTS (PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER +-| PROPRIETARY RIGHT) EMBODIED IN ANY OTHER IKANOS HARDWARE OR SOFTWARE +-| EITHER SOLELY OR IN COMBINATION WITH THIS SOFTWARE. +- +diff --git a/LICENCE.via_vt6656 b/LICENCE.via_vt6656 +deleted file mode 100644 +index f231f98..0000000 +--- a/LICENCE.via_vt6656 ++++ /dev/null +@@ -1,25 +0,0 @@ +-The following license applies to the binary-only VT6656 firmware +-as contained in the file "vntwusb.fw" +-================================================================ +-Copyright 1998-2010 VIA Technologies, Inc. All Rights Reserved. 
+- +-Permission is hereby granted, free of charge, to any person +-obtaining a copy of this software and associated documentation +-files (the "Software"), to deal in the Software without +-restriction, including without limitation the rights to use, +-copy, modify, merge, publish, distribute, sublicense, and/or sell +-copies of the Software, and to permit persons to whom the +-Software is furnished to do so, subject to the following +-conditions: +- +-The above copyright notice and this permission notice shall be +-included in all copies or substantial portions of the Software. +- +-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +-OTHER DEALINGS IN THE SOFTWARE. +diff --git a/WHENCE b/WHENCE +index bf5204f..2ed9e9a 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -8,226 +8,6 @@ kernel. + + -------------------------------------------------------------------------- + +-Driver: kaweth -- USB KLSI KL5USB101-based Ethernet device +- +-File: kaweth/new_code.bin +-File: kaweth/new_code_fix.bin +-File: kaweth/trigger_code.bin +-File: kaweth/trigger_code_fix.bin +- +-Licence: Redistributable. See LICENCE.kaweth for details +- +-Found in hex form in the kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: keyspan -- USB Keyspan USA-xxx serial device +- +-File: keyspan/mpr.fw +-File: keyspan/usa18x.fw +-File: keyspan/usa19.fw +-File: keyspan/usa19qi.fw +-File: keyspan/usa19qw.fw +-File: keyspan/usa19w.fw +-File: keyspan/usa28.fw +-File: keyspan/usa28xa.fw +-File: keyspan/usa28xb.fw +-File: keyspan/usa28x.fw +-File: keyspan/usa49w.fw +-File: keyspan/usa49wlc.fw +- +-Converted from Intel HEX files, used in our binary representation of ihex. +- +-Original licence information: +- +- Copyright (C) 1999-2001 +- Keyspan, A division of InnoSys Incorporated ("Keyspan") +- +- as an unpublished work. This notice does not imply unrestricted or +- public access to the source code from which this firmware image is +- derived. Except as noted below this firmware image may not be +- reproduced, used, sold or transferred to any third party without +- Keyspan's prior written consent. All Rights Reserved. +- +- Permission is hereby granted for the distribution of this firmware +- image as part of a Linux or other Open Source operating system kernel +- in text or binary form as required. +- +- This firmware may not be modified and may only be used with +- Keyspan hardware. Distribution and/or Modification of the +- keyspan.c driver which includes this firmware, in whole or in +- part, requires the inclusion of this statement." +- +--------------------------------------------------------------------------- +- +-Driver: keyspan_pda -- USB Keyspan PDA single-port serial device +- +-File: keyspan_pda/keyspan_pda.fw +-Source: keyspan_pda/keyspan_pda.S +- +-File: keyspan_pda/xircom_pgs.fw +-Source: keyspan_pda/xircom_pgs.S +- +-Source: keyspan_pda/Makefile +- +-Licence: GPLv2 or later. See GPL-2 and GPL-3 for details. +- +-Compiled from original 8051 source into Intel HEX, used in our binary ihex form. 
+- +--------------------------------------------------------------------------- +- +-Driver: emi26 -- EMI 2|6 USB Audio interface +- +-File: emi26/bitstream.fw +-Version: 1.1.1.131 +-Info: DATE=2001dec06 +- +-File: emi26/firmware.fw +-Version: 1.0.2.916 +-Info: DATE=12.02.2002 +- +-File: emi26/loader.fw +- +-Converted from Intel HEX files, used in our binary representation of ihex. +- +-Original licence information: +-/* +- * This firmware is for the Emagic EMI 2|6 Audio Interface +- * +- * The firmware contained herein is Copyright (c) 1999-2002 Emagic +- * as an unpublished work. This notice does not imply unrestricted +- * or public access to this firmware which is a trade secret of Emagic, +- * and which may not be reproduced, used, sold or transferred to +- * any third party without Emagic's written consent. All Rights Reserved. +- * +- * Permission is hereby granted for the distribution of this firmware +- * image as part of a Linux or other Open Source operating system kernel +- * in text or binary form as required. +- * +- * This firmware may not be modified and may only be used with the +- * Emagic EMI 2|6 Audio Interface. Distribution and/or Modification of +- * any driver which includes this firmware, in whole or in part, +- * requires the inclusion of this statement. +- */ +- +--------------------------------------------------------------------------- +- +-Driver: emi62 -- EMI 6|2m USB Audio interface +- +-File: emi62/bitstream.fw +-Version: 1.0.0.191 +-Info: DATE= 2002oct28 +- +-File: emi62/loader.fw +-Version: 1.0.2.002 +-Info: DATE=10.01.2002 +- +-File: emi62/midi.fw +-Version: 1.04.062 +-Info: DATE=16.10.2002 +- +-File: emi62/spdif.fw +-Version: 1.04.062 +-Info: DATE=16.10.2002 +- +-Converted from Intel HEX files, used in our binary representation of ihex. +- +-Original licence information: None +- +--------------------------------------------------------------------------- +- +-Driver: ti_usb_3410_5052 -- USB TI 3410/5052 serial device +- +-File: ti_3410.fw +-Info: firmware 9/10/04 FW3410_Special_StartWdogOnStartPort +- +-File: ti_5052.fw +-Info: firmware 9/18/04 +- +-Licence: Allegedly GPLv2+, but no source visible. Marked: +- Copyright (C) 2004 Texas Instruments +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: ti_usb_3410_5052 -- Multi-Tech USB cell modems +- +-File: mts_cdma.fw +-File: mts_gsm.fw +-File: mts_edge.fw +- +-Licence: "all firmware components are redistributable in binary form" +- per support@multitech.com +- Copyright (C) 2005 Multi-Tech Systems, Inc. +- +-Found in hex form in ftp://ftp.multitech.com/wireless/wireless_linux.zip +- +--------------------------------------------------------------------------- +- +-Driver: ti_usb_3410_5052 -- Multi-Tech USB fax modems +- +-File: mts_mt9234mu.fw +-File: mts_mt9234zba.fw +- +-Licence: Unknown +- +--------------------------------------------------------------------------- +- +-Driver: whiteheat -- USB ConnectTech WhiteHEAT serial device +- +-File: whiteheat.fw +-Version: 4.06 +- +-File: whiteheat_loader.fw +- +-Licence: Allegedly GPLv2, but no source visible. 
Marked: +- Copyright (C) 2000-2002 ConnectTech Inc +- +-Debug loader claims the following behaviour: +- Port 1 LED flashes when the vend_ax program is running +- Port 2 LED flashes when any SETUP command arrives +- Port 3 LED flashes when any valid VENDOR request occurs +- Port 4 LED flashes when the EXTERNAL RAM DOWNLOAD request occurs +- +-Converted from Intel HEX files, used in our binary representation of ihex. +- +--------------------------------------------------------------------------- +- +-Driver: io_edgeport - USB Inside Out Edgeport Serial Driver +- +-File: edgeport/boot.fw +-File: edgeport/boot2.fw +-File: edgeport/down.fw +-File: edgeport/down2.fw +- +-Licence: Allegedly GPLv2+, but no source visible. Marked: +-//************************************************************** +-//* Edgeport/4 Binary Image +-//* Generated by HEX2C v1.06 +-//* Copyright (C) 1998 Inside Out Networks, All rights reserved. +-//************************************************************** +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: io_ti - USB Inside Out Edgeport Serial Driver +-(TI Devices) +- +-File: edgeport/down3.bin +- +-Licence: +-//************************************************************** +-//* Edgeport Binary Image (for TI based products) +-//* Generated by TIBin2C v2.00 (watchport) +-//* Copyright (C) 2001 Inside Out Networks, All rights reserved. +-//************************************************************** +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: dsp56k - Atari DSP56k support + + File: dsp56k/bootstrap.bin +@@ -242,17 +22,6 @@ http://www.zdomain.com/a56.html + + -------------------------------------------------------------------------- + +-Driver: orinoco - Agere/Prism/Symbol Orinoco support +- +-File: agere_sta_fw.bin +-Version: 9.48 Hermes I +-File: agere_ap_fw.bin +-Version: 9.48 Hermes I +- +-Licence: Redistributable. See LICENCE.agere for details +- +--------------------------------------------------------------------------- +- + Driver: cassini - Sun Cassini + + File: sun/cassini.bin +@@ -503,90 +272,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: pcnet_cs - NE2000 compatible PCMCIA adapter +- +-File: cis/LA-PCM.cis +-File: cis/PCMLM28.cis +-File: cis/DP83903.cis +-File: cis/NE2K.cis +-File: cis/tamarack.cis +-File: cis/PE-200.cis +-File: cis/PE520.cis +-Source: cis/ +- +-Licence: Dual GPLv2/MPL +- +-Originally developed by the pcmcia-cs project +-Copyright (C) 1998, 1999, 2000 David A. Hinds +- +--------------------------------------------------------------------------- +- +-Driver: 3c589_cs - 3Com PCMCIA adapter +- +-File: cis/3CXEM556.cis +-Source: cis/src/3CXEM556.cis +- +-Licence: Dual GPLv2/MPL +- +-Originally developed by the pcmcia-cs project +-Copyright (C) 1998, 1999, 2000 David A. Hinds +- +--------------------------------------------------------------------------- +- +-Driver: 3c574_cs - 3Com PCMCIA adapter +- +-File: cis/3CCFEM556.cis +-Source: cis/src/3CCFEM556.cis +- +-Licence: Dual GPLv2/MPL +- +-Originally developed by the pcmcia-cs project +-Copyright (C) 1998, 1999, 2000 David A. 
Hinds +- +--------------------------------------------------------------------------- +- +-Driver: serial_cs - Serial PCMCIA adapter +- +-File: cis/MT5634ZLX.cis +-File: cis/RS-COM-2P.cis +-File: cis/COMpad2.cis +-File: cis/COMpad4.cis +-Source: cis/src/MT5634ZLX.cis +-Source: cis/src/RS-COM-2P.cis +-Source: cis/src/COMpad2.cis +-Source: cis/src/COMpad4.cis +- +-Licence: Dual GPLv2/MPL +- +-Originally developed by the pcmcia-cs project +-Copyright (C) 1998, 1999, 2000 David A. Hinds +- +--------------------------------------------------------------------------- +- +-Driver: serial_cs - Serial PCMCIA adapter +- +-File: cis/SW_555_SER.cis +-File: cis/SW_7xx_SER.cis +-File: cis/SW_8xx_SER.cis +- +-Licence: GPLv3. See GPL-3 for details. +- +-Copyright Sierra Wireless +- +--------------------------------------------------------------------------- +- +-Driver: smc91c92_cs - SMC 91Cxx PCMCIA +- +-File: ositech/Xilinx7OD.bin +- +-Licence: Allegedly GPL, but no source visible. Marked: +- This file contains the firmware of Seven of Diamonds from OSITECH. +- (Special thanks to Kevin MacPherson of OSITECH) +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: myri_sbus - MyriCOM Gigabit Ethernet + + File: myricom/lanai.bin +@@ -660,19 +345,6 @@ Available from http://ldriver.qlogic.com/firmware/netxen_nic/new/ + + -------------------------------------------------------------------------- + +-Driver: usbdux/usbduxfast/usbduxsigma - usbdux data acquisition cards +- +-File: usbdux_firmware.bin +-File: usbduxfast_firmware.bin +-File: usbduxsigma_firmware.bin +-Source: usbdux/ +- +-Licence: GPLv2. See GPL-2 for details. +- +-Provided from the author, Bernd Porr +- +--------------------------------------------------------------------------- +- + Driver: mga - Matrox G200/G400/G550 + + File: matrox/g200_warp.fw +@@ -1571,23 +1243,6 @@ Licence: Redistributable. See LICENSE.amdgpu for details. + + -------------------------------------------------------------------------- + +-Driver: s2255drv +- +-File: f2255usb.bin +-Version: 1.2.8 +- +-Licence: Redistributable. +- +- Sensoray grants permission to use and redistribute these firmware +- files for use with Sensoray devices, but not as a part of the Linux +- kernel or in any other form which would require these files themselves +- to be covered by the terms of the GNU General Public License. +- These firmware files are distributed in the hope that they will be +- useful, but WITHOUT ANY WARRANTY; without even the implied warranty +- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +--------------------------------------------------------------------------- +- + Driver: ib_qib - QLogic Infiniband + + File: qlogic/sd7220.fw +@@ -1662,47 +1317,6 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: ueagle-atm - Driver for USB ADSL Modems based on Eagle IV Chipset +- +-File: ueagle-atm/CMV4p.bin.v2 +-File: ueagle-atm/DSP4p.bin +-File: ueagle-atm/eagleIV.fw +-Version: 1.0 +- +-Licence: Redistributable. 
See LICENCE.ueagle-atm4-firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: ueagle-atm - Driver for USB ADSL Modems based on Eagle I,II,III +- +-File: ueagle-atm/930-fpga.bin +-File: ueagle-atm/CMVeiWO.bin +-File: ueagle-atm/CMVepFR10.bin +-File: ueagle-atm/DSP9p.bin +-File: ueagle-atm/eagleIII.fw +-File: ueagle-atm/adi930.fw +-File: ueagle-atm/CMVep.bin +-File: ueagle-atm/CMVepFR.bin +-File: ueagle-atm/DSPei.bin +-File: ueagle-atm/CMV9i.bin +-File: ueagle-atm/CMVepES03.bin +-File: ueagle-atm/CMVepIT.bin +-File: ueagle-atm/DSPep.bin +-File: ueagle-atm/CMV9p.bin +-File: ueagle-atm/CMVepES.bin +-File: ueagle-atm/CMVepWO.bin +-File: ueagle-atm/eagleI.fw +-File: ueagle-atm/CMVei.bin +-File: ueagle-atm/CMVepFR04.bin +-File: ueagle-atm/DSP9i.bin +-File: ueagle-atm/eagleII.fw +-Version: 1.1 +- +-Licence: Redistributable. Based on +- https://mail.gna.org/public/eagleusb-dev/2004-11/msg00172.html +- +--------------------------------------------------------------------------- +- + Driver: vxge - Exar X3100 Series 10GbE PCIe I/O Virtualized Server Adapter + + File: vxge/X3fw.ncf +@@ -1720,14 +1334,6 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: vt6656 - VIA VT6656 USB wireless driver +- +-File: vntwusb.fw +- +-Licence: Redistributable. See LICENCE.via_vt6656 for details. +- +--------------------------------------------------------------------------- +- + Driver: myri10ge - Myri10GE 10GbE NIC driver + + File: myri10ge_eth_z8e.dat +@@ -1742,37 +1348,6 @@ Version: 1.4.57 + + License: Redistributable. See LICENCE.myri10ge_firmware for details. + +--------------------------------------------------------------------------- +-Driver: ene-ub6250 -- ENE UB6250 SD card reader driver +- +-File: ene-ub6250/sd_init1.bin +-File: ene-ub6250/sd_init2.bin +-File: ene-ub6250/sd_rdwr.bin +-File: ene-ub6250/ms_init.bin +-File: ene-ub6250/msp_rdwr.bin +-File: ene-ub6250/ms_rdwr.bin +- +-Licence: Redistributable. See LICENCE.ene_firmware for details. +- +--------------------------------------------------------------------------- +- +-Driver: rp2 -- Comtrol RocketPort 2 serial driver +- +-File: rp2.fw +- +-Licence: Redistributable. +- +-Copyright (C) 2013 Comtrol Corporation +- +-Comtrol grants permission to use and redistribute these firmware +-files for use with Comtrol devices, but not as part of the Linux +-kernel or in any other form which would require these files themselves +-to be covered by the terms of the GNU General Public License. +- +-These firmware files are distributed in the hope that they will be +-useful, but WITHOUT ANY WARRANTY; without even the implied warranty +-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- + -------------------------------------------------------------------------- + + Driver: ccp - Platform Security Processor (PSP) device +@@ -1811,34 +1386,6 @@ License: Redistributable. See LICENSE.amd-ucode for details + + -------------------------------------------------------------------------- + +-Driver: mxu11x0 - MOXA UPort 11x0 USB Serial hub driver +- +-File: moxa/moxa-1110.fw +-File: moxa/moxa-1130.fw +-File: moxa/moxa-1131.fw +-File: moxa/moxa-1150.fw +-File: moxa/moxa-1151.fw +- +-License: Redistributable. 
See LICENCE.moxa for details +- +--------------------------------------------------------------------------- +- +-Driver: mxuport - MOXA UPort USB Serial hub driver +- +-File: moxa/moxa-1250.fw +-File: moxa/moxa-1251.fw +-File: moxa/moxa-1410.fw +-File: moxa/moxa-1450.fw +-File: moxa/moxa-1451.fw +-File: moxa/moxa-1613.fw +-File: moxa/moxa-1618.fw +-File: moxa/moxa-1653.fw +-File: moxa/moxa-1658.fw +- +-License: Redistributable. See LICENCE.moxa for details +- +--------------------------------------------------------------------------- +- + Driver: qat - Intel(R) QAT crypto accelerator + + File: qat_895xcc.bin +@@ -1855,48 +1402,6 @@ Licence: Redistributable. See LICENCE.qat_firmware for details + + -------------------------------------------------------------------------- + +-Driver: xhci-rcar -- Renesas R-Car Gen2/3 USB 3.0 host controller driver +- +-File: r8a779x_usb3_v1.dlmem +-File: r8a779x_usb3_v2.dlmem +-File: r8a779x_usb3_v3.dlmem +- +-Licence: Redistributable. See LICENCE.r8a779x_usb3 for details. +- +--------------------------------------------------------------------------- +- +-Driver: xhci-tegra -- NVIDIA Tegra XHCI driver +- +-File: nvidia/tegra124/xusb.bin +-Version: v45.46 +- +-File: nvidia/tegra210/xusb.bin +-Version: v50.24 +- +-File: nvidia/tegra186/xusb.bin +-Version: v55.15 +- +-File: nvidia/tegra194/xusb.bin +-Version: v60.06 +- +-Licence: Redistributable. See LICENCE.nvidia for details +- +--------------------------------------------------------------------------- +- +-Driver: atusb - ATUSB IEEE 802.15.4 transceiver driver +- +-File: atusb/atusb-0.2.dfu +-Version: 0.2 +-File: atusb/atusb-0.3.dfu +-Version: 0.3 +-File: atusb/rzusb-0.3.bin +-Version: 0.3 +-Info: atusb/ChangeLog +- +-Licence: GPLv2 or later. See GPL-2 and GPL-3 for details. +- +--------------------------------------------------------------------------- +- + Driver: liquidio -- Cavium LiquidIO driver + + File: liquidio/lio_23xx_nic.bin +-- +2.40.1 + diff --git a/packages/linux-firmware/0006-linux-firmware-ethernet-Remove-firmware-for-ethernet.patch b/packages/linux-firmware/0006-linux-firmware-ethernet-Remove-firmware-for-ethernet.patch new file mode 100644 index 00000000..d5bdb124 --- /dev/null +++ b/packages/linux-firmware/0006-linux-firmware-ethernet-Remove-firmware-for-ethernet.patch @@ -0,0 +1,539 @@ +From 138cb336108faab12961813bdf7ccc1ae4e1822b Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Wed, 26 Jul 2023 08:40:14 +0000 +Subject: [PATCH] linux-firmware: ethernet: Remove firmware for ethernet/IB + devices + +Bottlerocket does not ship drivers for older network equipment. Without +the drivers shipping the firmware for thesde devices does not make +sense. Drop the firmware to reduce image size. + +The following list maps driver names as specified in WHENCE to kernel +config options enabling the driver. This way we can easily decide if we +need to add firmware back into the package when we enable new drivers. 
+ +* cassini - CONFIG_CASSINI +* slicoss - CONFIG_SLICOSS +* sxg - CONFIG_SXG +* cxgb3 - CONFIG_CHELSIO_T3 +* e100 - CONFIG_E100 +* acenic - CONFIG_ACENIC +* tehuti - CONFIG_TEHUTI +* bnx2 - CONFIG_BNX2 +* ib_qib - CONFIG_INFINIBAND_QIB +* myri_sbus - CONFIG_MYRI_SBUS (dropped upstream after 3.0) +* hfi1 - CONFIG_INFINIBAND_HFI1 +* starfire - CONFIG_ADAPTEC_STARFIRE +* typhoon - CONFIG_TYPHOON +* vxge - CONFIG_VXGE +* mscc-phy - CONFIG_MICROSEMI_PHY + +Signed-off-by: Leonard Foerster +--- + LICENCE.e100 | 28 ---- + LICENCE.microchip | 40 ------ + LICENSE.hfi1_firmware | 39 ------ + WHENCE | 303 ------------------------------------------ + 4 files changed, 410 deletions(-) + delete mode 100644 LICENCE.e100 + delete mode 100644 LICENCE.microchip + delete mode 100644 LICENSE.hfi1_firmware + +diff --git a/LICENCE.e100 b/LICENCE.e100 +deleted file mode 100644 +index 0553817..0000000 +--- a/LICENCE.e100 ++++ /dev/null +@@ -1,28 +0,0 @@ +-Copyright (c) 1999-2001, Intel Corporation +- +-All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are met: +- +- 1. Redistributions of source code must retain the above copyright notice, +- this list of conditions and the following disclaimer. +- +- 2. Redistributions in binary form must reproduce the above copyright notice, +- this list of conditions and the following disclaimer in the documentation +- and/or other materials provided with the distribution. +- +- 3. Neither the name of Intel Corporation nor the names of its contributors +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' +-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENCE.microchip b/LICENCE.microchip +deleted file mode 100644 +index f270c99..0000000 +--- a/LICENCE.microchip ++++ /dev/null +@@ -1,40 +0,0 @@ +-Copyright (C) 2018 Microchip Technology Incorporated and its subsidiaries. +-All rights reserved. +- +-REDISTRIBUTION: Permission is hereby granted by Microchip Technology +-Incorporated (Microchip), free of any license fees, to any person obtaining a +-copy of this firmware (the "Software"), to install, reproduce, copy and +-distribute copies, in binary form, hexadecimal or equivalent formats only, the +-Software and to permit persons to whom the Software is provided to do the same, +-subject to the following conditions: +- +-* Any redistribution of the Software must reproduce the above copyright notice, +- this license notice, and the following disclaimers and notices in the +- documentation and/or other materials provided with the Software. 
+- +-* Neither the name of Microchip, its products nor the names of its suppliers +- may be used to endorse or promote products derived from this Software without +- specific prior written permission. +- +-* No reverse engineering, decompilation, or disassembly of this Software is +- permitted. +- +-Limited patent license. Microchip grants a world-wide, royalty-free, +-non-exclusive, revocable license under any patents that it now has or hereafter +-may have, own or control related to the Software to make, have made, use, +-import, offer to sell and sell ("Utilize") this Software, but solely to the +-extent that any such patent is necessary to Utilize the Software in conjunction +-with Microchip processors. The patent license shall not apply to any other +-combinations which include this Software nor to any other Microchip patents or +-patent rights. No hardware per se is licensed hereunder. +- +-DISCLAIMER: THIS SOFTWARE IS PROVIDED BY MICROCHIP "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE +-DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.hfi1_firmware b/LICENSE.hfi1_firmware +deleted file mode 100644 +index 01f0932..0000000 +--- a/LICENSE.hfi1_firmware ++++ /dev/null +@@ -1,39 +0,0 @@ +-Copyright (c) 2015, Intel Corporation. +-All rights reserved. +- +-Redistribution. +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials provided +- with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers may +- be used to endorse or promote products derived from this software without +- specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software is +- permitted. +- +-Limited patent license. +- +-Intel Corporation grants a world-wide, royalty-free, non-exclusive license +-under patents it now or hereafter owns or controls to make, have made, use, +-import, offer to sell and sell (“Utilize”) this software, but solely to the +-extent that any such patent is necessary to Utilize the software alone. The +-patent license shall not apply to any combinations which include this software. +-No hardware per se is licensed hereunder. +- +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +diff --git a/WHENCE b/WHENCE +index 2ed9e9a..f6b0299 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -22,98 +22,6 @@ http://www.zdomain.com/a56.html + + -------------------------------------------------------------------------- + +-Driver: cassini - Sun Cassini +- +-File: sun/cassini.bin +- +-Licence: Unknown +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: slicoss - Alacritech IS-NIC products +- +-File: slicoss/gbdownload.sys +-File: slicoss/gbrcvucode.sys +-File: slicoss/oasisdbgdownload.sys +-File: slicoss/oasisdownload.sys +-File: slicoss/oasisrcvucode.sys +- +-Licence: +- Copyright (C) 1999-2009 Alacritech, Inc. +- +- as an unpublished work. This notice does not imply unrestricted or +- public access to the source code from which this firmware image is +- derived. Except as noted below this firmware image may not be +- reproduced, used, sold or transferred to any third party without +- Alacritech's prior written consent. All Rights Reserved. +- +- Permission is hereby granted for the distribution of this firmware +- image as part of a Linux or other Open Source operating system kernel +- in text or binary form as required. +- +- This firmware may not be modified. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: sxg - Alacritech IS-NIC products +- +-File: sxg/saharadownloadB.sys +-File: sxg/saharadbgdownloadB.sys +- +-Licence: +- Copyright (C) 1999-2009 Alacritech, Inc. +- +- as an unpublished work. This notice does not imply unrestricted or +- public access to the source code from which this firmware image is +- derived. Except as noted below this firmware image may not be +- reproduced, used, sold or transferred to any third party without +- Alacritech's prior written consent. All Rights Reserved. +- +- Permission is hereby granted for the distribution of this firmware +- image as part of a Linux or other Open Source operating system kernel +- in text or binary form as required. +- +- This firmware may not be modified. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: cxgb3 - Chelsio Terminator 3 1G/10G Ethernet adapter +- +-File: cxgb3/t3b_psram-1.1.0.bin +-File: cxgb3/t3c_psram-1.1.0.bin +-File: cxgb3/t3fw-7.0.0.bin +-File: cxgb3/t3fw-7.1.0.bin +-File: cxgb3/t3fw-7.4.0.bin +-File: cxgb3/t3fw-7.10.0.bin +-File: cxgb3/t3fw-7.12.0.bin +- +-Licence: GPLv2 or OpenIB.org BSD license, no source visible +- +--------------------------------------------------------------------------- +- +-Driver: cxgb3 - Chelsio Terminator 3 1G/10G Ethernet adapter +- +-File: cxgb3/ael2005_opt_edc.bin +-File: cxgb3/ael2005_twx_edc.bin +-File: cxgb3/ael2020_twx_edc.bin +- +-Licence: +- * Copyright (c) 2007-2009 NetLogic Microsystems, Inc. 
+- * +- * Permission is hereby granted for the distribution of this firmware +- * data in hexadecimal or equivalent format, provided this copyright +- * notice is accompanying it. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: cxgb4 - Chelsio Terminator 4/5/6 1/10/25/40/100G Ethernet adapter + + File: cxgb4/t4fw-1.14.4.0.bin +@@ -141,28 +49,6 @@ Licence: Redistributable. See LICENCE.chelsio_firmware for details + + -------------------------------------------------------------------------- + +-Driver: e100 -- Intel PRO/100 Ethernet NIC +- +-File: e100/d101m_ucode.bin +-File: e100/d101s_ucode.bin +-File: e100/d102e_ucode.bin +- +-Licence: Redistributable. See LICENCE.e100 for details +- +--------------------------------------------------------------------------- +- +-Driver: acenic -- Alteon AceNIC Gigabit Ethernet card +- +-File: acenic/tg1.bin +-File: acenic/tg2.bin +- +-Licence: Unknown +- +-Found in hex form in kernel source, but source allegedly available at +-http://alteon.shareable.org/ +- +--------------------------------------------------------------------------- +- + Driver: tg3 -- Broadcom Tigon3 based gigabit Ethernet cards + + File: tigon/tg3.bin +@@ -183,83 +69,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: starfire - Adaptec Starfire/DuraLAN support +- +-File: adaptec/starfire_rx.bin +-File: adaptec/starfire_tx.bin +- +-Licence: Allegedly GPLv2, but no source visible. +- +-Found in hex form in kernel source, with the following notice: +- +- BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE IT IS LICENSED "AS IS" AND +- THERE IS NO WARRANTY FOR THE PROGRAM, INCLUDING BUT NOT LIMITED TO THE +- IMPLIED WARRANTIES OF MERCHANTIBILITY OR FITNESS FOR A PARTICULAR PURPOSE +- (TO THE EXTENT PERMITTED BY APPLICABLE LAW). USE OF THE PROGRAM IS AT YOUR +- OWN RISK. IN NO EVENT WILL ADAPTEC OR ITS LICENSORS BE LIABLE TO YOU FOR +- DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +- ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM. +- +--------------------------------------------------------------------------- +- +-Driver: tehuti - Tehuti Networks 10G Ethernet +- +-File: tehuti/bdx.bin +- +-Licence: +- +- Copyright (C) 2007 Tehuti Networks Ltd. +- +- Permission is hereby granted for the distribution of this firmware data +- in hexadecimal or equivalent format, provided this copyright notice is +- accompanying it. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: typhoon - 3cr990 series Typhoon +- +-File: 3com/typhoon.bin +- +-Licence: +-/* +- * Copyright 1999-2004 3Com Corporation. All Rights Reserved. +- * +- * Redistribution and use in source and binary forms of the 3c990img.h +- * microcode software are permitted provided that the following conditions +- * are met: +- * 1. Redistribution of source code must retain the above copyright +- * notice, this list of conditions and the following disclaimer. +- * 2. Redistribution in binary form must reproduce the above copyright +- * notice, this list of conditions and the following disclaimer in the +- * documentation and/or other materials provided with the distribution. +- * 3. 
The name of 3Com may not be used to endorse or promote products +- * derived from this software without specific prior written permission +- * +- * THIS SOFTWARE IS PROVIDED BY 3COM ``AS IS'' AND ANY EXPRESS OR +- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- * +- * USER ACKNOWLEDGES AND AGREES THAT PURCHASE OR USE OF THE 3c990img.h +- * MICROCODE SOFTWARE WILL NOT CREATE OR GIVE GROUNDS FOR A LICENSE BY +- * IMPLICATION, ESTOPPEL, OR OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS +- * (PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) +- * EMBODIED IN ANY OTHER 3COM HARDWARE OR SOFTWARE EITHER SOLELY OR IN +- * COMBINATION WITH THE 3c990img.h MICROCODE SOFTWARE +- */ +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: yam - YAM driver for AX.25 + + File: yam/1200.bin +@@ -272,16 +81,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: myri_sbus - MyriCOM Gigabit Ethernet +- +-File: myricom/lanai.bin +- +-Licence: Unknown +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: bnx2x: Broadcom Everest + + File: bnx2x/bnx2x-e1-7.13.1.0.fw +@@ -313,27 +112,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: bnx2 - Broadcom NetXtremeII +- +-File: bnx2/bnx2-mips-06-6.2.3.fw +-File: bnx2/bnx2-mips-09-6.2.1b.fw +-File: bnx2/bnx2-rv2p-06-6.0.15.fw +-File: bnx2/bnx2-rv2p-09-6.0.17.fw +-File: bnx2/bnx2-rv2p-09ax-6.0.17.fw +- +-Licence: +- +- This file contains firmware data derived from proprietary unpublished +- source code, Copyright (c) 2004 - 2010 Broadcom Corporation. +- +- Permission is hereby granted for the distribution of this firmware data +- in hexadecimal or equivalent format, provided this copyright notice is +- accompanying it. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: netxen_nic - NetXen Multi port (1/10) Gigabit Ethernet NIC + + File: phanfw.bin +@@ -1243,46 +1021,6 @@ Licence: Redistributable. See LICENSE.amdgpu for details. + + -------------------------------------------------------------------------- + +-Driver: ib_qib - QLogic Infiniband +- +-File: qlogic/sd7220.fw +- +-Licence: +- +- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved. +- * +- * This software is available to you under a choice of one of two +- * licenses. 
You may choose to be licensed under the terms of the GNU +- * General Public License (GPL) Version 2, available from the file +- * COPYING in the main directory of this source tree, or the +- * OpenIB.org BSD license below: +- * +- * Redistribution and use in source and binary forms, with or +- * without modification, are permitted provided that the following +- * conditions are met: +- * +- * - Redistributions of source code must retain the above +- * copyright notice, this list of conditions and the following +- * disclaimer. +- * +- * - Redistributions in binary form must reproduce the above +- * copyright notice, this list of conditions and the following +- * disclaimer in the documentation and/or other materials +- * provided with the distribution. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +- * SOFTWARE. +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: qed - QLogic 4xxxx Ethernet Driver Core Module. + + File: qed/qed_init_values_zipped-8.4.2.0.bin +@@ -1317,23 +1055,6 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: vxge - Exar X3100 Series 10GbE PCIe I/O Virtualized Server Adapter +- +-File: vxge/X3fw.ncf +-File: vxge/X3fw-pxe.ncf +-Version: 1.8.1 +- +-Licence: +- +- This file contains firmware data derived from proprietary unpublished +- source code, Copyright (c) 2010 Exar Corporation. +- +- Permission is hereby granted for the distribution of this firmware data +- in hexadecimal or equivalent format, provided this copyright notice is +- accompanying it. +- +--------------------------------------------------------------------------- +- + Driver: myri10ge - Myri10GE 10GbE NIC driver + + File: myri10ge_eth_z8e.dat +@@ -2215,21 +1936,6 @@ Licence: Redistributable. See LICENCE.nvidia for details + + -------------------------------------------------------------------------- + +-Driver: hfi1 - Intel OPA Gen 1 adapter +- +-File: hfi1_dc8051.fw +-Version: 1.27.0 +-File: hfi1_fabric.fw +-Version: 0x1055 +-File: hfi1_pcie.fw +-Version: 0x4755 +-File: hfi1_sbus.fw +-Version: 0x10130001 +- +-Licence: Redistributable. See LICENSE.hfi1_firmware for details +- +--------------------------------------------------------------------------- +- + Driver: knav_qmss_queue - TI Keystone 2 QMSS driver + + File: ti-keystone/ks2_qmss_pdsp_acc48_k2_le_1_0_0_9.bin +@@ -2527,15 +2233,6 @@ Licence: Redistributable. See LICENSE.nxp_mc_firmware for details + + -------------------------------------------------------------------------- + +-Driver: mscc-phy - Microchip PHY drivers +- +-File: microchip/mscc_vsc8574_revb_int8051_29e8.bin +-File: microchip/mscc_vsc8584_revb_int8051_fb48.bin +- +-Licence: Redistributable. 
See LICENCE.microchip for details +- +--------------------------------------------------------------------------- +- + Driver: ice - Intel(R) Ethernet Connection E800 Series + + File: intel/ice/ddp/ice-1.3.30.0.pkg +-- +2.40.1 + diff --git a/packages/linux-firmware/0007-linux-firmware-Remove-firmware-for-Accelarator-devic.patch b/packages/linux-firmware/0007-linux-firmware-Remove-firmware-for-Accelarator-devic.patch new file mode 100644 index 00000000..15bec937 --- /dev/null +++ b/packages/linux-firmware/0007-linux-firmware-Remove-firmware-for-Accelarator-devic.patch @@ -0,0 +1,836 @@ +From eaa004914098930239abf2f3c2e9f4acc79c10b0 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Wed, 26 Jul 2023 11:06:45 +0000 +Subject: [PATCH] linux-firmware: Remove firmware for Accelarator devices + +Bottlerocket does not ship drivers for specialty accelarator hardware. +Without those drivers the firmware is of no use. Drop the firmware from +our images to reduce image size. + +The following list maps the driver names as specified in WHENCE to +kernel config options. This creates an easy reference should we need to +ship addiitonal firmware when adding drivers. + +* rvu_cptpf - CONFIG_CRYPTO_DEV_OCTEONTX_CPT && + CONFIG_CRYPTO_DEV_OCTEONTX2_CPT +* ccp - CONFIG_CRYPTO_DEV_CCP +* qat - CONFIG_CRYPTO_DEV_QAT +* liquidio - CONFIG_LIQUIDIO +* nitrox - CONFIG_NITROX +* knav_qmss_queue - CONFIG_KEYSTONE_NAVIGATOR_QMSS +* nfp - CONFIG_NFP +* fsl_mc bus - CONFIG_FSL_MC_BUS +* inside-secure - CONFIG_CRYPTO_DEV_SAFEXCEL + +Signed-off-by: Leonard Foerster +--- + LICENCE.Netronome | 65 ------------ + LICENCE.cavium | 59 ----------- + LICENCE.cavium_liquidio | 68 ------------- + LICENCE.qat_firmware | 36 ------- + LICENCE.ti-keystone | 61 ----------- + LICENSE.amd-sev | 64 ------------ + LICENSE.nxp_mc_firmware | 127 ----------------------- + WHENCE | 218 ---------------------------------------- + 8 files changed, 698 deletions(-) + delete mode 100644 LICENCE.Netronome + delete mode 100644 LICENCE.cavium + delete mode 100644 LICENCE.cavium_liquidio + delete mode 100644 LICENCE.qat_firmware + delete mode 100644 LICENCE.ti-keystone + delete mode 100644 LICENSE.amd-sev + delete mode 100644 LICENSE.nxp_mc_firmware + +diff --git a/LICENCE.Netronome b/LICENCE.Netronome +deleted file mode 100644 +index 1ed7a7c..0000000 +--- a/LICENCE.Netronome ++++ /dev/null +@@ -1,65 +0,0 @@ +-Copyright (c) 2017, NETRONOME Systems, Inc. All rights reserved. +- +-Agilio(r) Firmware License Agreement (the "AGREEMENT") +- +-BY INSTALLING OR USING IN ANY MANNER THE SOFTWARE THAT ACCOMPANIES THIS +-AGREEMENT (THE "SOFTWARE") YOU (THE "LICENSEE") ACKNOWLEDGE TO BE BOUND +-BY ALL OF THE TERMS OF THIS AGREEMENT. +- +-LICENSE GRANT. Subject to the terms and conditions set forth herein, +-Netronome Systems, Inc. ("NETRONOME") hereby grants LICENSEE a non- +-exclusive license to use, reproduce and distribute the SOFTWARE +-exclusively in object form. +- +-Restrictions. LICENSEE agrees that, (a) unless explicitly provided by +-NETRONOME, the source code of the SOFTWARE is not being provided to +-LICENSEE and is confidential and proprietary to NETRONOME and that +-LICENSEE has no right to access or use such source code. 
Accordingly, +-LICENSEE agrees that it shall not cause or permit the disassembly, +-decompilation or reverse engineering of the SOFTWARE or otherwise attempt +-to gain access to the source code for the SOFTWARE; and (b) LICENSEE +-agrees that it shall not subject the SOFTWARE in whole or in part, to the +-terms of any software license that requires, as a condition of use, +-modification and/or distribution that the source code of the SOFTWARE, or +-the SOFTWARE be i) disclosed or distributed in source code form; ii) +-licensed for the purpose of making derivative works of the source code of +-the SOFTWARE; or iii) redistribution of the source code of the SOFTWARE +-at no charge. +- +-DISCLAIMER OF ALL WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" AND WITH +-ALL FAULTS AND NETRONOME AND ITS LICENSORS HEREBY DISCLAIM ALL EXPRESS OR +-IMPLIED WARRANTIES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ANY +-WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND FITNESS FOR A +-PARTICULAR PURPOSE. +- +-LIMITATIONS OF LIABILITY. EXCEPT WHERE PROHIBITED BY LAW, IN NO EVENT +-SHALL NETRONOME OR ANY OTHER PARTY INVOLVED IN THE CREATION, PRODUCTION, +-OR DELIVERY OF THE SOFTWARE BE LIABLE FOR ANY LOSS OF PROFITS, DATA, USE +-OF THE SOFTWARE, DOCUMENTATION OR EQUIPMENT, OR FOR ANY SPECIAL, +-INCIDENTAL, CONSEQUENTIAL, EXEMPLARY, PUNITIVE, MULTIPLE OR OTHER +-DAMAGES, ARISING FROM OR IN CONNECTION WITH THE SOFTWARE EVEN IF +-NETRONOME OR ITS LICENSORS HAVE BEEN MADE AWARE OF THE POSSIBILITY OF +-SUCH DAMAGES AND NOTWITHSTANDING ANY FAILURE OF ESSENTIAL PURPOSE OF ANY +-LIMITED REMEDY. +- +-EXPORT COMPLIANCE. LICENSEE shall not use or export or transmit the +-SOFTWARE, directly or indirectly, to any restricted countries or in any +-other manner that would violate any applicable US and other export +-control and other regulations and laws as shall from time to time govern +-the delivery, license and use of technology, including without limitation +-the Export Administration Act of 1979, as amended, and any regulations +-issued thereunder. +- +-PROHIBITION OF SOFTWARE USE IN HIGH RISK ACTIVITIES AND LIFE +-SUPPORT APPLICATIONS. The SOFTWARE is not designed, manufactured or +-intended for use as on-line control equipment in hazardous environments +-requiring fail-safe performance, such as in the operation of nuclear +-facilities, aircraft navigation or communications systems, air traffic +-control, life support systems, human implantation or any other +-application where product failure could lead to loss of life or +-catastrophic property damage or weapons systems, in which the failure of +-the SOFTWARE could lead directly to death, personal injury, or severe +-physical or environmental damage ("High Risk Activities"). Accordingly +-NETRONOME and, where applicable, NETRONOME'S third party licensors +-specifically disclaim any express or implied warranty of fitness for High +-Risk Activities. +diff --git a/LICENCE.cavium b/LICENCE.cavium +deleted file mode 100644 +index 5d2a2bb..0000000 +--- a/LICENCE.cavium ++++ /dev/null +@@ -1,59 +0,0 @@ +-Copyright © 2015, Cavium, Inc. All rights reserved. +- +-Software License Agreement +- +-ANY USE, REPRODUCTION, OR DISTRIBUTION OF THE ACCOMPANYING BINARY SOFTWARE +-CONSTITUTES LICENSEEE'S ACCEPTANCE OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. +- +-Licensed Software. Subject to the terms and conditions of this Agreement, +-Cavium, Inc. 
("Cavium") grants to Licensee a worldwide, non-exclusive, and +-royalty-free license to use, reproduce, and distribute the binary software in +-its complete and unmodified form as provided by Cavium. +- +-Restrictions. Licensee must reproduce the Cavium copyright notice above with +-each binary software copy. Licensee must not reverse engineer, decompile, +-disassemble or modify in any way the binary software. Licensee must not use +-the binary software in violation of any applicable law or regulation. This +-Agreement shall automatically terminate upon Licensee's breach of any term or +-condition of this Agreement in which case, Licensee shall destroy all copies of +-the binary software. +- +-Warranty Disclaimer. THE LICENSED SOFTWARE IS OFFERED "AS IS," AND CAVIUM +-GRANTS AND LICENSEE RECEIVES NO WARRANTIES OF ANY KIND, WHETHER EXPRESS, +-IMPLIED, STATUTORY, OR BY COURSE OF COMMUNICATION OR DEALING WITH LICENSEE, OR +-OTHERWISE. CAVIUM AND ITS LICENSORS SPECIFICALLY DISCLAIM ANY IMPLIED +-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, OR +-NONINFRINGEMENT OF THIRD PARTY RIGHTS, CONCERNING THE LICENSED SOFTWARE, +-DERIVATIVE WORKS, OR ANY DOCUMENTATION PROVIDED WITH THE FOREGOING. WITHOUT +-LIMITING THE GENERALITY OF THE FOREGOING, CAVIUM DOES NOT WARRANT THAT THE +-LICENSED SOFTWARE IS ERROR-FREE OR WILL OPERATE WITHOUT INTERRUPTION, AND +-CAVIUM GRANTS NO WARRANTY REGARDING ITS USE OR THE RESULTS THEREFROM, INCLUDING +-ITS CORRECTNESS, ACCURACY, OR RELIABILITY. +- +-Limitation of Liability. IN NO EVENT WILL LICENSEE, CAVIUM, OR ANY OF CAVIUM'S +-LICENSORS HAVE ANY LIABILITY HEREUNDER FOR ANY INDIRECT, SPECIAL, OR +-CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +-FOR BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING OUT +-OF THIS AGREEMENT, INCLUDING DAMAGES FOR LOSS OF PROFITS, OR THE COST OF +-PROCUREMENT OF SUBSTITUTE GOODS, EVEN IF SUCH PARTY HAS BEEN ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGES. +- +-Export and Import Laws. Licensee acknowledges and agrees that the Licensed +-Software (including technical data and related technology) may be controlled by +-the export control laws, rules, regulations, restrictions and national security +-controls of the United States and other applicable foreign agencies (the +-"Export Controls"), and agrees not export or re-export, or allow the export or +-re-export of export-controlled the Licensed Software (including technical data +-and related technology) or any copy, portion or direct product of the foregoing +-in violation of the Export Controls. Licensee hereby represents that +-(i) Licensee is not an entity or person to whom provision of the Licensed +-Software (including technical data and related technology) is restricted or +-prohibited by the Export Controls; and (ii) Licensee will not export, re-export +-or otherwise transfer the export-controlled Licensed Software (including +-technical data and related technology) in violation of U.S. sanction programs +-or export control regulations to (a) any country, or national or resident of +-any country, subject to a United States trade embargo, (b) any person or entity +-to whom shipment is restricted or prohibited by the Export Controls, or +-(c) anyone who is engaged in activities related to the design, development, +-production, or use of nuclear materials, nuclear facilities, nuclear weapons, +-missiles or chemical or biological weapons. 
+diff --git a/LICENCE.cavium_liquidio b/LICENCE.cavium_liquidio +deleted file mode 100644 +index 250b9fe..0000000 +--- a/LICENCE.cavium_liquidio ++++ /dev/null +@@ -1,68 +0,0 @@ +-This file contains licences pertaining to the following firmwares for +-LiquidIO (c) adapters +- +-1. lio_nic_23xx.bin, lio_210nv_nic.bin, lio_410nv_nic.bin +- +-########################################################################### +- +-1. lio_nic_23xx.bin, lio_210nv_nic.bin, lio_410nv_nic.bin +- +-Copyright (c) 2018, Cavium, Inc. All rights reserved. +- +-Software License Agreement +- +-ANY USE, REPRODUCTION, OR DISTRIBUTION OF THE ACCOMPANYING BINARY SOFTWARE +-CONSTITUTES LICENSEEE'S ACCEPTANCE OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. +- +-Licensed Software. Subject to the terms and conditions of this Agreement, +-Cavium, Inc. ("Cavium") grants to Licensee a worldwide, non-exclusive, and +-royalty-free license to use, reproduce, and distribute the binary software in +-its complete and unmodified form as provided by Cavium. +- +-Restrictions. Licensee must reproduce the Cavium copyright notice above with +-each binary software copy. Licensee must not reverse engineer, decompile, +-disassemble or modify in any way the binary software. Licensee must not use +-the binary software in violation of any applicable law or regulation. This +-Agreement shall automatically terminate upon Licensee's breach of any term or +-condition of this Agreement in which case, Licensee shall destroy all copies of +-the binary software. +- +-Warranty Disclaimer. THE LICENSED SOFTWARE IS OFFERED "AS IS," AND CAVIUM +-GRANTS AND LICENSEE RECEIVES NO WARRANTIES OF ANY KIND, WHETHER EXPRESS, +-IMPLIED, STATUTORY, OR BY COURSE OF COMMUNICATION OR DEALING WITH LICENSEE, OR +-OTHERWISE. CAVIUM AND ITS LICENSORS SPECIFICALLY DISCLAIM ANY IMPLIED +-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, OR +-NONINFRINGEMENT OF THIRD PARTY RIGHTS, CONCERNING THE LICENSED SOFTWARE, +-DERIVATIVE WORKS, OR ANY DOCUMENTATION PROVIDED WITH THE FOREGOING. WITHOUT +-LIMITING THE GENERALITY OF THE FOREGOING, CAVIUM DOES NOT WARRANT THAT THE +-LICENSED SOFTWARE IS ERROR-FREE OR WILL OPERATE WITHOUT INTERRUPTION, AND +-CAVIUM GRANTS NO WARRANTY REGARDING ITS USE OR THE RESULTS THEREFROM, INCLUDING +-ITS CORRECTNESS, ACCURACY, OR RELIABILITY. +- +-Limitation of Liability. IN NO EVENT WILL LICENSEE, CAVIUM, OR ANY OF CAVIUM'S +-LICENSORS HAVE ANY LIABILITY HEREUNDER FOR ANY INDIRECT, SPECIAL, OR +-CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +-FOR BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING OUT +-OF THIS AGREEMENT, INCLUDING DAMAGES FOR LOSS OF PROFITS, OR THE COST OF +-PROCUREMENT OF SUBSTITUTE GOODS, EVEN IF SUCH PARTY HAS BEEN ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGES. +- +-Export and Import Laws. Licensee acknowledges and agrees that the Licensed +-Software (including technical data and related technology) may be controlled by +-the export control laws, rules, regulations, restrictions and national security +-controls of the United States and other applicable foreign agencies (the +-"Export Controls"), and agrees not export or re-export, or allow the export or +-re-export of export-controlled the Licensed Software (including technical data +-and related technology) or any copy, portion or direct product of the foregoing +-in violation of the Export Controls. 
Licensee hereby represents that +-(i) Licensee is not an entity or person to whom provision of the Licensed +-Software (including technical data and related technology) is restricted or +-prohibited by the Export Controls; and (ii) Licensee will not export, re-export +-or otherwise transfer the export-controlled Licensed Software (including +-technical data and related technology) in violation of U.S. sanction programs +-or export control regulations to (a) any country, or national or resident of +-any country, subject to a United States trade embargo, (b) any person or entity +-to whom shipment is restricted or prohibited by the Export Controls, or +-(c) anyone who is engaged in activities related to the design, development, +-production, or use of nuclear materials, nuclear facilities, nuclear weapons, +-missiles or chemical or biological weapons. +diff --git a/LICENCE.qat_firmware b/LICENCE.qat_firmware +deleted file mode 100644 +index 75a4ff1..0000000 +--- a/LICENCE.qat_firmware ++++ /dev/null +@@ -1,36 +0,0 @@ +-Copyright (c) 2014-2023 Intel Corporation. +-All rights reserved. +- +-Redistribution. Redistribution and use in binary form, without +-modification, are permitted provided that the following conditions are +-met: +- +-* Redistributions must reproduce the above copyright notice and the +- following disclaimer in the documentation and/or other materials +- provided with the distribution. +-* Neither the name of Intel Corporation nor the names of its suppliers +- may be used to endorse or promote products derived from this software +- without specific prior written permission. +-* No reverse engineering, decompilation, or disassembly of this software +- is permitted. +- +-Limited patent license. Intel Corporation grants a world-wide, +-royalty-free, non-exclusive license under patents it now or hereafter +-owns or controls to make, have made, use, import, offer to sell and +-sell ("Utilize") this software, but solely to the extent that any +-such patent is necessary to Utilize the software alone. The patent +-license shall not apply to any other combinations which include this +-software. No hardware per se is licensed hereunder. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +-DAMAGE. +diff --git a/LICENCE.ti-keystone b/LICENCE.ti-keystone +deleted file mode 100644 +index 62cc3b3..0000000 +--- a/LICENCE.ti-keystone ++++ /dev/null +@@ -1,61 +0,0 @@ +-Copyright (c) 2015 Texas Instruments Incorporated +- +-All rights reserved not granted herein. +- +-Limited License. +- +-Texas Instruments Incorporated grants a world-wide, royalty-free, non-exclusive +-license under copyrights and patents it now or hereafter owns or controls to +-make, have made, use, import, offer to sell and sell ("Utilize") this software +-subject to the terms herein. 
With respect to the foregoing patent license, such +-license is granted solely to the extent that any such patent is necessary to +-Utilize the software alone. The patent license shall not apply to any +-combinations which include this software, other than combinations with devices +-manufactured by or for TI (“TI Devices”). No hardware patent is licensed +-hereunder. +- +-Redistributions must preserve existing copyright notices and reproduce this +-license (including the above copyright notice and the disclaimer and +-(if applicable) source code license limitations below) in the documentation +-and/or other materials provided with the distribution +- +-Redistribution and use in binary form, without modification, are permitted +-provided that the following conditions are met: +- +- * No reverse engineering, decompilation, or disassembly of this +- software is permitted with respect to any software provided in binary +- form. +- +- * any redistribution and use are licensed by TI for use only with TI +- Devices. +- +- * Nothing shall obligate TI to provide you with source code for the +- software licensed and provided to you in object code. +- +-If software source code is provided to you, modification and redistribution of +-the source code are permitted provided that the following conditions are met: +- +- * any redistribution and use of the source code, including any +- resulting derivative works, are licensed by TI for use only with TI +- Devices. +- +- * any redistribution and use of any object code compiled from the +- source code and any resulting derivative works, are licensed by TI +- for use only with TI Devices. +- +-Neither the name of Texas Instruments Incorporated nor the names of its +-suppliers may be used to endorse or promote products derived from this +-software without specific prior written permission. +- +-DISCLAIMER. +- +-THIS SOFTWARE IS PROVIDED BY TI AND TI’S LICENSORS "AS IS" AND ANY EXPRESS OR +-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +-EVENT SHALL TI AND TI’S LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.amd-sev b/LICENSE.amd-sev +deleted file mode 100644 +index de4d948..0000000 +--- a/LICENSE.amd-sev ++++ /dev/null +@@ -1,64 +0,0 @@ +-Copyright (C) 2015-2019 Advanced Micro Devices, Inc., All rights reserved. +- +-Permission is hereby granted by Advanced Micro Devices, Inc. ("AMD"), +-free of any license fees, to any person obtaining a copy of this +-microcode in binary form (the "Software") ("You"), to install, +-reproduce, copy and distribute copies of the Software and to permit +-persons to whom the Software is provided to do the same, subject to +-the following terms and conditions. Your use of any portion of the +-Software shall constitute Your acceptance of the following terms and +-conditions. If You do not agree to the following terms and conditions, +-do not use, retain or redistribute any portion of the Software. 
+- +-If You redistribute this Software, You must reproduce the above +-copyright notice and this license with the Software. +-Without specific, prior, written permission from AMD, You may not +-reference AMD or AMD products in the promotion of any product derived +-from or incorporating this Software in any manner that implies that +-AMD endorses or has certified such product derived from or +-incorporating this Software. +- +-You may not reverse engineer, decompile, or disassemble this Software +-or any portion thereof. +- +-THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED +-WARRANTY OF ANY KIND, INCLUDING BUT NOT LIMITED TO WARRANTIES OF +-MERCHANTABILITY, NONINFRINGEMENT, TITLE, FITNESS FOR ANY PARTICULAR +-PURPOSE, OR WARRANTIES ARISING FROM CONDUCT, COURSE OF DEALING, OR +-USAGE OF TRADE. IN NO EVENT SHALL AMD OR ITS LICENSORS BE LIABLE FOR +-ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR +-LOSS OF PROFITS, BUSINESS INTERRUPTION, OR LOSS OF DATA OR +-INFORMATION) ARISING OUT OF AMD'S NEGLIGENCE, GROSS NEGLIGENCE, THE +-USE OF OR INABILITY TO USE THE SOFTWARE, EVEN IF AMD HAS BEEN ADVISED +-OF THE POSSIBILITY OF SUCH DAMAGES. BECAUSE SOME JURISDICTIONS +-PROHIBIT THE EXCLUSION OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR +-INCIDENTAL DAMAGES OR THE EXCLUSION OF IMPLIED WARRANTIES, THE ABOVE +-LIMITATION MAY NOT APPLY TO YOU. +- +-Without limiting the foregoing, the Software may implement third party +-technologies for which You must obtain licenses from parties other +-than AMD. You agree that AMD has not obtained or conveyed to You, and +-that You shall be responsible for obtaining the rights to use and/or +-distribute the applicable underlying intellectual property rights +-related to the third party technologies. These third party +-technologies are not licensed hereunder. +- +-If You use the Software (in whole or in part), You shall adhere to all +-applicable U.S., European, and other export laws, including but not +-limited to the U.S. Export Administration Regulations ("EAR"), (15 +-C.F.R. Sections 730 through 774), and E.U. Council Regulation (EC) No +-1334/2000 of 22 June 2000. Further, pursuant to Section 740.6 of the +-EAR, You hereby certify that, except pursuant to a license granted by +-the United States Department of Commerce Bureau of Industry and +-Security or as otherwise permitted pursuant to a License Exception +-under the U.S. Export Administration Regulations ("EAR"), You will not +-(1) export, re-export or release to a national of a country in Country +-Groups D:1, E:1 or E:2 any restricted technology, software, or source +-code You receive hereunder, or (2) export to Country Groups D:1, E:1 +-or E:2 the direct product of such technology or software, if such +-foreign produced direct product is subject to national security +-controls as identified on the Commerce Control List (currently found +-in Supplement 1 to Part 774 of EAR). For the most current Country +-Group listings, or for additional information about the EAR or Your +-obligations under those regulations, please refer to the U.S. Bureau +-of Industry and Security’s website at ttp://www.bis.doc.gov/. +diff --git a/LICENSE.nxp_mc_firmware b/LICENSE.nxp_mc_firmware +deleted file mode 100644 +index 4b12f58..0000000 +--- a/LICENSE.nxp_mc_firmware ++++ /dev/null +@@ -1,127 +0,0 @@ +-Copyright (c) 2018 NXP. All rights reserved. 
+- +-Software License Agreement ("Agreement") +- +-ANY USE, REPRODUCTION, OR DISTRIBUTION OF THE ACCOMPANYING BINARY SOFTWARE +-CONSTITUTES LICENSEE'S ACCEPTANCE OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. +- +-Licensed Software. "Binary Software" means software in binary form specified in +-ANNEX A. Subject to the terms and conditions of this Agreement, NXP USA, Inc. +-("Licensor"), grants to you ("Licensee") a worldwide, non-exclusive, and +-royalty-free license to reproduce and distribute the Binary Software in its +-complete and unmodified binary form as provided by Licensor, for use solely in +-conjunction with a programmable processing unit supplied directly or indirectly +-from Licensor. +- +-Restrictions. Licensee must reproduce the Licensor copyright notice above with +-each binary copy of the Binary Software or in the accompanying documentation. +-Licensee must not reverse engineer, decompile, disassemble or modify in any way +-the Binary Software. Licensee must not use the Binary Software in violation of +-any applicable law or regulation. This Agreement shall automatically terminate +-upon Licensee's breach of any term or condition of this Agreement in which case, +-Licensee shall destroy all copies of the Binary Software. Neither the name of +-Licensor nor the names of its suppliers may be used to endorse or promote +-products derived from this Binary Software without specific prior written +-permission. +- +-Disclaimer. TO THE MAXIMUM EXTENT PERMITTED BY LAW, LICENSOR EXPRESSLY +-DISCLAIMS ANY WARRANTY FOR THE BINARY SOFTWARE. THE BINARY SOFTWARE IS PROVIDED +-"AS IS", WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING +-WITHOUT LIMITATION THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +-PARTICULAR PURPOSE, OR NON-INFRINGEMENT. WITHOUT LIMITING THE GENERALITY OF THE +-FOREGOING, LICENSOR DOES NOT WARRANT THAT THE BINARY SOFTWARE IS ERROR-FREE OR +-WILL OPERATE WITHOUT INTERRUPTION, AND LICENSOR GRANTS NO WARRANTY REGARDING ITS +-USE OR THE RESULTS THEREFROM, INCLUDING ITS CORRECTNESS, ACCURACY, OR +-RELIABILITY. +- +-Limitation of Liability. IN NO EVENT WILL LICENSOR, OR ANY OF LICENSOR'S +-LICENSORS HAVE ANY LIABILITY HEREUNDER FOR ANY INDIRECT, SPECIAL, OR +-CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +-FOR BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING OUT +-OF THIS AGREEMENT, INCLUDING DAMAGES FOR LOSS OF PROFITS, OR THE COST OF +-PROCUREMENT OF SUBSTITUTE GOODS, EVEN IF SUCH PARTY HAS BEEN ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGES. LICENSOR'S TOTAL LIABILITY FOR ALL COSTS, DAMAGES, +-CLAIMS, OR LOSSES WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT +-OR THE BINARY SOFTWARE SUPPLIED UNDER THIS AGREEMENT IS LIMITED TO THE AGGREGATE +-AMOUNT PAID BY LICENSEE TO LICENSOR IN CONNECTION WITH THE BINARY SOFTWARE TO +-WHICH LOSSES OR DAMAGES ARE CLAIMED. +- +-Trade Compliance. Licensee shall comply with all applicable export and import +-control laws and regulations including but not limited to the US Export +-Administration Regulation (including prohibited party lists issued by other +-federal governments), Catch-all regulations and all national and international +-embargoes. 
Licensee further agrees that it will not knowingly transfer, divert, +-export or re-export, directly or indirectly, any product, software, including +-software source code, or technology restricted by such regulations or by other +-applicable national regulations, received from Licensor under this Agreement, +-or any direct product of such software or technical data to any person, firm, +-entity, country or destination to which such transfer, diversion, export or +-re-export is restricted or prohibited, without obtaining prior written +-authorization from the applicable competent government authorities to the extent +-required by those laws. Licensee acknowledge that the "restricted encryption +-software" that is subject to the US Export Administration Regulations (EAR), is +-not intended for use by a government end user, as defined in part 772 of the +-EAR. This provision shall survive termination or expiration of this Agreement. +- +-Assignment. Licensee may not assign this Agreement without the prior written +-consent of Licensor. Licensor may assign this Agreement without Licensee's +-consent. +- +-Governing Law. This Agreement will be governed by, construed, and enforced in +-accordance with the laws of the State of Texas, USA, without regard to conflicts +-of laws principles, will apply to all matters relating to this Agreement or the +-Binary Software, and Licensee agrees that any litigation will be subject to the +-exclusive jurisdiction of the state or federal courts Texas, USA. The United +-Nations Convention on Contracts for the International Sale of Goods will not +-apply to this Agreement. +- +-Restrictions, Warranty Disclaimer, Limitation of Liability, Trade Compliance, +-Assignment, Governing Law, and Third Party Terms shall survive termination or +-expiration of this Agreement. +- +-Third Party Terms. The licensed Binary Software includes the following third +-party software for which the following terms apply: +- +-Libfdt - Flat Device Tree manipulation +-Copyright (c) 2006 David Gibson, IBM Corporation +-All rights reserved. +- +-Redistributions must reproduce the above copyright notice, this list of +-conditions and the following disclaimer in the documentation and/or other +-materials provided with the distribution. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-LibElf +-Copyright (c) 2006,2008-2011 Joseph Koshy +-All rights reserved. +- +-Redistributions must reproduce the above copyright notice, this list of +-conditions and the following disclaimer in the documentation and/or other +-materials provided with the distribution. 
+- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +- +-ANNEX A +-BINARY SOFTWARE +-Only software in binary form may be provided under this Agreement +- +diff --git a/WHENCE b/WHENCE +index f6b0299..58e9ca1 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -1071,19 +1071,6 @@ License: Redistributable. See LICENCE.myri10ge_firmware for details. + + -------------------------------------------------------------------------- + +-Driver: ccp - Platform Security Processor (PSP) device +- +-File: amd/amd_sev_fam17h_model0xh.sbin +-Version: 2022-2-25 +-File: amd/amd_sev_fam17h_model3xh.sbin +-Version: 2022-2-25 +-File: amd/amd_sev_fam19h_model0xh.sbin +-Version: 2022-2-25 +- +-License: Redistributable. See LICENSE.amd-sev for details +- +--------------------------------------------------------------------------- +- + Driver: microcode_amd - AMD CPU Microcode Update Driver for Linux + + File: amd-ucode/microcode_amd.bin +@@ -1107,52 +1094,6 @@ License: Redistributable. See LICENSE.amd-ucode for details + + -------------------------------------------------------------------------- + +-Driver: qat - Intel(R) QAT crypto accelerator +- +-File: qat_895xcc.bin +-File: qat_895xcc_mmp.bin +-File: qat_c3xxx.bin +-File: qat_c3xxx_mmp.bin +-File: qat_c62x.bin +-File: qat_c62x_mmp.bin +-Link: qat_mmp.bin -> qat_895xcc_mmp.bin +-File: qat_4xxx.bin +-File: qat_4xxx_mmp.bin +- +-Licence: Redistributable. See LICENCE.qat_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: liquidio -- Cavium LiquidIO driver +- +-File: liquidio/lio_23xx_nic.bin +-Version: v1.7.2 +- +-File: liquidio/lio_210nv_nic.bin +-Version: v1.7.2 +- +-File: liquidio/lio_210sv_nic.bin +-Version: v1.7.2 +- +-File: liquidio/lio_410nv_nic.bin +-Version: v1.7.2 +- +-Licence: Redistributable. See LICENCE.cavium_liquidio for details +- +--------------------------------------------------------------------------- +- +-Driver: nitrox -- Cavium CNN55XX crypto driver +- +-File: cavium/cnn55xx_ae.fw +-Version: v01 +- +-File: cavium/cnn55xx_se.fw +-Version: v10 +- +-Licence: Redistributable. See LICENCE.cavium for details +- +--------------------------------------------------------------------------- +- + Driver: i915 -- Intel Integrated Graphics driver + + File: i915/skl_dmc_ver1_23.bin +@@ -1936,14 +1877,6 @@ Licence: Redistributable. See LICENCE.nvidia for details + + -------------------------------------------------------------------------- + +-Driver: knav_qmss_queue - TI Keystone 2 QMSS driver +- +-File: ti-keystone/ks2_qmss_pdsp_acc48_k2_le_1_0_0_9.bin +- +-Licence: Redistributable. See LICENCE.ti-keystone for details. 
+- +--------------------------------------------------------------------------- +- + Driver: mtk_scp - MediaTek SCP System Control Processing Driver + + File: mediatek/mt8183/scp.img +@@ -1959,100 +1892,6 @@ Licence: Redistributable. See LICENCE.mediatek for details. + + -------------------------------------------------------------------------- + +-Driver: nfp - Netronome Flow Processor +- +-Link: netronome/nic_AMDA0081-0001_1x40.nffw -> nic/nic_AMDA0081-0001_1x40.nffw +-Link: netronome/nic_AMDA0097-0001_2x40.nffw -> nic/nic_AMDA0097-0001_2x40.nffw +-Link: netronome/nic_AMDA0099-0001_2x10.nffw -> nic/nic_AMDA0099-0001_2x10.nffw +-Link: netronome/nic_AMDA0081-0001_4x10.nffw -> nic/nic_AMDA0081-0001_4x10.nffw +-Link: netronome/nic_AMDA0097-0001_4x10_1x40.nffw -> nic/nic_AMDA0097-0001_4x10_1x40.nffw +-Link: netronome/nic_AMDA0099-0001_1x10_1x25.nffw -> nic/nic_AMDA0099-0001_1x10_1x25.nffw +-Link: netronome/nic_AMDA0099-0001_2x25.nffw -> nic/nic_AMDA0099-0001_2x25.nffw +-Link: netronome/nic_AMDA0096-0001_2x10.nffw -> nic/nic_AMDA0096-0001_2x10.nffw +-Link: netronome/nic_AMDA0097-0001_8x10.nffw -> nic/nic_AMDA0097-0001_8x10.nffw +-Link: netronome/nic_AMDA0058-0011_2x40.nffw -> nic/nic_AMDA0058-0011_2x40.nffw +-Link: netronome/nic_AMDA0058-0012_2x40.nffw -> nic/nic_AMDA0058-0012_2x40.nffw +-Link: netronome/nic_AMDA0078-0011_1x100.nffw -> nic/nic_AMDA0078-0011_1x100.nffw +-File: netronome/nic/nic_AMDA0081-0001_1x40.nffw +-File: netronome/nic/nic_AMDA0097-0001_2x40.nffw +-File: netronome/nic/nic_AMDA0099-0001_2x10.nffw +-File: netronome/nic/nic_AMDA0081-0001_4x10.nffw +-File: netronome/nic/nic_AMDA0097-0001_4x10_1x40.nffw +-File: netronome/nic/nic_AMDA0099-0001_1x10_1x25.nffw +-File: netronome/nic/nic_AMDA0099-0001_2x25.nffw +-File: netronome/nic/nic_AMDA0096-0001_2x10.nffw +-File: netronome/nic/nic_AMDA0097-0001_8x10.nffw +-File: netronome/nic/nic_AMDA0058-0011_2x40.nffw +-File: netronome/nic/nic_AMDA0058-0012_2x40.nffw +-File: netronome/nic/nic_AMDA0078-0011_1x100.nffw +-File: netronome/nic-sriov/nic_AMDA0081-0001_1x40.nffw +-File: netronome/nic-sriov/nic_AMDA0097-0001_2x40.nffw +-File: netronome/nic-sriov/nic_AMDA0099-0001_2x10.nffw +-File: netronome/nic-sriov/nic_AMDA0081-0001_4x10.nffw +-File: netronome/nic-sriov/nic_AMDA0097-0001_4x10_1x40.nffw +-File: netronome/nic-sriov/nic_AMDA0099-0001_1x10_1x25.nffw +-File: netronome/nic-sriov/nic_AMDA0099-0001_2x25.nffw +-File: netronome/nic-sriov/nic_AMDA0096-0001_2x10.nffw +-File: netronome/nic-sriov/nic_AMDA0097-0001_8x10.nffw +-File: netronome/nic-sriov/nic_AMDA0058-0011_2x40.nffw +-File: netronome/nic-sriov/nic_AMDA0058-0012_2x40.nffw +-File: netronome/nic-sriov/nic_AMDA0078-0011_1x100.nffw +- +-Version: v2.1.16.1 +- +-File: netronome/flower/nic_AMDA0099.nffw +-File: netronome/flower/nic_AMDA0096.nffw +-File: netronome/flower/nic_AMDA0097.nffw +-File: netronome/flower/nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0081.nffw -> nic_AMDA0097.nffw +-Link: netronome/flower/nic_AMDA0081-0001_1x40.nffw -> nic_AMDA0081.nffw +-Link: netronome/flower/nic_AMDA0097-0001_2x40.nffw -> nic_AMDA0097.nffw +-Link: netronome/flower/nic_AMDA0099-0001_2x10.nffw -> nic_AMDA0099.nffw +-Link: netronome/flower/nic_AMDA0081-0001_4x10.nffw -> nic_AMDA0081.nffw +-Link: netronome/flower/nic_AMDA0097-0001_4x10_1x40.nffw -> nic_AMDA0097.nffw +-Link: netronome/flower/nic_AMDA0099-0001_2x25.nffw -> nic_AMDA0099.nffw +-Link: netronome/flower/nic_AMDA0096-0001_2x10.nffw -> nic_AMDA0096.nffw +-Link: netronome/flower/nic_AMDA0097-0001_8x10.nffw -> nic_AMDA0097.nffw +-Link: 
netronome/flower/nic_AMDA0099-0001_1x10_1x25.nffw -> nic_AMDA0099.nffw +-Link: netronome/flower/nic_AMDA0058-0011_1x100.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0011_2x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0011_4x10_1x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0011_8x10.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0012_1x100.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0012_2x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0012_4x10_1x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0058-0012_8x10.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0011_1x100.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0011_2x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0011_4x10_1x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0011_8x10.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0012_1x100.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0012_2x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0012_4x10_1x40.nffw -> nic_AMDA0058.nffw +-Link: netronome/flower/nic_AMDA0078-0012_8x10.nffw -> nic_AMDA0058.nffw +- +-Version: AOTC-2.14.A.6 +- +-File: netronome/bpf/nic_AMDA0081-0001_1x40.nffw +-File: netronome/bpf/nic_AMDA0097-0001_2x40.nffw +-File: netronome/bpf/nic_AMDA0099-0001_2x10.nffw +-File: netronome/bpf/nic_AMDA0081-0001_4x10.nffw +-File: netronome/bpf/nic_AMDA0097-0001_4x10_1x40.nffw +-File: netronome/bpf/nic_AMDA0099-0001_1x10_1x25.nffw +-File: netronome/bpf/nic_AMDA0099-0001_2x25.nffw +-File: netronome/bpf/nic_AMDA0096-0001_2x10.nffw +-File: netronome/bpf/nic_AMDA0097-0001_8x10.nffw +-File: netronome/bpf/nic_AMDA0058-0011_2x40.nffw +-File: netronome/bpf/nic_AMDA0058-0012_2x40.nffw +-File: netronome/bpf/nic_AMDA0078-0011_1x100.nffw +- +-Version: v2.0.6.124 +- +- +-Licence: Redistributable. See LICENCE.Netronome for details +- +--------------------------------------------------------------------------- +- + Driver: imx-sdma - support for i.MX SDMA driver + + File: imx/sdma/sdma-imx6q.bin +@@ -2211,28 +2050,6 @@ Licence: + + -------------------------------------------------------------------------- + +-Driver: fsl-mc bus - NXP Management Complex Bus Driver +- +-File: dpaa2/mc/mc_10.10.0_ls1088a.itb +-File: dpaa2/mc/mc_10.10.0_ls2088a.itb +-File: dpaa2/mc/mc_10.10.0_lx2160a.itb +-File: dpaa2/mc/mc_10.14.3_ls1088a.itb +-File: dpaa2/mc/mc_10.14.3_ls2088a.itb +-File: dpaa2/mc/mc_10.14.3_lx2160a.itb +-File: dpaa2/mc/mc_10.16.2_ls1088a.itb +-File: dpaa2/mc/mc_10.16.2_ls2088a.itb +-File: dpaa2/mc/mc_10.16.2_lx2160a.itb +-File: dpaa2/mc/mc_10.18.0_ls1088a.itb +-File: dpaa2/mc/mc_10.18.0_ls2088a.itb +-File: dpaa2/mc/mc_10.18.0_lx2160a.itb +-File: dpaa2/mc/mc_10.28.1_ls1088a.itb +-File: dpaa2/mc/mc_10.28.1_ls2088a.itb +-File: dpaa2/mc/mc_10.28.1_lx2160a.itb +- +-Licence: Redistributable. See LICENSE.nxp_mc_firmware for details +- +--------------------------------------------------------------------------- +- + Driver: ice - Intel(R) Ethernet Connection E800 Series + + File: intel/ice/ddp/ice-1.3.30.0.pkg +@@ -2247,21 +2064,6 @@ License: Redistributable. 
See LICENSE.ice_enhanced for details + + -------------------------------------------------------------------------- + +-Driver: inside-secure -- Inside Secure EIP197 crypto driver +- +-File: inside-secure/eip197_minifw/ipue.bin +-File: inside-secure/eip197_minifw/ifpp.bin +- +-Licence: Redistributable. +-Copyright (c) 2019 Verimatrix, Inc. +- +-Derived from proprietary unpublished source code. +-Permission is hereby granted for the distribution of this firmware +-as part of Linux or other Open Source operating system kernel, +-provided this copyright notice is accompanying it. +- +------------------------------------------------- +- + Driver: prestera - Marvell driver for Prestera family ASIC devices + + File: mrvl/prestera/mvsw_prestera_fw-v2.0.img +@@ -2273,23 +2075,3 @@ File: mrvl/prestera/mvsw_prestera_fw_arm64-v4.1.img + Licence: Redistributable. See LICENCE.Marvell for details. + + ------------------------------------------------ +- +-Driver: rvu_cptpf - Marvell CPT driver +- +-File: mrvl/cpt01/ae.out +-File: mrvl/cpt01/se.out +-File: mrvl/cpt01/ie.out +-File: mrvl/cpt02/ae.out +-File: mrvl/cpt02/se.out +-File: mrvl/cpt02/ie.out +-File: mrvl/cpt03/ae.out +-File: mrvl/cpt03/se.out +-File: mrvl/cpt03/ie.out +-File: mrvl/cpt04/ae.out +-File: mrvl/cpt04/se.out +-File: mrvl/cpt04/ie.out +-Version: v1.21 +- +-Licence: Redistributable. See LICENCE.Marvell for details. +- +---------------------------------------------------------------------------- +-- +2.40.1 + diff --git a/packages/linux-firmware/0008-linux-firmware-gpu-Remove-firmware-for-GPU-devices.patch b/packages/linux-firmware/0008-linux-firmware-gpu-Remove-firmware-for-GPU-devices.patch new file mode 100644 index 00000000..92eb94f9 --- /dev/null +++ b/packages/linux-firmware/0008-linux-firmware-gpu-Remove-firmware-for-GPU-devices.patch @@ -0,0 +1,1923 @@ +From 2819be643186fa19869700e0af67444872d677c7 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Wed, 26 Jul 2023 11:16:37 +0000 +Subject: [PATCH] linux-firmware: gpu: Remove firmware for GPU devices + +Bottlerocket does not provide drivers for any GPUs for any of its +kernels. Thus, shipping firmware for any of these non-supported devices +makes no sense. One exception is the nvidia drivers for tesla devices in +aws-*-nvidia variants. These variants include drivers through +kmod-*-nvidia packages which ship device specific firmware for supported +cards. + +The following list maps drivers as specified in WHENCE to kernel config +options for easy reference, should firmware be needed through driver +addition. 
+ +* mga - CONFIG_DRM_MGA && CONFIG_DRM_MGAG200 +* r128 - CONFIG_DRM_R128 +* radeon - CONFIG_DRM_RADEON +* amdgpu - CONFIG_DRM_AMDGPU +* nouveau - CONFIG_DRM_NOUVEAU +* adreno - CONFIG_DRM_MSM + +Signed-off-by: Leonard Foerster +--- + LICENCE.nvidia | 131 ----- + LICENSE.amdgpu | 51 -- + LICENSE.qcom | 206 ------- + LICENSE.qcom_yamato | 25 - + LICENSE.radeon | 51 -- + WHENCE | 1363 ------------------------------------------- + 6 files changed, 1827 deletions(-) + delete mode 100644 LICENCE.nvidia + delete mode 100644 LICENSE.amdgpu + delete mode 100644 LICENSE.qcom + delete mode 100644 LICENSE.qcom_yamato + delete mode 100644 LICENSE.radeon + +diff --git a/LICENCE.nvidia b/LICENCE.nvidia +deleted file mode 100644 +index b99d5a3..0000000 +--- a/LICENCE.nvidia ++++ /dev/null +@@ -1,131 +0,0 @@ +- License For Customer Use of NVIDIA Software +- +- +-IMPORTANT NOTICE -- READ CAREFULLY: This License For Customer Use of +-NVIDIA Software ("LICENSE") is the agreement which governs use of +-the software of NVIDIA Corporation and its subsidiaries ("NVIDIA") +-downloadable herefrom, including computer software and associated +-printed materials ("SOFTWARE"). By downloading, installing, copying, +-or otherwise using the SOFTWARE, you agree to be bound by the terms +-of this LICENSE. If you do not agree to the terms of this LICENSE, +-do not download the SOFTWARE. +- +-RECITALS +- +-Use of NVIDIA's products requires three elements: the SOFTWARE, the +-hardware, and a personal computer. The SOFTWARE is protected by copyright +-laws and international copyright treaties, as well as other intellectual +-property laws and treaties. The SOFTWARE may be protected by various +-patents, and is not sold, and instead is only licensed for use, strictly +-in accordance with this document. The hardware is protected by various +-patents, and is sold, but this agreement does not cover that sale, since +-it may not necessarily be sold as a package with the SOFTWARE. This +-agreement sets forth the terms and conditions of the SOFTWARE LICENSE only. +- +-1. DEFINITIONS +- +-1.1 Customer. Customer means the entity or individual that +-downloads or otherwise obtains the SOFTWARE. +- +-2. GRANT OF LICENSE +- +-2.1 Rights and Limitations of Grant. NVIDIA hereby grants Customer +-the following non-exclusive, non-transferable right to use the +-SOFTWARE, with the following limitations: +- +-2.1.1 Rights. Customer may install and use multiple copies of the +-SOFTWARE on a shared computer or concurrently on different computers, +-and make multiple back-up copies of the SOFTWARE, solely for Customer's +-use within Customer's Enterprise. "Enterprise" shall mean individual use +-by Customer or any legal entity (such as a corporation or university) +-and the subsidiaries it owns by more than fifty percent (50%). +- +-2.1.2 Open Source Exception. Notwithstanding the foregoing terms +-of Section 2.1.1, SOFTWARE may be copied and redistributed solely for +-use on operating systems distributed under the terms of an OSI-approved +-open source license as listed by the Open Source Initiative at +-http://opensource.org, provided that the binary files thereof are not +-modified, and Customer provides a copy of this license with the SOFTWARE. +- +-2.1.3 Limitations. +- +-No Reverse Engineering. Customer may not reverse engineer, +-decompile, or disassemble the SOFTWARE, nor attempt in any other +-manner to obtain the source code. +- +-Usage. 
SOFTWARE is licensed only for use with microprocessor(s) which have +-been (i) designed by NVIDIA and (ii) either (a) sold by or (b) licensed by +-NVIDIA. Customer shall not use SOFTWARE in conjunction with, nor cause +-SOFTWARE to be executed by, any other microprocessor. +- +-No Translation. Customer shall not translate SOFTWARE, nor cause or permit +-SOFTWARE to be translated, from the architecture or language in which it is +-originally provided by NVIDIA, into any other architecture or language. +- +-No Rental. Customer may not rent or lease the SOFTWARE to someone +-else. +- +-3. TERMINATION +- +-This LICENSE will automatically terminate if Customer fails to +-comply with any of the terms and conditions hereof. In such event, +-Customer must destroy all copies of the SOFTWARE and all of its +-component parts. +- +-Defensive Suspension. If Customer commences or participates in any legal +-proceeding against NVIDIA, then NVIDIA may, in its sole discretion, +-suspend or terminate all license grants and any other rights provided +-under this LICENSE during the pendency of such legal proceedings. +- +-4. COPYRIGHT +- +-All title and copyrights in and to the SOFTWARE (including but +-not limited to all images, photographs, animations, video, audio, +-music, text, and other information incorporated into the SOFTWARE), +-the accompanying printed materials, and any copies of the SOFTWARE, +-are owned by NVIDIA, or its suppliers. The SOFTWARE is protected +-by copyright laws and international treaty provisions. Accordingly, +-Customer is required to treat the SOFTWARE like any other copyrighted +-material, except as otherwise allowed pursuant to this LICENSE +-and that it may make one copy of the SOFTWARE solely for backup or +-archive purposes. +- +-5. APPLICABLE LAW +- +-This agreement shall be deemed to have been made in, and shall be +-construed pursuant to, the laws of the State of California. +- +-6. DISCLAIMER OF WARRANTIES AND LIMITATION ON LIABILITY +- +-6.1 No Warranties. TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE +-LAW, THE SOFTWARE IS PROVIDED "AS IS" AND NVIDIA AND ITS SUPPLIERS +-DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT +-NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +-FOR A PARTICULAR PURPOSE. +- +-6.2 No Liability for Consequential Damages. TO THE MAXIMUM +-EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL NVIDIA OR +-ITS SUPPLIERS BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT, OR +-CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, +-DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS +-OF BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT +-OF THE USE OF OR INABILITY TO USE THE SOFTWARE, EVEN IF NVIDIA HAS +-BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +- +-7. MISCELLANEOUS +- +-The United Nations Convention on Contracts for the International +-Sale of Goods is specifically disclaimed. If any provision of this +-LICENSE is inconsistent with, or cannot be fully enforced under, +-the law, such provision will be construed as limited to the extent +-necessary to be consistent with and fully enforceable under the law. +-This agreement is the final, complete and exclusive agreement between +-the parties relating to the subject matter hereof, and supersedes +-all prior or contemporaneous understandings and agreements relating +-to such subject matter, whether oral or written. 
Customer agrees +-that it will not ship, transfer or export the SOFTWARE into any +-country, or use the SOFTWARE in any manner, prohibited by the +-United States Bureau of Export Administration or any export laws, +-restrictions or regulations. This LICENSE may only be modified in +-writing signed by an authorized officer of NVIDIA. +- +diff --git a/LICENSE.amdgpu b/LICENSE.amdgpu +deleted file mode 100644 +index 349e207..0000000 +--- a/LICENSE.amdgpu ++++ /dev/null +@@ -1,51 +0,0 @@ +-Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. +- +-REDISTRIBUTION: Permission is hereby granted, free of any license fees, +-to any person obtaining a copy of this microcode (the "Software"), to +-install, reproduce, copy and distribute copies, in binary form only, of +-the Software and to permit persons to whom the Software is provided to +-do the same, provided that the following conditions are met: +- +-No reverse engineering, decompilation, or disassembly of this Software +-is permitted. +- +-Redistributions must reproduce the above copyright notice, this +-permission notice, and the following disclaimers and notices in the +-Software documentation and/or other materials provided with the +-Software. +- +-DISCLAIMER: THE USE OF THE SOFTWARE IS AT YOUR SOLE RISK. THE SOFTWARE +-IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND AND COPYRIGHT +-HOLDER AND ITS LICENSORS EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS AND +-IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +-COPYRIGHT HOLDER AND ITS LICENSORS DO NOT WARRANT THAT THE SOFTWARE WILL +-MEET YOUR REQUIREMENTS, OR THAT THE OPERATION OF THE SOFTWARE WILL BE +-UNINTERRUPTED OR ERROR-FREE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF +-THE SOFTWARE IS ASSUMED BY YOU. FURTHERMORE, COPYRIGHT HOLDER AND ITS +-LICENSORS DO NOT WARRANT OR MAKE ANY REPRESENTATIONS REGARDING THE USE +-OR THE RESULTS OF THE USE OF THE SOFTWARE IN TERMS OF ITS CORRECTNESS, +-ACCURACY, RELIABILITY, CURRENTNESS, OR OTHERWISE. +- +-DISCLAIMER: UNDER NO CIRCUMSTANCES INCLUDING NEGLIGENCE, SHALL COPYRIGHT +-HOLDER AND ITS LICENSORS OR ITS DIRECTORS, OFFICERS, EMPLOYEES OR AGENTS +-("AUTHORIZED REPRESENTATIVES") BE LIABLE FOR ANY INCIDENTAL, INDIRECT, +-SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING DAMAGES FOR LOSS OF BUSINESS +-PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, AND THE +-LIKE) ARISING OUT OF THE USE, MISUSE OR INABILITY TO USE THE SOFTWARE, +-BREACH OR DEFAULT, INCLUDING THOSE ARISING FROM INFRINGEMENT OR ALLEGED +-INFRINGEMENT OF ANY PATENT, TRADEMARK, COPYRIGHT OR OTHER INTELLECTUAL +-PROPERTY RIGHT EVEN IF COPYRIGHT HOLDER AND ITS AUTHORIZED +-REPRESENTATIVES HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. IN +-NO EVENT SHALL COPYRIGHT HOLDER OR ITS AUTHORIZED REPRESENTATIVES TOTAL +-LIABILITY FOR ALL DAMAGES, LOSSES, AND CAUSES OF ACTION (WHETHER IN +-CONTRACT, TORT (INCLUDING NEGLIGENCE) OR OTHERWISE) EXCEED THE AMOUNT OF +-US$10. +- +-Notice: The Software is subject to United States export laws and +-regulations. You agree to comply with all domestic and international +-export laws and regulations that apply to the Software, including but +-not limited to the Export Administration Regulations administered by the +-U.S. Department of Commerce and International Traffic in Arm Regulations +-administered by the U.S. Department of State. These laws include +-restrictions on destinations, end users and end use. 
+diff --git a/LICENSE.qcom b/LICENSE.qcom +deleted file mode 100644 +index faacf9c..0000000 +--- a/LICENSE.qcom ++++ /dev/null +@@ -1,206 +0,0 @@ +-PLEASE READ THIS LICENSE AGREEMENT ("AGREEMENT") CAREFULLY. THIS AGREEMENT IS +-A BINDING LEGAL AGREEMENT ENTERED INTO BY AND BETWEEN YOU (OR IF YOU ARE +-ENTERING INTO THIS AGREEMENT ON BEHALF OF AN ENTITY, THEN THE ENTITY THAT YOU +-REPRESENT) AND QUALCOMM TECHNOLOGIES, INC. ("QTI" "WE" "OUR" OR "US"). THIS IS +-THE AGREEMENT THAT APPLIES TO YOUR USE OF THE DESIGNATED AND/OR LINKED +-APPLICATIONS, THE ENCLOSED QUALCOMM TECHNOLOGIES' MATERIALS, INCLUDING RELATED +-DOCUMENTATION AND ANY UPDATES OR IMPROVEMENTS THEREOF +-(COLLECTIVELY, "MATERIALS"). BY USING OR COMPLETING THE INSTALLATION OF THE +-MATERIALS, YOU ARE ACCEPTING THIS AGREEMENT AND YOU AGREE TO BE BOUND BY ITS +-TERMS AND CONDITIONS. IF YOU DO NOT AGREE TO THESE TERMS, QTI IS UNWILLING TO +-AND DOES NOT LICENSE THE MATERIALS TO YOU. IF YOU DO NOT AGREE TO THESE TERMS +-YOU MUST DISCONTINUE THE INSTALLATION PROCESS AND YOU MAY NOT USE THE MATERIALS +-OR RETAIN ANY COPIES OF THE MATERIALS. ANY USE OR POSSESSION OF THE MATERIALS +-BY YOU IS SUBJECT TO THE TERMS AND CONDITIONS SET FORTH IN THIS AGREEMENT. +- +-1. RIGHT TO USE DELIVERABLES; RESTRICTIONS. +- +- 1.1 License. Subject to the terms and conditions of this Agreement, +- including, without limitation, the restrictions, conditions, limitations and +- exclusions set forth in this Agreement, QTI hereby grants to you a +- nonexclusive, limited license under QTI's copyrights to: (i) install and use +- the Materials; and (ii) to reproduce and redistribute the binary code portions +- of the Materials (the "Redistributable Binary Code"). You may make and use a +- reasonable number of copies of any documentation. +- +- 1.2 Redistribution Restrictions. Distribution of the Redistributable Binary +- Code is subject to the following restrictions: (i) Redistributable Binary Code +- may only be distributed in binary format and may not be distributed in source +- code format:; (ii) the Redistributable Binary Code may only operate in +- conjunction with platforms incorporating Qualcomm Technologies, Inc. chipsets; +- (iii) redistribution of the Redistributable Binary Code must include the .txt +- file setting forth the terms and condition of this Agreement; (iv) you may not +- use Qualcomm Technologies' or its affiliates or subsidiaries name, logo or +- trademarks; and (v) copyright, trademark, patent and any other notices that +- appear on the Materials may not be removed or obscured. +- +- 1.3 Additional Restrictions. Except as expressly permitted by this Agreement, +- you shall have no right to sublicense, transfer or otherwise disclose the +- Materials to any third party. You shall not reverse engineer, reverse +- assemble, reverse translate, decompile or reduce to source code form any +- portion of the Materials provided in object code form or executable form. +- Except for the purposes expressly permitted in this Agreement, You shall not +- use the Materials for any other purpose. QTI (or its licensors) shall retain +- title and all ownership rights in and to the Materials and any alterations, +- modifications (including all derivative works), translations or adaptations +- made of the Materials, and all copies thereof, and nothing herein shall be +- deemed to grant any right to You under any of QTI's or its affiliates' +- patents. You shall not subject the Materials to any third party license +- terms (e.g., open source license terms). 
You shall not use the Materials for +- the purpose of identifying or providing evidence to support any potential +- patent infringement claim against QTI, its affiliates, or any of QTI's or +- QTI's affiliates' suppliers and/or direct or indirect customers. QTI hereby +- reserves all rights not expressly granted herein. +- +- 1.4 Third Party Software and Materials. The Software may contain or link to +- certain software and/or materials that are written or owned by third parties. +- Such third party code and materials may be licensed under separate or +- different terms and conditions and are not licensed to you under the terms of +- this Agreement. You agree to comply with all terms and conditions imposed on +- you in the applicable third party licenses. Such terms and conditions may +- impose certain obligations on you as a condition to the permitted use of such +- third party code and materials. QTI does not represent or warrant that such +- third party licensors have or will continue to license or make available their +- code and materials to you. +- +- 1.5 Feedback. QTI may from time to time receive suggestions, feedback or +- other information from You regarding the Materials. Any suggestions, feedback +- or other disclosures received from You are and shall be entirely voluntary on +- the part of You. Notwithstanding any other term in this Agreement, QTI shall +- be free to use suggestions, feedback or other information received from You, +- without obligation of any kind to You. The Parties agree that all inventions, +- product improvements, and modifications conceived of or made by QTI that are +- based, either in whole or in part, on ideas, feedback, suggestions, or +- recommended improvements received from You are the exclusive property of QTI, +- and all right, title and interest in and to any such inventions, product +- improvements, and modifications will vest solely in QTI. +- +- 1.6 No Technical Support. QTI is under no obligation to provide any form of +- technical support for the Materials, and if QTI, in its sole discretion, +- chooses to provide any form of support or information relating to the +- Materials, such support and information shall be deemed confidential and +- proprietary to QTI. +- +-2. WARRANTY DISCLAIMER. YOU EXPRESSLY ACKNOWLEDGE AND AGREE THAT THE USE OF +-THE MATERIALS IS AT YOUR SOLE RISK. THE MATERIALS AND TECHNICAL SUPPORT, IF +-ANY, ARE PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, WHETHER EXPRESS OR +-IMPLIED. QTI ITS LICENSORS AND AFFILIATES MAKE NO WARRANTIES, EXPRESS OR +-IMPLIED, WITH RESPECT TO THE MATERIALS OR ANY OTHER INFORMATION OR DOCUMENTATION +-PROVIDED UNDER THIS AGREEMENT, INCLUDING BUT NOT LIMITED TO ANY WARRANTY OF +-MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE OR AGAINST INFRINGEMENT, OR +-ANY EXPRESS OR IMPLIED WARRANTY ARISING OUT OF TRADE USAGE OR OUT OF A COURSE OF +-DEALING OR COURSE OF PERFORMANCE. NOTHING CONTAINED IN THIS AGREEMENT SHALL BE +-CONSTRUED AS (I) A WARRANTY OR REPRESENTATION BY QTI, ITS LICENSORS OR +-AFFILIATES AS TO THE VALIDITY OR SCOPE OF ANY PATENT, COPYRIGHT OR OTHER +-INTELLECTUAL PROPERTY RIGHT OR (II) A WARRANTY OR REPRESENTATION BY QTI THAT ANY +-MANUFACTURE OR USE WILL BE FREE FROM INFRINGEMENT OF PATENTS, COPYRIGHTS OR +-OTHER INTELLECTUAL PROPERTY RIGHTS OF OTHERS, AND IT SHALL BE THE SOLE +-RESPONSIBILITY OF YOU TO MAKE SUCH DETERMINATION AS IS NECESSARY WITH RESPECT TO +-THE ACQUISITION OF LICENSES UNDER PATENTS AND OTHER INTELLECTUAL PROPERTY OF +-THIRD PARTIES. +- +-3. 
NO OTHER LICENSES OR INTELLECTUAL PROPERTY RIGHTS. Neither this Agreement, +-nor any act by QTI or any of its affiliates pursuant to this Agreement or +-relating to the Materials (including, without limitation, the provision by QTI +-or its affiliates of the Materials), shall provide to You any license or any +-other rights whatsoever under any patents, trademarks, trade secrets, copyrights +-or any other intellectual property of QTI or any of its affiliates, except for +-the copyright rights expressly licensed under this Agreement. You understand and +-agree that: +- +- (i) Neither this Agreement, nor delivery of the Materials, grants any right to +- practice, or any other right at all with respect to, any patent of QTI or any +- of its affiliates; and +- +- (ii) A separate license agreement from QUALCOMM Incorporated is needed to use +- or practice any patent of QUALCOMM Incorporated. You agree not to contend in +- any context that, as a result of the provision or use of the Materials, either +- QTI or any of its affiliates has any obligation to extend, or You or any other +- party has obtained any right to, any license, whether express or implied, with +- respect to any patent of QTI or any of its affiliates for any purpose. +- +-4. TERMINATION. This Agreement shall be effective upon acceptance, or access or +-use of the Materials (whichever occurs first) by You and shall continue until +-terminated. You may terminate the Agreement at any time by deleting and +-destroying all copies of the Materials and all related information in Your +-possession or control. This Agreement terminates immediately and automatically, +-with or without notice, if You fail to comply with any provision hereof. +-Additionally, QTI may at any time terminate this Agreement, without cause, upon +-notice to You. Upon termination You must, to the extent possible, delete or +-destroy all copies of the Materials in Your possession and the license granted +-to You in this Agreement shall terminate. Sections 1.2 through 10 shall survive +-the termination of this Agreement. In the event that any restrictions, +-conditions, limitations are found to be either invalid or unenforceable, the +-rights granted to You in Section 1 (License) shall be null, void and ineffective +-from the Effective Date, and QTI shall also have the right to terminate this +-Agreement immediately, and with retroactive effect to the effective date. +- +-5. LIMITATION OF LIABILITY. IN NO EVENT SHALL QTI, QTI's AFFILIATES OR ITS +-LICENSORS BE LIABLE TO YOU FOR ANY INCIDENTAL, CONSEQUENTIAL OR SPECIAL DAMAGES, +-INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS, LOST SAVINGS, OR OTHER INCIDENTAL +-DAMAGES, ARISING OUT OF THE USE OR INABILITY TO USE, OR THE DELIVERY OR FAILURE +-TO DELIVER, ANY OF THE DELIVERABLES, OR ANY BREACH OF ANY OBLIGATION UNDER THIS +-AGREEMENT, EVEN IF QTI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +-THE FOREGOING LIMITATION OF LIABILITY SHALL REMAIN IN FULL FORCE AND EFFECT +-REGARDLESS OF WHETHER YOUR REMEDIES HEREUNDER ARE DETERMINED TO HAVE FAILED OF +-THEIR ESSENTIAL PURPOSE. THE ENTIRE LIABILITY OF QTI, QTI's AFFILIATES AND ITS +-LICENSORS, AND THE SOLE AND EXCLUSIVE REMEDY OF YOU, FOR ANY CLAIM OR CAUSE OF +-ACTION ARISING HEREUNDER (WHETHER IN CONTRACT, TORT, OR OTHERWISE) SHALL NOT +-EXCEED US$50. +- +-6. INDEMNIFICATION. 
You agree to indemnify and hold harmless QTI and its +-officers, directors, employees and successors and assigns against any and all +-third party claims, demands, causes of action, losses, liabilities, damages, +-costs and expenses, incurred by QTI (including but not limited to costs of +-defense, investigation and reasonable attorney's fees) arising out of, resulting +-from or related to: (i) any breach of this Agreement by You; and (ii) your acts, +-omissions, products and services. If requested by QTI, You agree to defend QTI +-in connection with any third party claims, demands, or causes of action +-resulting from, arising out of or in connection with any of the foregoing. +- +-7. ASSIGNMENT. You shall not assign this Agreement or any right or interest +-under this Agreement, nor delegate any obligation to be performed under this +-Agreement, without QTI's prior written consent. For purposes of this Section 7, +-an "assignment" by You under this Section shall be deemed to include, without +-limitation, any merger, consolidation, sale of all or substantially all of its +-assets, or any substantial change in the management or control of You. +-Any attempted assignment in contravention of this Section 9 shall be void. +-QTI may freely assign this Agreement or delegate any or all of its rights and +-obligations hereunder to any third party. +- +-8. COMPLIANCE WITH LAWS; APPLICABLE LAW. You agree to comply with all +-applicable local, international and national laws and regulations and with U.S. +-Export Administration Regulations, as they apply to the subject matter of this +-Agreement. This Agreement is governed by the laws of the State of California, +-excluding California's choice of law rules. +- +-9. CONTRACTING PARTIES. If the Materials are downloaded on any computer owned +-by a corporation or other legal entity, then this Agreement is formed by and +-between QTI and such entity. The individual accepting the terms of this +-Agreement represents and warrants to QTI that they have the authority to bind +-such entity to the terms and conditions of this Agreement. +- +-10. MISCELLANEOUS PROVISIONS. This Agreement, together with all exhibits +-attached hereto, which are incorporated herein by this reference, constitutes +-the entire agreement between QTI and You and supersedes all prior negotiations, +-representations and agreements between the parties with respect to the subject +-matter hereof. No addition or modification of this Agreement shall be effective +-unless made in writing and signed by the respective representatives of QTI and +-You. The restrictions, limitations, exclusions and conditions set forth in this +-Agreement shall apply even if QTI or any of its affiliates becomes aware of or +-fails to act in a manner to address any violation or failure to comply +-therewith. You hereby acknowledge and agree that the restrictions, limitations, +-conditions and exclusions imposed in this Agreement on the rights granted in +-this Agreement are not a derogation of the benefits of such rights. You further +-acknowledges that, in the absence of such restrictions, limitations, conditions +-and exclusions, QTI would not have entered into this Agreement with You. Each +-party shall be responsible for and shall bear its own expenses in connection +-with this Agreement. If any of the provisions of this Agreement are determined +-to be invalid, illegal, or otherwise unenforceable, the remaining provisions +-shall remain in full force and effect. 
This Agreement is entered into solely +-in the English language, and if for any reason any other language version is +-prepared by any party, it shall be solely for convenience and the English +-version shall govern and control all aspects. If You are located in the +-province of Quebec, Canada, the following applies: The Parties hereby confirm +-they have requested this Agreement and all related documents be prepared +-in English. +diff --git a/LICENSE.qcom_yamato b/LICENSE.qcom_yamato +deleted file mode 100644 +index 1fd702b..0000000 +--- a/LICENSE.qcom_yamato ++++ /dev/null +@@ -1,25 +0,0 @@ +-Copyright (c) 2008-2011, QUALCOMM Incorporated. All rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are met: +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- * Neither the name of QUALCOMM Incorporated nor +- the names of its contributors may be used to endorse or promote +- products derived from this software without specific prior written +- permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +-POSSIBILITY OF SUCH DAMAGE. +diff --git a/LICENSE.radeon b/LICENSE.radeon +deleted file mode 100644 +index b05e714..0000000 +--- a/LICENSE.radeon ++++ /dev/null +@@ -1,51 +0,0 @@ +-Copyright (C) 2009-2017 Advanced Micro Devices, Inc. All rights reserved. +- +-REDISTRIBUTION: Permission is hereby granted, free of any license fees, +-to any person obtaining a copy of this microcode (the "Software"), to +-install, reproduce, copy and distribute copies, in binary form only, of +-the Software and to permit persons to whom the Software is provided to +-do the same, provided that the following conditions are met: +- +-No reverse engineering, decompilation, or disassembly of this Software +-is permitted. +- +-Redistributions must reproduce the above copyright notice, this +-permission notice, and the following disclaimers and notices in the +-Software documentation and/or other materials provided with the +-Software. +- +-DISCLAIMER: THE USE OF THE SOFTWARE IS AT YOUR SOLE RISK. THE SOFTWARE +-IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND AND COPYRIGHT +-HOLDER AND ITS LICENSORS EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS AND +-IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
+-COPYRIGHT HOLDER AND ITS LICENSORS DO NOT WARRANT THAT THE SOFTWARE WILL +-MEET YOUR REQUIREMENTS, OR THAT THE OPERATION OF THE SOFTWARE WILL BE +-UNINTERRUPTED OR ERROR-FREE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF +-THE SOFTWARE IS ASSUMED BY YOU. FURTHERMORE, COPYRIGHT HOLDER AND ITS +-LICENSORS DO NOT WARRANT OR MAKE ANY REPRESENTATIONS REGARDING THE USE +-OR THE RESULTS OF THE USE OF THE SOFTWARE IN TERMS OF ITS CORRECTNESS, +-ACCURACY, RELIABILITY, CURRENTNESS, OR OTHERWISE. +- +-DISCLAIMER: UNDER NO CIRCUMSTANCES INCLUDING NEGLIGENCE, SHALL COPYRIGHT +-HOLDER AND ITS LICENSORS OR ITS DIRECTORS, OFFICERS, EMPLOYEES OR AGENTS +-("AUTHORIZED REPRESENTATIVES") BE LIABLE FOR ANY INCIDENTAL, INDIRECT, +-SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING DAMAGES FOR LOSS OF BUSINESS +-PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, AND THE +-LIKE) ARISING OUT OF THE USE, MISUSE OR INABILITY TO USE THE SOFTWARE, +-BREACH OR DEFAULT, INCLUDING THOSE ARISING FROM INFRINGEMENT OR ALLEGED +-INFRINGEMENT OF ANY PATENT, TRADEMARK, COPYRIGHT OR OTHER INTELLECTUAL +-PROPERTY RIGHT EVEN IF COPYRIGHT HOLDER AND ITS AUTHORIZED +-REPRESENTATIVES HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. IN +-NO EVENT SHALL COPYRIGHT HOLDER OR ITS AUTHORIZED REPRESENTATIVES TOTAL +-LIABILITY FOR ALL DAMAGES, LOSSES, AND CAUSES OF ACTION (WHETHER IN +-CONTRACT, TORT (INCLUDING NEGLIGENCE) OR OTHERWISE) EXCEED THE AMOUNT OF +-US$10. +- +-Notice: The Software is subject to United States export laws and +-regulations. You agree to comply with all domestic and international +-export laws and regulations that apply to the Software, including but +-not limited to the Export Administration Regulations administered by the +-U.S. Department of Commerce and International Traffic in Arm Regulations +-administered by the U.S. Department of State. These laws include +-restrictions on destinations, end users and end use. +diff --git a/WHENCE b/WHENCE +index 58e9ca1..3bb6523 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -123,904 +123,6 @@ Available from http://ldriver.qlogic.com/firmware/netxen_nic/new/ + + -------------------------------------------------------------------------- + +-Driver: mga - Matrox G200/G400/G550 +- +-File: matrox/g200_warp.fw +-File: matrox/g400_warp.fw +- +-Licence: +- +-Copyright 1999 Matrox Graphics Inc. +-All Rights Reserved. +- +-Permission is hereby granted, free of charge, to any person obtaining a +-copy of this software and associated documentation files (the "Software"), +-to deal in the Software without restriction, including without limitation +-the rights to use, copy, modify, merge, publish, distribute, sublicense, +-and/or sell copies of the Software, and to permit persons to whom the +-Software is furnished to do so, subject to the following conditions: +- +-The above copyright notice and this permission notice shall be included +-in all copies or substantial portions of the Software. +- +-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +-MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, +-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +-OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +- +-Found in hex form in kernel source. 
+- +--------------------------------------------------------------------------- +- +-Driver: r128 - ATI Rage 128 +- +-File: r128/r128_cce.bin +- +-Licence: +- +-Copyright 2000 Advanced Micro Devices, Inc. +- +- * Permission is hereby granted, free of charge, to any person obtaining a +- * copy of this software and associated documentation files (the "Software"), +- * to deal in the Software without restriction, including without limitation +- * the rights to use, copy, modify, merge, publish, distribute, sublicense, +- * and/or sell copies of the Software, and to permit persons to whom the +- * Software is furnished to do so, subject to the following conditions: +- * +- * The above copyright notice and this permission notice (including the next +- * paragraph) shall be included in all copies or substantial portions of the +- * Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +- * DEALINGS IN THE SOFTWARE. +- +-Found in decimal form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: radeon - ATI Radeon +- +-File: radeon/R100_cp.bin +-File: radeon/R200_cp.bin +-File: radeon/R300_cp.bin +-File: radeon/R420_cp.bin +-File: radeon/RS600_cp.bin +-File: radeon/RS690_cp.bin +-File: radeon/R520_cp.bin +-File: radeon/R600_pfp.bin +-File: radeon/R600_me.bin +-File: radeon/RV610_pfp.bin +-File: radeon/RV610_me.bin +-File: radeon/RV630_pfp.bin +-File: radeon/RV630_me.bin +-File: radeon/RV620_pfp.bin +-File: radeon/RV620_me.bin +-File: radeon/RV635_pfp.bin +-File: radeon/RV635_me.bin +-File: radeon/RV670_pfp.bin +-File: radeon/RV670_me.bin +-File: radeon/RS780_pfp.bin +-File: radeon/RS780_me.bin +-File: radeon/RV770_pfp.bin +-File: radeon/RV770_me.bin +-File: radeon/RV730_pfp.bin +-File: radeon/RV730_me.bin +-File: radeon/RV710_pfp.bin +-File: radeon/RV710_me.bin +- +-Licence: +- +- * Copyright 2007-2009 Advanced Micro Devices, Inc. +- * All Rights Reserved. +- * +- * Permission is hereby granted, free of charge, to any person obtaining a +- * copy of this software and associated documentation files (the "Software"), +- * to deal in the Software without restriction, including without limitation +- * the rights to use, copy, modify, merge, publish, distribute, sublicense, +- * and/or sell copies of the Software, and to permit persons to whom the +- * Software is furnished to do so, subject to the following conditions: +- * +- * The above copyright notice and this permission notice (including the next +- * paragraph) shall be included in all copies or substantial portions of the +- * Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- +-Driver: radeon - ATI Radeon +- +-File: radeon/R600_rlc.bin +-File: radeon/R600_uvd.bin +-File: radeon/RS780_uvd.bin +-File: radeon/R700_rlc.bin +-File: radeon/RV710_uvd.bin +-File: radeon/RV710_smc.bin +-File: radeon/RV730_smc.bin +-File: radeon/RV740_smc.bin +-File: radeon/RV770_smc.bin +-File: radeon/RV770_uvd.bin +-File: radeon/CEDAR_me.bin +-File: radeon/CEDAR_pfp.bin +-File: radeon/CEDAR_rlc.bin +-File: radeon/CEDAR_smc.bin +-File: radeon/CYPRESS_me.bin +-File: radeon/CYPRESS_pfp.bin +-File: radeon/CYPRESS_rlc.bin +-File: radeon/CYPRESS_uvd.bin +-File: radeon/CYPRESS_smc.bin +-File: radeon/JUNIPER_me.bin +-File: radeon/JUNIPER_pfp.bin +-File: radeon/JUNIPER_rlc.bin +-File: radeon/JUNIPER_smc.bin +-File: radeon/REDWOOD_me.bin +-File: radeon/REDWOOD_pfp.bin +-File: radeon/REDWOOD_rlc.bin +-File: radeon/REDWOOD_smc.bin +-File: radeon/PALM_me.bin +-File: radeon/PALM_pfp.bin +-File: radeon/SUMO_rlc.bin +-File: radeon/SUMO_uvd.bin +-File: radeon/BARTS_mc.bin +-File: radeon/BARTS_me.bin +-File: radeon/BARTS_pfp.bin +-File: radeon/BARTS_smc.bin +-File: radeon/BTC_rlc.bin +-File: radeon/CAICOS_mc.bin +-File: radeon/CAICOS_me.bin +-File: radeon/CAICOS_pfp.bin +-File: radeon/CAICOS_smc.bin +-File: radeon/TURKS_mc.bin +-File: radeon/TURKS_me.bin +-File: radeon/TURKS_pfp.bin +-File: radeon/TURKS_smc.bin +-File: radeon/CAYMAN_mc.bin +-File: radeon/CAYMAN_me.bin +-File: radeon/CAYMAN_pfp.bin +-File: radeon/CAYMAN_rlc.bin +-File: radeon/CAYMAN_smc.bin +-File: radeon/SUMO_pfp.bin +-File: radeon/SUMO_me.bin +-File: radeon/SUMO2_pfp.bin +-File: radeon/SUMO2_me.bin +-File: radeon/ARUBA_me.bin +-File: radeon/ARUBA_pfp.bin +-File: radeon/ARUBA_rlc.bin +-File: radeon/PITCAIRN_ce.bin +-File: radeon/PITCAIRN_mc.bin +-File: radeon/PITCAIRN_mc2.bin +-File: radeon/PITCAIRN_me.bin +-File: radeon/PITCAIRN_pfp.bin +-File: radeon/PITCAIRN_rlc.bin +-File: radeon/PITCAIRN_smc.bin +-File: radeon/TAHITI_ce.bin +-File: radeon/TAHITI_mc.bin +-File: radeon/TAHITI_mc2.bin +-File: radeon/TAHITI_me.bin +-File: radeon/TAHITI_pfp.bin +-File: radeon/TAHITI_rlc.bin +-File: radeon/TAHITI_uvd.bin +-File: radeon/TAHITI_smc.bin +-File: radeon/TAHITI_vce.bin +-File: radeon/VERDE_ce.bin +-File: radeon/VERDE_mc.bin +-File: radeon/VERDE_mc2.bin +-File: radeon/VERDE_me.bin +-File: radeon/VERDE_pfp.bin +-File: radeon/VERDE_rlc.bin +-File: radeon/VERDE_smc.bin +-File: radeon/OLAND_ce.bin +-File: radeon/OLAND_mc.bin +-File: radeon/OLAND_mc2.bin +-File: radeon/OLAND_me.bin +-File: radeon/OLAND_pfp.bin +-File: radeon/OLAND_rlc.bin +-File: radeon/OLAND_smc.bin +-File: radeon/HAINAN_ce.bin +-File: radeon/HAINAN_mc.bin +-File: radeon/HAINAN_mc2.bin +-File: radeon/HAINAN_me.bin +-File: radeon/HAINAN_pfp.bin +-File: radeon/HAINAN_rlc.bin +-File: radeon/HAINAN_smc.bin +-File: radeon/BONAIRE_ce.bin +-File: radeon/BONAIRE_mc.bin +-File: radeon/BONAIRE_mc2.bin +-File: radeon/BONAIRE_me.bin +-File: radeon/BONAIRE_mec.bin +-File: radeon/BONAIRE_pfp.bin +-File: radeon/BONAIRE_rlc.bin +-File: radeon/BONAIRE_sdma.bin +-File: radeon/BONAIRE_uvd.bin +-File: radeon/BONAIRE_smc.bin +-File: radeon/BONAIRE_vce.bin +-File: radeon/KABINI_ce.bin +-File: radeon/KABINI_me.bin +-File: radeon/KABINI_mec.bin +-File: radeon/KABINI_pfp.bin +-File: radeon/KABINI_rlc.bin +-File: radeon/KABINI_sdma.bin +-File: radeon/KAVERI_ce.bin +-File: radeon/KAVERI_me.bin +-File: radeon/KAVERI_mec.bin +-File: radeon/KAVERI_pfp.bin +-File: radeon/KAVERI_rlc.bin 
+-File: radeon/KAVERI_sdma.bin +-File: radeon/HAWAII_ce.bin +-File: radeon/HAWAII_mc.bin +-File: radeon/HAWAII_mc2.bin +-File: radeon/HAWAII_me.bin +-File: radeon/HAWAII_mec.bin +-File: radeon/HAWAII_pfp.bin +-File: radeon/HAWAII_rlc.bin +-File: radeon/HAWAII_sdma.bin +-File: radeon/HAWAII_smc.bin +-File: radeon/MULLINS_ce.bin +-File: radeon/MULLINS_me.bin +-File: radeon/MULLINS_mec.bin +-File: radeon/MULLINS_pfp.bin +-File: radeon/MULLINS_rlc.bin +-File: radeon/MULLINS_sdma.bin +-File: radeon/pitcairn_ce.bin +-File: radeon/pitcairn_k_smc.bin +-File: radeon/pitcairn_mc.bin +-File: radeon/pitcairn_me.bin +-File: radeon/pitcairn_pfp.bin +-File: radeon/pitcairn_rlc.bin +-File: radeon/pitcairn_smc.bin +-File: radeon/tahiti_ce.bin +-File: radeon/tahiti_k_smc.bin +-File: radeon/tahiti_mc.bin +-File: radeon/tahiti_me.bin +-File: radeon/tahiti_pfp.bin +-File: radeon/tahiti_rlc.bin +-File: radeon/tahiti_smc.bin +-File: radeon/verde_ce.bin +-File: radeon/verde_k_smc.bin +-File: radeon/verde_mc.bin +-File: radeon/verde_me.bin +-File: radeon/verde_pfp.bin +-File: radeon/verde_rlc.bin +-File: radeon/verde_smc.bin +-File: radeon/oland_ce.bin +-File: radeon/oland_k_smc.bin +-File: radeon/oland_mc.bin +-File: radeon/oland_me.bin +-File: radeon/oland_pfp.bin +-File: radeon/oland_rlc.bin +-File: radeon/oland_smc.bin +-File: radeon/hainan_ce.bin +-File: radeon/hainan_k_smc.bin +-File: radeon/hainan_mc.bin +-File: radeon/hainan_me.bin +-File: radeon/hainan_pfp.bin +-File: radeon/hainan_rlc.bin +-File: radeon/hainan_smc.bin +-File: radeon/bonaire_ce.bin +-File: radeon/bonaire_k_smc.bin +-File: radeon/bonaire_mc.bin +-File: radeon/bonaire_me.bin +-File: radeon/bonaire_mec.bin +-File: radeon/bonaire_pfp.bin +-File: radeon/bonaire_rlc.bin +-File: radeon/bonaire_sdma.bin +-File: radeon/bonaire_sdma1.bin +-File: radeon/bonaire_smc.bin +-File: radeon/bonaire_uvd.bin +-File: radeon/bonaire_vce.bin +-File: radeon/kabini_ce.bin +-File: radeon/kabini_me.bin +-File: radeon/kabini_mec.bin +-File: radeon/kabini_pfp.bin +-File: radeon/kabini_rlc.bin +-File: radeon/kabini_sdma.bin +-File: radeon/kabini_sdma1.bin +-File: radeon/kabini_uvd.bin +-File: radeon/kabini_vce.bin +-File: radeon/kaveri_ce.bin +-File: radeon/kaveri_me.bin +-File: radeon/kaveri_mec.bin +-File: radeon/kaveri_mec2.bin +-File: radeon/kaveri_pfp.bin +-File: radeon/kaveri_rlc.bin +-File: radeon/kaveri_sdma.bin +-File: radeon/kaveri_sdma1.bin +-File: radeon/kaveri_uvd.bin +-File: radeon/kaveri_vce.bin +-File: radeon/hawaii_ce.bin +-File: radeon/hawaii_k_smc.bin +-File: radeon/hawaii_mc.bin +-File: radeon/hawaii_me.bin +-File: radeon/hawaii_mec.bin +-File: radeon/hawaii_pfp.bin +-File: radeon/hawaii_rlc.bin +-File: radeon/hawaii_sdma.bin +-File: radeon/hawaii_sdma1.bin +-File: radeon/hawaii_smc.bin +-File: radeon/hawaii_uvd.bin +-File: radeon/hawaii_vce.bin +-File: radeon/mullins_ce.bin +-File: radeon/mullins_me.bin +-File: radeon/mullins_mec.bin +-File: radeon/mullins_pfp.bin +-File: radeon/mullins_rlc.bin +-File: radeon/mullins_sdma.bin +-File: radeon/mullins_sdma1.bin +-File: radeon/mullins_uvd.bin +-File: radeon/mullins_vce.bin +-File: radeon/banks_k_2_smc.bin +-File: radeon/si58_mc.bin +- +-Licence: Redistributable. See LICENSE.radeon for details. 
+- +--------------------------------------------------------------------------- +- +-Driver: amdgpu - AMD Radeon +- +-File: amdgpu/tahiti_ce.bin +-File: amdgpu/tahiti_k_smc.bin +-File: amdgpu/tahiti_mc.bin +-File: amdgpu/tahiti_me.bin +-File: amdgpu/tahiti_pfp.bin +-File: amdgpu/tahiti_rlc.bin +-File: amdgpu/tahiti_smc.bin +-File: amdgpu/tahiti_uvd.bin +-File: amdgpu/pitcairn_ce.bin +-File: amdgpu/pitcairn_k_smc.bin +-File: amdgpu/pitcairn_mc.bin +-File: amdgpu/pitcairn_me.bin +-File: amdgpu/pitcairn_pfp.bin +-File: amdgpu/pitcairn_rlc.bin +-File: amdgpu/pitcairn_smc.bin +-File: amdgpu/pitcairn_uvd.bin +-File: amdgpu/verde_ce.bin +-File: amdgpu/verde_k_smc.bin +-File: amdgpu/verde_mc.bin +-File: amdgpu/verde_me.bin +-File: amdgpu/verde_pfp.bin +-File: amdgpu/verde_rlc.bin +-File: amdgpu/verde_smc.bin +-File: amdgpu/verde_uvd.bin +-File: amdgpu/hainan_ce.bin +-File: amdgpu/hainan_k_smc.bin +-File: amdgpu/hainan_mc.bin +-File: amdgpu/hainan_me.bin +-File: amdgpu/hainan_pfp.bin +-File: amdgpu/hainan_rlc.bin +-File: amdgpu/hainan_smc.bin +-File: amdgpu/oland_ce.bin +-File: amdgpu/oland_k_smc.bin +-File: amdgpu/oland_mc.bin +-File: amdgpu/oland_me.bin +-File: amdgpu/oland_pfp.bin +-File: amdgpu/oland_rlc.bin +-File: amdgpu/oland_smc.bin +-File: amdgpu/oland_uvd.bin +-File: amdgpu/si58_mc.bin +-File: amdgpu/banks_k_2_smc.bin +-File: amdgpu/bonaire_ce.bin +-File: amdgpu/bonaire_k_smc.bin +-File: amdgpu/bonaire_mc.bin +-File: amdgpu/bonaire_me.bin +-File: amdgpu/bonaire_mec.bin +-File: amdgpu/bonaire_pfp.bin +-File: amdgpu/bonaire_rlc.bin +-File: amdgpu/bonaire_sdma.bin +-File: amdgpu/bonaire_sdma1.bin +-File: amdgpu/bonaire_smc.bin +-File: amdgpu/bonaire_uvd.bin +-File: amdgpu/bonaire_vce.bin +-File: amdgpu/hawaii_ce.bin +-File: amdgpu/hawaii_k_smc.bin +-File: amdgpu/hawaii_mc.bin +-File: amdgpu/hawaii_me.bin +-File: amdgpu/hawaii_mec.bin +-File: amdgpu/hawaii_pfp.bin +-File: amdgpu/hawaii_rlc.bin +-File: amdgpu/hawaii_sdma.bin +-File: amdgpu/hawaii_sdma1.bin +-File: amdgpu/hawaii_smc.bin +-File: amdgpu/hawaii_uvd.bin +-File: amdgpu/hawaii_vce.bin +-File: amdgpu/kabini_ce.bin +-File: amdgpu/kabini_me.bin +-File: amdgpu/kabini_mec.bin +-File: amdgpu/kabini_pfp.bin +-File: amdgpu/kabini_rlc.bin +-File: amdgpu/kabini_sdma.bin +-File: amdgpu/kabini_sdma1.bin +-File: amdgpu/kabini_uvd.bin +-File: amdgpu/kabini_vce.bin +-File: amdgpu/mullins_ce.bin +-File: amdgpu/mullins_me.bin +-File: amdgpu/mullins_mec.bin +-File: amdgpu/mullins_pfp.bin +-File: amdgpu/mullins_rlc.bin +-File: amdgpu/mullins_sdma.bin +-File: amdgpu/mullins_sdma1.bin +-File: amdgpu/mullins_uvd.bin +-File: amdgpu/mullins_vce.bin +-File: amdgpu/kaveri_ce.bin +-File: amdgpu/kaveri_me.bin +-File: amdgpu/kaveri_mec.bin +-File: amdgpu/kaveri_mec2.bin +-File: amdgpu/kaveri_pfp.bin +-File: amdgpu/kaveri_rlc.bin +-File: amdgpu/kaveri_sdma.bin +-File: amdgpu/kaveri_sdma1.bin +-File: amdgpu/kaveri_uvd.bin +-File: amdgpu/kaveri_vce.bin +-File: amdgpu/topaz_ce.bin +-File: amdgpu/topaz_k_smc.bin +-File: amdgpu/topaz_mc.bin +-File: amdgpu/topaz_me.bin +-File: amdgpu/topaz_mec2.bin +-File: amdgpu/topaz_mec.bin +-File: amdgpu/topaz_pfp.bin +-File: amdgpu/topaz_rlc.bin +-File: amdgpu/topaz_sdma1.bin +-File: amdgpu/topaz_sdma.bin +-File: amdgpu/topaz_smc.bin +-File: amdgpu/tonga_ce.bin +-File: amdgpu/tonga_k_smc.bin +-File: amdgpu/tonga_mc.bin +-File: amdgpu/tonga_me.bin +-File: amdgpu/tonga_mec2.bin +-File: amdgpu/tonga_mec.bin +-File: amdgpu/tonga_pfp.bin +-File: amdgpu/tonga_rlc.bin +-File: amdgpu/tonga_sdma1.bin +-File: amdgpu/tonga_sdma.bin +-File: 
amdgpu/tonga_smc.bin +-File: amdgpu/tonga_uvd.bin +-File: amdgpu/tonga_vce.bin +-File: amdgpu/carrizo_ce.bin +-File: amdgpu/carrizo_me.bin +-File: amdgpu/carrizo_mec2.bin +-File: amdgpu/carrizo_mec.bin +-File: amdgpu/carrizo_pfp.bin +-File: amdgpu/carrizo_rlc.bin +-File: amdgpu/carrizo_sdma1.bin +-File: amdgpu/carrizo_sdma.bin +-File: amdgpu/carrizo_uvd.bin +-File: amdgpu/carrizo_vce.bin +-File: amdgpu/fiji_ce.bin +-File: amdgpu/fiji_mc.bin +-File: amdgpu/fiji_me.bin +-File: amdgpu/fiji_mec2.bin +-File: amdgpu/fiji_mec.bin +-File: amdgpu/fiji_pfp.bin +-File: amdgpu/fiji_rlc.bin +-File: amdgpu/fiji_sdma1.bin +-File: amdgpu/fiji_sdma.bin +-File: amdgpu/fiji_smc.bin +-File: amdgpu/fiji_uvd.bin +-File: amdgpu/fiji_vce.bin +-File: amdgpu/stoney_ce.bin +-File: amdgpu/stoney_me.bin +-File: amdgpu/stoney_mec.bin +-File: amdgpu/stoney_pfp.bin +-File: amdgpu/stoney_rlc.bin +-File: amdgpu/stoney_sdma.bin +-File: amdgpu/stoney_uvd.bin +-File: amdgpu/stoney_vce.bin +-File: amdgpu/polaris10_ce.bin +-File: amdgpu/polaris10_ce_2.bin +-File: amdgpu/polaris10_mc.bin +-File: amdgpu/polaris10_k_mc.bin +-File: amdgpu/polaris10_me.bin +-File: amdgpu/polaris10_me_2.bin +-File: amdgpu/polaris10_mec2.bin +-File: amdgpu/polaris10_mec2_2.bin +-File: amdgpu/polaris10_mec.bin +-File: amdgpu/polaris10_mec_2.bin +-File: amdgpu/polaris10_pfp.bin +-File: amdgpu/polaris10_pfp_2.bin +-File: amdgpu/polaris10_rlc.bin +-File: amdgpu/polaris10_sdma1.bin +-File: amdgpu/polaris10_sdma.bin +-File: amdgpu/polaris10_smc.bin +-File: amdgpu/polaris10_k_smc.bin +-File: amdgpu/polaris10_k2_smc.bin +-File: amdgpu/polaris10_smc_sk.bin +-File: amdgpu/polaris10_uvd.bin +-File: amdgpu/polaris10_vce.bin +-File: amdgpu/polaris11_ce.bin +-File: amdgpu/polaris11_ce_2.bin +-File: amdgpu/polaris11_mc.bin +-File: amdgpu/polaris11_k_mc.bin +-File: amdgpu/polaris11_me.bin +-File: amdgpu/polaris11_me_2.bin +-File: amdgpu/polaris11_mec2.bin +-File: amdgpu/polaris11_mec2_2.bin +-File: amdgpu/polaris11_mec.bin +-File: amdgpu/polaris11_mec_2.bin +-File: amdgpu/polaris11_pfp.bin +-File: amdgpu/polaris11_pfp_2.bin +-File: amdgpu/polaris11_rlc.bin +-File: amdgpu/polaris11_sdma1.bin +-File: amdgpu/polaris11_sdma.bin +-File: amdgpu/polaris11_smc.bin +-File: amdgpu/polaris11_k_smc.bin +-File: amdgpu/polaris11_k2_smc.bin +-File: amdgpu/polaris11_smc_sk.bin +-File: amdgpu/polaris11_uvd.bin +-File: amdgpu/polaris11_vce.bin +-File: amdgpu/polaris12_ce.bin +-File: amdgpu/polaris12_ce_2.bin +-File: amdgpu/polaris12_mc.bin +-File: amdgpu/polaris12_k_mc.bin +-File: amdgpu/polaris12_32_mc.bin +-File: amdgpu/polaris12_me.bin +-File: amdgpu/polaris12_me_2.bin +-File: amdgpu/polaris12_mec.bin +-File: amdgpu/polaris12_mec_2.bin +-File: amdgpu/polaris12_mec2.bin +-File: amdgpu/polaris12_mec2_2.bin +-File: amdgpu/polaris12_pfp.bin +-File: amdgpu/polaris12_pfp_2.bin +-File: amdgpu/polaris12_rlc.bin +-File: amdgpu/polaris12_sdma.bin +-File: amdgpu/polaris12_sdma1.bin +-File: amdgpu/polaris12_smc.bin +-File: amdgpu/polaris12_k_smc.bin +-File: amdgpu/polaris12_uvd.bin +-File: amdgpu/polaris12_vce.bin +-File: amdgpu/vegam_ce.bin +-File: amdgpu/vegam_me.bin +-File: amdgpu/vegam_mec.bin +-File: amdgpu/vegam_mec2.bin +-File: amdgpu/vegam_pfp.bin +-File: amdgpu/vegam_rlc.bin +-File: amdgpu/vegam_sdma.bin +-File: amdgpu/vegam_sdma1.bin +-File: amdgpu/vegam_smc.bin +-File: amdgpu/vegam_uvd.bin +-File: amdgpu/vegam_vce.bin +-File: amdgpu/vega10_acg_smc.bin +-File: amdgpu/vega10_asd.bin +-File: amdgpu/vega10_ce.bin +-File: amdgpu/vega10_gpu_info.bin +-File: amdgpu/vega10_me.bin +-File: 
amdgpu/vega10_mec.bin +-File: amdgpu/vega10_mec2.bin +-File: amdgpu/vega10_pfp.bin +-File: amdgpu/vega10_rlc.bin +-File: amdgpu/vega10_sdma.bin +-File: amdgpu/vega10_sdma1.bin +-File: amdgpu/vega10_smc.bin +-File: amdgpu/vega10_sos.bin +-File: amdgpu/vega10_uvd.bin +-File: amdgpu/vega10_vce.bin +-File: amdgpu/vega12_asd.bin +-File: amdgpu/vega12_ce.bin +-File: amdgpu/vega12_gpu_info.bin +-File: amdgpu/vega12_me.bin +-File: amdgpu/vega12_mec.bin +-File: amdgpu/vega12_mec2.bin +-File: amdgpu/vega12_pfp.bin +-File: amdgpu/vega12_rlc.bin +-File: amdgpu/vega12_sdma.bin +-File: amdgpu/vega12_sdma1.bin +-File: amdgpu/vega12_smc.bin +-File: amdgpu/vega12_sos.bin +-File: amdgpu/vega12_uvd.bin +-File: amdgpu/vega12_vce.bin +-File: amdgpu/vega20_asd.bin +-File: amdgpu/vega20_ce.bin +-File: amdgpu/vega20_me.bin +-File: amdgpu/vega20_mec.bin +-File: amdgpu/vega20_mec2.bin +-File: amdgpu/vega20_pfp.bin +-File: amdgpu/vega20_rlc.bin +-File: amdgpu/vega20_sdma.bin +-File: amdgpu/vega20_sdma1.bin +-File: amdgpu/vega20_smc.bin +-File: amdgpu/vega20_sos.bin +-File: amdgpu/vega20_uvd.bin +-File: amdgpu/vega20_vce.bin +-File: amdgpu/vega20_ta.bin +-File: amdgpu/raven_asd.bin +-File: amdgpu/raven_ce.bin +-File: amdgpu/raven_gpu_info.bin +-File: amdgpu/raven_me.bin +-File: amdgpu/raven_mec.bin +-File: amdgpu/raven_mec2.bin +-File: amdgpu/raven_pfp.bin +-File: amdgpu/raven_rlc.bin +-File: amdgpu/raven_sdma.bin +-File: amdgpu/raven_vcn.bin +-File: amdgpu/raven_dmcu.bin +-File: amdgpu/raven_kicker_rlc.bin +-File: amdgpu/raven_ta.bin +-File: amdgpu/picasso_asd.bin +-File: amdgpu/picasso_ce.bin +-File: amdgpu/picasso_gpu_info.bin +-File: amdgpu/picasso_me.bin +-File: amdgpu/picasso_mec.bin +-File: amdgpu/picasso_mec2.bin +-File: amdgpu/picasso_pfp.bin +-File: amdgpu/picasso_rlc.bin +-File: amdgpu/picasso_rlc_am4.bin +-File: amdgpu/picasso_sdma.bin +-File: amdgpu/picasso_vcn.bin +-File: amdgpu/picasso_ta.bin +-File: amdgpu/raven2_asd.bin +-File: amdgpu/raven2_ce.bin +-File: amdgpu/raven2_gpu_info.bin +-File: amdgpu/raven2_me.bin +-File: amdgpu/raven2_mec.bin +-File: amdgpu/raven2_mec2.bin +-File: amdgpu/raven2_pfp.bin +-File: amdgpu/raven2_rlc.bin +-File: amdgpu/raven2_sdma.bin +-File: amdgpu/raven2_vcn.bin +-File: amdgpu/raven2_ta.bin +-File: amdgpu/navi10_asd.bin +-File: amdgpu/navi10_ce.bin +-File: amdgpu/navi10_gpu_info.bin +-File: amdgpu/navi10_me.bin +-File: amdgpu/navi10_mec.bin +-File: amdgpu/navi10_mec2.bin +-File: amdgpu/navi10_pfp.bin +-File: amdgpu/navi10_rlc.bin +-File: amdgpu/navi10_sdma.bin +-File: amdgpu/navi10_sdma1.bin +-File: amdgpu/navi10_smc.bin +-File: amdgpu/navi10_sos.bin +-File: amdgpu/navi10_vcn.bin +-File: amdgpu/navi10_ta.bin +-File: amdgpu/navi14_asd.bin +-File: amdgpu/navi14_ce.bin +-File: amdgpu/navi14_ce_wks.bin +-File: amdgpu/navi14_gpu_info.bin +-File: amdgpu/navi14_me.bin +-File: amdgpu/navi14_me_wks.bin +-File: amdgpu/navi14_mec.bin +-File: amdgpu/navi14_mec_wks.bin +-File: amdgpu/navi14_mec2.bin +-File: amdgpu/navi14_mec2_wks.bin +-File: amdgpu/navi14_pfp.bin +-File: amdgpu/navi14_pfp_wks.bin +-File: amdgpu/navi14_rlc.bin +-File: amdgpu/navi14_sdma.bin +-File: amdgpu/navi14_sdma1.bin +-File: amdgpu/navi14_smc.bin +-File: amdgpu/navi14_sos.bin +-File: amdgpu/navi14_vcn.bin +-File: amdgpu/navi14_ta.bin +-File: amdgpu/navi12_asd.bin +-File: amdgpu/navi12_ce.bin +-File: amdgpu/navi12_dmcu.bin +-File: amdgpu/navi12_gpu_info.bin +-File: amdgpu/navi12_me.bin +-File: amdgpu/navi12_mec.bin +-File: amdgpu/navi12_mec2.bin +-File: amdgpu/navi12_pfp.bin +-File: amdgpu/navi12_rlc.bin +-File: 
amdgpu/navi12_sdma.bin +-File: amdgpu/navi12_sdma1.bin +-File: amdgpu/navi12_smc.bin +-File: amdgpu/navi12_sos.bin +-File: amdgpu/navi12_vcn.bin +-File: amdgpu/navi12_ta.bin +-File: amdgpu/renoir_asd.bin +-File: amdgpu/renoir_ce.bin +-File: amdgpu/renoir_gpu_info.bin +-File: amdgpu/renoir_me.bin +-File: amdgpu/renoir_mec.bin +-File: amdgpu/renoir_mec2.bin +-File: amdgpu/renoir_pfp.bin +-File: amdgpu/renoir_rlc.bin +-File: amdgpu/renoir_sdma.bin +-File: amdgpu/renoir_vcn.bin +-File: amdgpu/renoir_dmcub.bin +-File: amdgpu/renoir_ta.bin +-File: amdgpu/sienna_cichlid_ce.bin +-File: amdgpu/sienna_cichlid_dmcub.bin +-File: amdgpu/sienna_cichlid_me.bin +-File: amdgpu/sienna_cichlid_mec.bin +-File: amdgpu/sienna_cichlid_mec2.bin +-File: amdgpu/sienna_cichlid_pfp.bin +-File: amdgpu/sienna_cichlid_rlc.bin +-File: amdgpu/sienna_cichlid_sdma.bin +-File: amdgpu/sienna_cichlid_smc.bin +-File: amdgpu/sienna_cichlid_sos.bin +-File: amdgpu/sienna_cichlid_ta.bin +-File: amdgpu/sienna_cichlid_vcn.bin +-File: amdgpu/green_sardine_asd.bin +-File: amdgpu/green_sardine_ce.bin +-File: amdgpu/green_sardine_dmcub.bin +-File: amdgpu/green_sardine_me.bin +-File: amdgpu/green_sardine_mec2.bin +-File: amdgpu/green_sardine_mec.bin +-File: amdgpu/green_sardine_pfp.bin +-File: amdgpu/green_sardine_rlc.bin +-File: amdgpu/green_sardine_sdma.bin +-File: amdgpu/green_sardine_ta.bin +-File: amdgpu/green_sardine_vcn.bin +-File: amdgpu/navy_flounder_ce.bin +-File: amdgpu/navy_flounder_dmcub.bin +-File: amdgpu/navy_flounder_me.bin +-File: amdgpu/navy_flounder_mec.bin +-File: amdgpu/navy_flounder_mec2.bin +-File: amdgpu/navy_flounder_pfp.bin +-File: amdgpu/navy_flounder_rlc.bin +-File: amdgpu/navy_flounder_sdma.bin +-File: amdgpu/navy_flounder_smc.bin +-File: amdgpu/navy_flounder_sos.bin +-File: amdgpu/navy_flounder_ta.bin +-File: amdgpu/navy_flounder_vcn.bin +-File: amdgpu/arcturus_asd.bin +-File: amdgpu/arcturus_gpu_info.bin +-File: amdgpu/arcturus_mec2.bin +-File: amdgpu/arcturus_mec.bin +-File: amdgpu/arcturus_rlc.bin +-File: amdgpu/arcturus_sdma.bin +-File: amdgpu/arcturus_smc.bin +-File: amdgpu/arcturus_sos.bin +-File: amdgpu/arcturus_ta.bin +-File: amdgpu/arcturus_vcn.bin +-File: amdgpu/dimgrey_cavefish_ce.bin +-File: amdgpu/dimgrey_cavefish_dmcub.bin +-File: amdgpu/dimgrey_cavefish_me.bin +-File: amdgpu/dimgrey_cavefish_mec.bin +-File: amdgpu/dimgrey_cavefish_mec2.bin +-File: amdgpu/dimgrey_cavefish_pfp.bin +-File: amdgpu/dimgrey_cavefish_rlc.bin +-File: amdgpu/dimgrey_cavefish_sdma.bin +-File: amdgpu/dimgrey_cavefish_smc.bin +-File: amdgpu/dimgrey_cavefish_sos.bin +-File: amdgpu/dimgrey_cavefish_ta.bin +-File: amdgpu/dimgrey_cavefish_vcn.bin +-File: amdgpu/vangogh_asd.bin +-File: amdgpu/vangogh_ce.bin +-File: amdgpu/vangogh_dmcub.bin +-File: amdgpu/vangogh_me.bin +-File: amdgpu/vangogh_mec2.bin +-File: amdgpu/vangogh_mec.bin +-File: amdgpu/vangogh_pfp.bin +-File: amdgpu/vangogh_rlc.bin +-File: amdgpu/vangogh_sdma.bin +-File: amdgpu/vangogh_toc.bin +-File: amdgpu/vangogh_vcn.bin +-File: amdgpu/yellow_carp_asd.bin +-File: amdgpu/yellow_carp_ce.bin +-File: amdgpu/yellow_carp_dmcub.bin +-File: amdgpu/yellow_carp_me.bin +-File: amdgpu/yellow_carp_mec.bin +-File: amdgpu/yellow_carp_mec2.bin +-File: amdgpu/yellow_carp_pfp.bin +-File: amdgpu/yellow_carp_rlc.bin +-File: amdgpu/yellow_carp_sdma.bin +-File: amdgpu/yellow_carp_ta.bin +-File: amdgpu/yellow_carp_toc.bin +-File: amdgpu/yellow_carp_vcn.bin +-File: amdgpu/beige_goby_ce.bin +-File: amdgpu/beige_goby_dmcub.bin +-File: amdgpu/beige_goby_me.bin +-File: 
amdgpu/beige_goby_mec.bin +-File: amdgpu/beige_goby_mec2.bin +-File: amdgpu/beige_goby_pfp.bin +-File: amdgpu/beige_goby_rlc.bin +-File: amdgpu/beige_goby_sdma.bin +-File: amdgpu/beige_goby_smc.bin +-File: amdgpu/beige_goby_sos.bin +-File: amdgpu/beige_goby_ta.bin +-File: amdgpu/beige_goby_vcn.bin +-File: amdgpu/cyan_skillfish2_ce.bin +-File: amdgpu/cyan_skillfish2_me.bin +-File: amdgpu/cyan_skillfish2_mec.bin +-File: amdgpu/cyan_skillfish2_mec2.bin +-File: amdgpu/cyan_skillfish2_pfp.bin +-File: amdgpu/cyan_skillfish2_rlc.bin +-File: amdgpu/cyan_skillfish2_sdma.bin +-File: amdgpu/cyan_skillfish2_sdma1.bin +-File: amdgpu/aldebaran_mec2.bin +-File: amdgpu/aldebaran_mec.bin +-File: amdgpu/aldebaran_rlc.bin +-File: amdgpu/aldebaran_sdma.bin +-File: amdgpu/aldebaran_sjt_mec2.bin +-File: amdgpu/aldebaran_sjt_mec.bin +-File: amdgpu/aldebaran_smc.bin +-File: amdgpu/aldebaran_sos.bin +-File: amdgpu/aldebaran_ta.bin +-File: amdgpu/aldebaran_vcn.bin +-File: amdgpu/gc_10_3_6_ce.bin +-File: amdgpu/gc_10_3_6_me.bin +-File: amdgpu/gc_10_3_6_mec.bin +-File: amdgpu/gc_10_3_6_mec2.bin +-File: amdgpu/gc_10_3_6_pfp.bin +-File: amdgpu/gc_10_3_6_rlc.bin +-File: amdgpu/gc_10_3_7_ce.bin +-File: amdgpu/gc_10_3_7_me.bin +-File: amdgpu/gc_10_3_7_mec.bin +-File: amdgpu/gc_10_3_7_mec2.bin +-File: amdgpu/gc_10_3_7_pfp.bin +-File: amdgpu/gc_10_3_7_rlc.bin +-File: amdgpu/gc_11_0_0_imu.bin +-File: amdgpu/gc_11_0_0_me.bin +-File: amdgpu/gc_11_0_0_mec.bin +-File: amdgpu/gc_11_0_0_mes1.bin +-File: amdgpu/gc_11_0_0_mes.bin +-File: amdgpu/gc_11_0_0_mes_2.bin +-File: amdgpu/gc_11_0_0_pfp.bin +-File: amdgpu/gc_11_0_0_rlc.bin +-File: amdgpu/gc_11_0_1_imu.bin +-File: amdgpu/gc_11_0_1_me.bin +-File: amdgpu/gc_11_0_1_mec.bin +-File: amdgpu/gc_11_0_1_mes.bin +-File: amdgpu/gc_11_0_1_mes1.bin +-File: amdgpu/gc_11_0_1_mes_2.bin +-File: amdgpu/gc_11_0_1_pfp.bin +-File: amdgpu/gc_11_0_1_rlc.bin +-File: amdgpu/gc_11_0_2_imu.bin +-File: amdgpu/gc_11_0_2_me.bin +-File: amdgpu/gc_11_0_2_mec.bin +-File: amdgpu/gc_11_0_2_mes1.bin +-File: amdgpu/gc_11_0_2_mes.bin +-File: amdgpu/gc_11_0_2_mes_2.bin +-File: amdgpu/gc_11_0_2_pfp.bin +-File: amdgpu/gc_11_0_2_rlc.bin +-File: amdgpu/gc_11_0_4_imu.bin +-File: amdgpu/gc_11_0_4_me.bin +-File: amdgpu/gc_11_0_4_mec.bin +-File: amdgpu/gc_11_0_4_mes.bin +-File: amdgpu/gc_11_0_4_mes1.bin +-File: amdgpu/gc_11_0_4_mes_2.bin +-File: amdgpu/gc_11_0_4_pfp.bin +-File: amdgpu/gc_11_0_4_rlc.bin +-File: amdgpu/dcn_3_1_4_dmcub.bin +-File: amdgpu/dcn_3_1_5_dmcub.bin +-File: amdgpu/dcn_3_1_6_dmcub.bin +-File: amdgpu/dcn_3_2_0_dmcub.bin +-File: amdgpu/dcn_3_2_1_dmcub.bin +-File: amdgpu/psp_13_0_0_sos.bin +-File: amdgpu/psp_13_0_0_ta.bin +-File: amdgpu/psp_13_0_4_ta.bin +-File: amdgpu/psp_13_0_4_toc.bin +-File: amdgpu/psp_13_0_5_asd.bin +-File: amdgpu/psp_13_0_5_ta.bin +-File: amdgpu/psp_13_0_5_toc.bin +-File: amdgpu/psp_13_0_7_sos.bin +-File: amdgpu/psp_13_0_7_ta.bin +-File: amdgpu/psp_13_0_8_asd.bin +-File: amdgpu/psp_13_0_8_ta.bin +-File: amdgpu/psp_13_0_8_toc.bin +-File: amdgpu/psp_13_0_11_ta.bin +-File: amdgpu/psp_13_0_11_toc.bin +-File: amdgpu/sdma_5_2_6.bin +-File: amdgpu/sdma_5_2_7.bin +-File: amdgpu/sdma_6_0_0.bin +-File: amdgpu/sdma_6_0_1.bin +-File: amdgpu/sdma_6_0_2.bin +-File: amdgpu/smu_13_0_0.bin +-File: amdgpu/smu_13_0_7.bin +-File: amdgpu/vcn_3_1_2.bin +-File: amdgpu/vcn_4_0_0.bin +-File: amdgpu/vcn_4_0_2.bin +-File: amdgpu/vcn_4_0_4.bin +- +-Licence: Redistributable. See LICENSE.amdgpu for details. 
+- +--------------------------------------------------------------------------- +- + Driver: qed - QLogic 4xxxx Ethernet Driver Core Module. + + File: qed/qed_init_values_zipped-8.4.2.0.bin +@@ -1459,424 +561,6 @@ Version: HuC API/APB ver 8.5.0 for Meteorlake + License: Redistributable. See LICENSE.i915 for details + -------------------------------------------------------------------------- + +-Driver: nouveau - NVIDIA GPU driver +- +-File: nvidia/gk20a/fecs_data.bin +-File: nvidia/gk20a/fecs_inst.bin +-File: nvidia/gk20a/gpccs_data.bin +-File: nvidia/gk20a/gpccs_inst.bin +-File: nvidia/gk20a/sw_bundle_init.bin +-File: nvidia/gk20a/sw_ctx.bin +-File: nvidia/gk20a/sw_method_init.bin +-File: nvidia/gk20a/sw_nonctx.bin +-File: nvidia/gm200/acr/bl.bin +-File: nvidia/gm200/acr/ucode_load.bin +-File: nvidia/gm200/acr/ucode_unload.bin +-File: nvidia/gm200/gr/fecs_bl.bin +-File: nvidia/gm200/gr/fecs_data.bin +-File: nvidia/gm200/gr/fecs_inst.bin +-File: nvidia/gm200/gr/fecs_sig.bin +-File: nvidia/gm200/gr/gpccs_bl.bin +-File: nvidia/gm200/gr/gpccs_data.bin +-File: nvidia/gm200/gr/gpccs_inst.bin +-File: nvidia/gm200/gr/gpccs_sig.bin +-File: nvidia/gm200/gr/sw_bundle_init.bin +-File: nvidia/gm200/gr/sw_ctx.bin +-File: nvidia/gm200/gr/sw_method_init.bin +-File: nvidia/gm200/gr/sw_nonctx.bin +-Link: nvidia/gm204/acr/bl.bin -> ../../gm200/acr/bl.bin +-Link: nvidia/gm204/acr/ucode_load.bin -> ../../gm200/acr/ucode_load.bin +-Link: nvidia/gm204/acr/ucode_unload.bin -> ../../gm200/acr/ucode_unload.bin +-Link: nvidia/gm204/gr/fecs_bl.bin -> ../../gm200/gr/fecs_bl.bin +-File: nvidia/gm204/gr/fecs_data.bin +-Link: nvidia/gm204/gr/fecs_inst.bin -> ../../gm200/gr/fecs_inst.bin +-File: nvidia/gm204/gr/fecs_sig.bin +-Link: nvidia/gm204/gr/gpccs_bl.bin -> ../../gm200/gr/gpccs_bl.bin +-File: nvidia/gm204/gr/gpccs_data.bin +-Link: nvidia/gm204/gr/gpccs_inst.bin -> ../../gm200/gr/gpccs_inst.bin +-File: nvidia/gm204/gr/gpccs_sig.bin +-Link: nvidia/gm204/gr/sw_bundle_init.bin -> ../../gm200/gr/sw_bundle_init.bin +-Link: nvidia/gm204/gr/sw_ctx.bin -> ../../gm200/gr/sw_ctx.bin +-Link: nvidia/gm204/gr/sw_method_init.bin -> ../../gm200/gr/sw_method_init.bin +-Link: nvidia/gm204/gr/sw_nonctx.bin -> ../../gm200/gr/sw_nonctx.bin +-Link: nvidia/gm206/acr/bl.bin -> ../../gm200/acr/bl.bin +-File: nvidia/gm206/acr/ucode_load.bin +-File: nvidia/gm206/acr/ucode_unload.bin +-Link: nvidia/gm206/gr/fecs_bl.bin -> ../../gm200/gr/fecs_bl.bin +-File: nvidia/gm206/gr/fecs_data.bin +-Link: nvidia/gm206/gr/fecs_inst.bin -> ../../gm200/gr/fecs_inst.bin +-File: nvidia/gm206/gr/fecs_sig.bin +-Link: nvidia/gm206/gr/gpccs_bl.bin -> ../../gm200/gr/gpccs_bl.bin +-File: nvidia/gm206/gr/gpccs_data.bin +-Link: nvidia/gm206/gr/gpccs_inst.bin -> ../../gm200/gr/gpccs_inst.bin +-File: nvidia/gm206/gr/gpccs_sig.bin +-Link: nvidia/gm206/gr/sw_bundle_init.bin -> ../../gm200/gr/sw_bundle_init.bin +-Link: nvidia/gm206/gr/sw_ctx.bin -> ../../gm200/gr/sw_ctx.bin +-Link: nvidia/gm206/gr/sw_method_init.bin -> ../../gm200/gr/sw_method_init.bin +-Link: nvidia/gm206/gr/sw_nonctx.bin -> ../../gm200/gr/sw_nonctx.bin +-File: nvidia/gm20b/acr/bl.bin +-File: nvidia/gm20b/acr/ucode_load.bin +-File: nvidia/gm20b/gr/fecs_bl.bin +-File: nvidia/gm20b/gr/fecs_data.bin +-File: nvidia/gm20b/gr/fecs_inst.bin +-File: nvidia/gm20b/gr/fecs_sig.bin +-File: nvidia/gm20b/gr/gpccs_data.bin +-File: nvidia/gm20b/gr/gpccs_inst.bin +-File: nvidia/gm20b/gr/sw_bundle_init.bin +-File: nvidia/gm20b/gr/sw_ctx.bin +-Link: nvidia/gm20b/gr/sw_method_init.bin -> ../../gm200/gr/sw_method_init.bin 
+-File: nvidia/gm20b/gr/sw_nonctx.bin +-File: nvidia/gm20b/pmu/desc.bin +-File: nvidia/gm20b/pmu/image.bin +-File: nvidia/gm20b/pmu/sig.bin +-File: nvidia/gp100/acr/bl.bin +-File: nvidia/gp100/acr/ucode_load.bin +-File: nvidia/gp100/acr/ucode_unload.bin +-Link: nvidia/gp100/gr/fecs_bl.bin -> ../../gm200/gr/fecs_bl.bin +-File: nvidia/gp100/gr/fecs_data.bin +-File: nvidia/gp100/gr/fecs_inst.bin +-File: nvidia/gp100/gr/fecs_sig.bin +-Link: nvidia/gp100/gr/gpccs_bl.bin -> ../../gm200/gr/gpccs_bl.bin +-File: nvidia/gp100/gr/gpccs_data.bin +-File: nvidia/gp100/gr/gpccs_inst.bin +-File: nvidia/gp100/gr/gpccs_sig.bin +-File: nvidia/gp100/gr/sw_bundle_init.bin +-File: nvidia/gp100/gr/sw_ctx.bin +-File: nvidia/gp100/gr/sw_method_init.bin +-File: nvidia/gp100/gr/sw_nonctx.bin +-File: nvidia/gp102/acr/bl.bin +-File: nvidia/gp102/acr/ucode_load.bin +-File: nvidia/gp102/acr/ucode_unload.bin +-File: nvidia/gp102/acr/unload_bl.bin +-Link: nvidia/gp102/gr/fecs_bl.bin -> ../../gm200/gr/fecs_bl.bin +-File: nvidia/gp102/gr/fecs_data.bin +-File: nvidia/gp102/gr/fecs_inst.bin +-File: nvidia/gp102/gr/fecs_sig.bin +-Link: nvidia/gp102/gr/gpccs_bl.bin -> ../../gm200/gr/gpccs_bl.bin +-File: nvidia/gp102/gr/gpccs_data.bin +-File: nvidia/gp102/gr/gpccs_inst.bin +-File: nvidia/gp102/gr/gpccs_sig.bin +-File: nvidia/gp102/gr/sw_bundle_init.bin +-File: nvidia/gp102/gr/sw_ctx.bin +-File: nvidia/gp102/gr/sw_method_init.bin +-File: nvidia/gp102/gr/sw_nonctx.bin +-File: nvidia/gp102/nvdec/scrubber.bin +-File: nvidia/gp102/sec2/desc.bin +-File: nvidia/gp102/sec2/image.bin +-File: nvidia/gp102/sec2/sig.bin +-File: nvidia/gp102/sec2/desc-1.bin +-File: nvidia/gp102/sec2/image-1.bin +-File: nvidia/gp102/sec2/sig-1.bin +-Link: nvidia/gp104/acr/bl.bin -> ../../gp102/acr/bl.bin +-Link: nvidia/gp104/acr/ucode_load.bin -> ../../gp102/acr/ucode_load.bin +-Link: nvidia/gp104/acr/ucode_unload.bin -> ../../gp102/acr/ucode_unload.bin +-Link: nvidia/gp104/acr/unload_bl.bin -> ../../gp102/acr/unload_bl.bin +-Link: nvidia/gp104/gr/fecs_bl.bin -> ../../gp102/gr/fecs_bl.bin +-File: nvidia/gp104/gr/fecs_data.bin +-File: nvidia/gp104/gr/fecs_inst.bin +-File: nvidia/gp104/gr/fecs_sig.bin +-Link: nvidia/gp104/gr/gpccs_bl.bin -> ../../gp102/gr/gpccs_bl.bin +-File: nvidia/gp104/gr/gpccs_data.bin +-File: nvidia/gp104/gr/gpccs_inst.bin +-File: nvidia/gp104/gr/gpccs_sig.bin +-Link: nvidia/gp104/gr/sw_bundle_init.bin -> ../../gp102/gr/sw_bundle_init.bin +-Link: nvidia/gp104/gr/sw_ctx.bin -> ../../gp102/gr/sw_ctx.bin +-Link: nvidia/gp104/gr/sw_method_init.bin -> ../../gp102/gr/sw_method_init.bin +-Link: nvidia/gp104/gr/sw_nonctx.bin -> ../../gp102/gr/sw_nonctx.bin +-Link: nvidia/gp104/nvdec/scrubber.bin -> ../../gp102/nvdec/scrubber.bin +-Link: nvidia/gp104/sec2/desc.bin -> ../../gp102/sec2/desc.bin +-Link: nvidia/gp104/sec2/image.bin -> ../../gp102/sec2/image.bin +-Link: nvidia/gp104/sec2/sig.bin -> ../../gp102/sec2/sig.bin +-Link: nvidia/gp104/sec2/desc-1.bin -> ../../gp102/sec2/desc-1.bin +-Link: nvidia/gp104/sec2/image-1.bin -> ../../gp102/sec2/image-1.bin +-Link: nvidia/gp104/sec2/sig-1.bin -> ../../gp102/sec2/sig-1.bin +-Link: nvidia/gp106/acr/bl.bin -> ../../gp102/acr/bl.bin +-Link: nvidia/gp106/acr/ucode_load.bin -> ../../gp102/acr/ucode_load.bin +-Link: nvidia/gp106/acr/ucode_unload.bin -> ../../gp102/acr/ucode_unload.bin +-Link: nvidia/gp106/acr/unload_bl.bin -> ../../gp102/acr/unload_bl.bin +-Link: nvidia/gp106/gr/fecs_bl.bin -> ../../gp102/gr/fecs_bl.bin +-File: nvidia/gp106/gr/fecs_data.bin +-Link: nvidia/gp106/gr/fecs_inst.bin -> 
../../gp102/gr/fecs_inst.bin +-File: nvidia/gp106/gr/fecs_sig.bin +-Link: nvidia/gp106/gr/gpccs_bl.bin -> ../../gp102/gr/gpccs_bl.bin +-File: nvidia/gp106/gr/gpccs_data.bin +-Link: nvidia/gp106/gr/gpccs_inst.bin -> ../../gp102/gr/gpccs_inst.bin +-File: nvidia/gp106/gr/gpccs_sig.bin +-Link: nvidia/gp106/gr/sw_bundle_init.bin -> ../../gp102/gr/sw_bundle_init.bin +-Link: nvidia/gp106/gr/sw_ctx.bin -> ../../gp102/gr/sw_ctx.bin +-Link: nvidia/gp106/gr/sw_method_init.bin -> ../../gp102/gr/sw_method_init.bin +-Link: nvidia/gp106/gr/sw_nonctx.bin -> ../../gp102/gr/sw_nonctx.bin +-Link: nvidia/gp106/nvdec/scrubber.bin -> ../../gp102/nvdec/scrubber.bin +-Link: nvidia/gp106/sec2/desc.bin -> ../../gp102/sec2/desc.bin +-Link: nvidia/gp106/sec2/image.bin -> ../../gp102/sec2/image.bin +-Link: nvidia/gp106/sec2/sig.bin -> ../../gp102/sec2/sig.bin +-Link: nvidia/gp106/sec2/desc-1.bin -> ../../gp102/sec2/desc-1.bin +-Link: nvidia/gp106/sec2/image-1.bin -> ../../gp102/sec2/image-1.bin +-Link: nvidia/gp106/sec2/sig-1.bin -> ../../gp102/sec2/sig-1.bin +-Link: nvidia/gp107/acr/bl.bin -> ../../gp102/acr/bl.bin +-Link: nvidia/gp107/acr/ucode_load.bin -> ../../gp102/acr/ucode_load.bin +-Link: nvidia/gp107/acr/ucode_unload.bin -> ../../gp102/acr/ucode_unload.bin +-Link: nvidia/gp107/acr/unload_bl.bin -> ../../gp102/acr/unload_bl.bin +-File: nvidia/gp107/gr/fecs_bl.bin +-File: nvidia/gp107/gr/fecs_data.bin +-File: nvidia/gp107/gr/fecs_inst.bin +-File: nvidia/gp107/gr/fecs_sig.bin +-File: nvidia/gp107/gr/gpccs_bl.bin +-File: nvidia/gp107/gr/gpccs_data.bin +-File: nvidia/gp107/gr/gpccs_inst.bin +-File: nvidia/gp107/gr/gpccs_sig.bin +-Link: nvidia/gp107/gr/sw_bundle_init.bin -> ../../gp102/gr/sw_bundle_init.bin +-File: nvidia/gp107/gr/sw_ctx.bin +-Link: nvidia/gp107/gr/sw_method_init.bin -> ../../gp102/gr/sw_method_init.bin +-File: nvidia/gp107/gr/sw_nonctx.bin +-Link: nvidia/gp107/nvdec/scrubber.bin -> ../../gp102/nvdec/scrubber.bin +-Link: nvidia/gp107/sec2/desc.bin -> ../../gp102/sec2/desc.bin +-Link: nvidia/gp107/sec2/image.bin -> ../../gp102/sec2/image.bin +-Link: nvidia/gp107/sec2/sig.bin -> ../../gp102/sec2/sig.bin +-Link: nvidia/gp107/sec2/desc-1.bin -> ../../gp102/sec2/desc-1.bin +-Link: nvidia/gp107/sec2/image-1.bin -> ../../gp102/sec2/image-1.bin +-Link: nvidia/gp107/sec2/sig-1.bin -> ../../gp102/sec2/sig-1.bin +-File: nvidia/gp10b/acr/bl.bin +-File: nvidia/gp10b/acr/ucode_load.bin +-File: nvidia/gp10b/gr/fecs_bl.bin +-File: nvidia/gp10b/gr/fecs_data.bin +-File: nvidia/gp10b/gr/fecs_inst.bin +-File: nvidia/gp10b/gr/fecs_sig.bin +-File: nvidia/gp10b/gr/gpccs_bl.bin +-File: nvidia/gp10b/gr/gpccs_data.bin +-File: nvidia/gp10b/gr/gpccs_inst.bin +-File: nvidia/gp10b/gr/gpccs_sig.bin +-File: nvidia/gp10b/gr/sw_bundle_init.bin +-File: nvidia/gp10b/gr/sw_ctx.bin +-File: nvidia/gp10b/gr/sw_method_init.bin +-File: nvidia/gp10b/gr/sw_nonctx.bin +-File: nvidia/gp10b/pmu/desc.bin +-File: nvidia/gp10b/pmu/image.bin +-File: nvidia/gp10b/pmu/sig.bin +-Link: nvidia/gp108/acr/bl.bin -> ../../gp102/acr/bl.bin +-Link: nvidia/gp108/acr/ucode_load.bin -> ../../gp102/acr/ucode_load.bin +-Link: nvidia/gp108/acr/ucode_unload.bin -> ../../gp102/acr/ucode_unload.bin +-Link: nvidia/gp108/acr/unload_bl.bin -> ../../gp102/acr/unload_bl.bin +-File: nvidia/gp108/gr/fecs_bl.bin +-File: nvidia/gp108/gr/fecs_data.bin +-File: nvidia/gp108/gr/fecs_inst.bin +-File: nvidia/gp108/gr/fecs_sig.bin +-File: nvidia/gp108/gr/gpccs_bl.bin +-File: nvidia/gp108/gr/gpccs_data.bin +-File: nvidia/gp108/gr/gpccs_inst.bin +-File: nvidia/gp108/gr/gpccs_sig.bin 
+-File: nvidia/gp108/gr/sw_bundle_init.bin +-File: nvidia/gp108/gr/sw_ctx.bin +-File: nvidia/gp108/gr/sw_method_init.bin +-File: nvidia/gp108/gr/sw_nonctx.bin +-Link: nvidia/gp108/nvdec/scrubber.bin -> ../../gp102/nvdec/scrubber.bin +-Link: nvidia/gp108/sec2/desc.bin -> ../../gp102/sec2/desc-1.bin +-Link: nvidia/gp108/sec2/image.bin -> ../../gp102/sec2/image-1.bin +-Link: nvidia/gp108/sec2/sig.bin -> ../../gp102/sec2/sig-1.bin +-File: nvidia/gv100/acr/bl.bin +-File: nvidia/gv100/acr/ucode_load.bin +-File: nvidia/gv100/acr/ucode_unload.bin +-File: nvidia/gv100/acr/unload_bl.bin +-File: nvidia/gv100/gr/fecs_bl.bin +-File: nvidia/gv100/gr/fecs_data.bin +-File: nvidia/gv100/gr/fecs_inst.bin +-File: nvidia/gv100/gr/fecs_sig.bin +-File: nvidia/gv100/gr/gpccs_bl.bin +-File: nvidia/gv100/gr/gpccs_data.bin +-File: nvidia/gv100/gr/gpccs_inst.bin +-File: nvidia/gv100/gr/gpccs_sig.bin +-File: nvidia/gv100/gr/sw_bundle_init.bin +-File: nvidia/gv100/gr/sw_ctx.bin +-File: nvidia/gv100/gr/sw_method_init.bin +-File: nvidia/gv100/gr/sw_nonctx.bin +-File: nvidia/gv100/nvdec/scrubber.bin +-File: nvidia/gv100/sec2/desc.bin +-File: nvidia/gv100/sec2/image.bin +-File: nvidia/gv100/sec2/sig.bin +-File: nvidia/tu102/acr/bl.bin +-File: nvidia/tu102/acr/ucode_ahesasc.bin +-File: nvidia/tu102/acr/ucode_asb.bin +-File: nvidia/tu102/acr/unload_bl.bin +-File: nvidia/tu102/acr/ucode_unload.bin +-File: nvidia/tu102/gr/fecs_bl.bin +-File: nvidia/tu102/gr/fecs_data.bin +-File: nvidia/tu102/gr/fecs_inst.bin +-File: nvidia/tu102/gr/fecs_sig.bin +-File: nvidia/tu102/gr/gpccs_bl.bin +-File: nvidia/tu102/gr/gpccs_data.bin +-File: nvidia/tu102/gr/gpccs_inst.bin +-File: nvidia/tu102/gr/gpccs_sig.bin +-File: nvidia/tu102/gr/sw_bundle_init.bin +-File: nvidia/tu102/gr/sw_ctx.bin +-File: nvidia/tu102/gr/sw_method_init.bin +-File: nvidia/tu102/gr/sw_nonctx.bin +-File: nvidia/tu102/gr/sw_veid_bundle_init.bin +-File: nvidia/tu102/nvdec/scrubber.bin +-File: nvidia/tu102/sec2/desc.bin +-File: nvidia/tu102/sec2/image.bin +-File: nvidia/tu102/sec2/sig.bin +-Link: nvidia/tu104/acr/bl.bin -> ../../tu102/acr/bl.bin +-Link: nvidia/tu104/acr/ucode_ahesasc.bin -> ../../tu102/acr/ucode_ahesasc.bin +-Link: nvidia/tu104/acr/ucode_asb.bin -> ../../tu102/acr/ucode_asb.bin +-Link: nvidia/tu104/acr/unload_bl.bin -> ../../tu102/acr/unload_bl.bin +-Link: nvidia/tu104/acr/ucode_unload.bin -> ../../tu102/acr/ucode_unload.bin +-Link: nvidia/tu104/gr/fecs_bl.bin -> ../../tu102/gr/fecs_bl.bin +-File: nvidia/tu104/gr/fecs_data.bin +-File: nvidia/tu104/gr/fecs_inst.bin +-File: nvidia/tu104/gr/fecs_sig.bin +-Link: nvidia/tu104/gr/gpccs_bl.bin -> ../../tu102/gr/gpccs_bl.bin +-File: nvidia/tu104/gr/gpccs_data.bin +-File: nvidia/tu104/gr/gpccs_inst.bin +-File: nvidia/tu104/gr/gpccs_sig.bin +-File: nvidia/tu104/gr/sw_bundle_init.bin +-File: nvidia/tu104/gr/sw_ctx.bin +-File: nvidia/tu104/gr/sw_method_init.bin +-File: nvidia/tu104/gr/sw_nonctx.bin +-File: nvidia/tu104/gr/sw_veid_bundle_init.bin +-Link: nvidia/tu104/nvdec/scrubber.bin -> ../../tu102/nvdec/scrubber.bin +-Link: nvidia/tu104/sec2/desc.bin -> ../../tu102/sec2/desc.bin +-Link: nvidia/tu104/sec2/image.bin -> ../../tu102/sec2/image.bin +-Link: nvidia/tu104/sec2/sig.bin -> ../../tu102/sec2/sig.bin +-Link: nvidia/tu106/acr/bl.bin -> ../../tu102/acr/bl.bin +-Link: nvidia/tu106/acr/ucode_ahesasc.bin -> ../../tu102/acr/ucode_ahesasc.bin +-Link: nvidia/tu106/acr/ucode_asb.bin -> ../../tu102/acr/ucode_asb.bin +-Link: nvidia/tu106/acr/unload_bl.bin -> ../../tu102/acr/unload_bl.bin +-Link: 
nvidia/tu106/acr/ucode_unload.bin -> ../../tu102/acr/ucode_unload.bin +-Link: nvidia/tu106/gr/fecs_bl.bin -> ../../tu102/gr/fecs_bl.bin +-File: nvidia/tu106/gr/fecs_data.bin +-File: nvidia/tu106/gr/fecs_inst.bin +-File: nvidia/tu106/gr/fecs_sig.bin +-Link: nvidia/tu106/gr/gpccs_bl.bin -> ../../tu102/gr/gpccs_bl.bin +-File: nvidia/tu106/gr/gpccs_data.bin +-File: nvidia/tu106/gr/gpccs_inst.bin +-File: nvidia/tu106/gr/gpccs_sig.bin +-File: nvidia/tu106/gr/sw_bundle_init.bin +-File: nvidia/tu106/gr/sw_ctx.bin +-File: nvidia/tu106/gr/sw_method_init.bin +-File: nvidia/tu106/gr/sw_nonctx.bin +-File: nvidia/tu106/gr/sw_veid_bundle_init.bin +-Link: nvidia/tu106/nvdec/scrubber.bin -> ../../tu102/nvdec/scrubber.bin +-Link: nvidia/tu106/sec2/desc.bin -> ../../tu102/sec2/desc.bin +-Link: nvidia/tu106/sec2/image.bin -> ../../tu102/sec2/image.bin +-Link: nvidia/tu106/sec2/sig.bin -> ../../tu102/sec2/sig.bin +-File: nvidia/tu116/acr/bl.bin +-File: nvidia/tu116/acr/ucode_ahesasc.bin +-File: nvidia/tu116/acr/ucode_asb.bin +-File: nvidia/tu116/acr/ucode_unload.bin +-File: nvidia/tu116/acr/unload_bl.bin +-File: nvidia/tu116/gr/fecs_bl.bin +-File: nvidia/tu116/gr/fecs_data.bin +-File: nvidia/tu116/gr/fecs_inst.bin +-File: nvidia/tu116/gr/fecs_sig.bin +-File: nvidia/tu116/gr/gpccs_bl.bin +-File: nvidia/tu116/gr/gpccs_data.bin +-File: nvidia/tu116/gr/gpccs_inst.bin +-File: nvidia/tu116/gr/gpccs_sig.bin +-File: nvidia/tu116/gr/sw_bundle_init.bin +-File: nvidia/tu116/gr/sw_ctx.bin +-File: nvidia/tu116/gr/sw_method_init.bin +-File: nvidia/tu116/gr/sw_nonctx.bin +-File: nvidia/tu116/gr/sw_veid_bundle_init.bin +-File: nvidia/tu116/nvdec/scrubber.bin +-File: nvidia/tu116/sec2/desc.bin +-File: nvidia/tu116/sec2/image.bin +-File: nvidia/tu116/sec2/sig.bin +-Link: nvidia/tu117/acr/bl.bin -> ../../tu116/acr/bl.bin +-Link: nvidia/tu117/acr/ucode_ahesasc.bin -> ../../tu116/acr/ucode_ahesasc.bin +-Link: nvidia/tu117/acr/ucode_asb.bin -> ../../tu116/acr/ucode_asb.bin +-Link: nvidia/tu117/acr/ucode_unload.bin -> ../../tu116/acr/ucode_unload.bin +-Link: nvidia/tu117/acr/unload_bl.bin -> ../../tu116/acr/unload_bl.bin +-Link: nvidia/tu117/gr/fecs_bl.bin -> ../../tu116/gr/fecs_bl.bin +-File: nvidia/tu117/gr/fecs_data.bin +-File: nvidia/tu117/gr/fecs_inst.bin +-File: nvidia/tu117/gr/fecs_sig.bin +-Link: nvidia/tu117/gr/gpccs_bl.bin -> ../../tu116/gr/gpccs_bl.bin +-File: nvidia/tu117/gr/gpccs_data.bin +-File: nvidia/tu117/gr/gpccs_inst.bin +-File: nvidia/tu117/gr/gpccs_sig.bin +-File: nvidia/tu117/gr/sw_bundle_init.bin +-File: nvidia/tu117/gr/sw_ctx.bin +-File: nvidia/tu117/gr/sw_method_init.bin +-File: nvidia/tu117/gr/sw_nonctx.bin +-File: nvidia/tu117/gr/sw_veid_bundle_init.bin +-Link: nvidia/tu117/nvdec/scrubber.bin -> ../../tu116/nvdec/scrubber.bin +-Link: nvidia/tu117/sec2/desc.bin -> ../../tu116/sec2/desc.bin +-Link: nvidia/tu117/sec2/image.bin -> ../../tu116/sec2/image.bin +-Link: nvidia/tu117/sec2/sig.bin -> ../../tu116/sec2/sig.bin +-File: nvidia/ga102/acr/ucode_ahesasc.bin +-File: nvidia/ga102/acr/ucode_asb.bin +-File: nvidia/ga102/acr/ucode_unload.bin +-File: nvidia/ga102/gr/fecs_bl.bin +-File: nvidia/ga102/gr/fecs_sig.bin +-File: nvidia/ga102/gr/gpccs_bl.bin +-File: nvidia/ga102/gr/gpccs_sig.bin +-File: nvidia/ga102/gr/NET_img.bin +-File: nvidia/ga102/nvdec/scrubber.bin +-File: nvidia/ga102/sec2/desc.bin +-File: nvidia/ga102/sec2/hs_bl_sig.bin +-File: nvidia/ga102/sec2/image.bin +-File: nvidia/ga102/sec2/sig.bin +-Link: nvidia/ga103/acr/ucode_ahesasc.bin -> ../../ga102/acr/ucode_ahesasc.bin +-Link: 
nvidia/ga103/acr/ucode_asb.bin -> ../../ga102/acr/ucode_asb.bin +-Link: nvidia/ga103/acr/ucode_unload.bin -> ../../ga102/acr/ucode_unload.bin +-File: nvidia/ga103/gr/fecs_bl.bin +-File: nvidia/ga103/gr/fecs_sig.bin +-File: nvidia/ga103/gr/gpccs_bl.bin +-File: nvidia/ga103/gr/gpccs_sig.bin +-File: nvidia/ga103/gr/NET_img.bin +-Link: nvidia/ga103/nvdec/scrubber.bin -> ../../ga102/nvdec/scrubber.bin +-Link: nvidia/ga103/sec2/desc.bin -> ../../ga102/sec2/desc.bin +-Link: nvidia/ga103/sec2/hs_bl_sig.bin -> ../../ga102/sec2/hs_bl_sig.bin +-Link: nvidia/ga103/sec2/image.bin -> ../../ga102/sec2/image.bin +-Link: nvidia/ga103/sec2/sig.bin -> ../../ga102/sec2/sig.bin +-Link: nvidia/ga104/acr/ucode_ahesasc.bin -> ../../ga102/acr/ucode_ahesasc.bin +-Link: nvidia/ga104/acr/ucode_asb.bin -> ../../ga102/acr/ucode_asb.bin +-Link: nvidia/ga104/acr/ucode_unload.bin -> ../../ga102/acr/ucode_unload.bin +-File: nvidia/ga104/gr/fecs_bl.bin +-File: nvidia/ga104/gr/fecs_sig.bin +-File: nvidia/ga104/gr/gpccs_bl.bin +-File: nvidia/ga104/gr/gpccs_sig.bin +-File: nvidia/ga104/gr/NET_img.bin +-Link: nvidia/ga104/nvdec/scrubber.bin -> ../../ga102/nvdec/scrubber.bin +-Link: nvidia/ga104/sec2/desc.bin -> ../../ga102/sec2/desc.bin +-Link: nvidia/ga104/sec2/hs_bl_sig.bin -> ../../ga102/sec2/hs_bl_sig.bin +-Link: nvidia/ga104/sec2/image.bin -> ../../ga102/sec2/image.bin +-Link: nvidia/ga104/sec2/sig.bin -> ../../ga102/sec2/sig.bin +-Link: nvidia/ga106/acr/ucode_ahesasc.bin -> ../../ga102/acr/ucode_ahesasc.bin +-Link: nvidia/ga106/acr/ucode_asb.bin -> ../../ga102/acr/ucode_asb.bin +-Link: nvidia/ga106/acr/ucode_unload.bin -> ../../ga102/acr/ucode_unload.bin +-File: nvidia/ga106/gr/fecs_bl.bin +-File: nvidia/ga106/gr/fecs_sig.bin +-File: nvidia/ga106/gr/gpccs_bl.bin +-File: nvidia/ga106/gr/gpccs_sig.bin +-File: nvidia/ga106/gr/NET_img.bin +-Link: nvidia/ga106/nvdec/scrubber.bin -> ../../ga102/nvdec/scrubber.bin +-Link: nvidia/ga106/sec2/desc.bin -> ../../ga102/sec2/desc.bin +-Link: nvidia/ga106/sec2/hs_bl_sig.bin -> ../../ga102/sec2/hs_bl_sig.bin +-Link: nvidia/ga106/sec2/image.bin -> ../../ga102/sec2/image.bin +-Link: nvidia/ga106/sec2/sig.bin -> ../../ga102/sec2/sig.bin +-Link: nvidia/ga107/acr/ucode_ahesasc.bin -> ../../ga102/acr/ucode_ahesasc.bin +-Link: nvidia/ga107/acr/ucode_asb.bin -> ../../ga102/acr/ucode_asb.bin +-Link: nvidia/ga107/acr/ucode_unload.bin -> ../../ga102/acr/ucode_unload.bin +-File: nvidia/ga107/gr/fecs_bl.bin +-File: nvidia/ga107/gr/fecs_sig.bin +-File: nvidia/ga107/gr/gpccs_bl.bin +-File: nvidia/ga107/gr/gpccs_sig.bin +-File: nvidia/ga107/gr/NET_img.bin +-Link: nvidia/ga107/nvdec/scrubber.bin -> ../../ga102/nvdec/scrubber.bin +-Link: nvidia/ga107/sec2/desc.bin -> ../../ga102/sec2/desc.bin +-Link: nvidia/ga107/sec2/hs_bl_sig.bin -> ../../ga102/sec2/hs_bl_sig.bin +-Link: nvidia/ga107/sec2/image.bin -> ../../ga102/sec2/image.bin +-Link: nvidia/ga107/sec2/sig.bin -> ../../ga102/sec2/sig.bin +- +-File: nvidia/tu10x/typec/ccg_primary.cyacd +-File: nvidia/tu10x/typec/ccg_secondary.cyacd +-File: nvidia/tu10x/typec/ccg_boot.cyacd +- +-Licence: Redistributable. See LICENCE.nvidia for details +- +--------------------------------------------------------------------------- +- + Driver: mtk_scp - MediaTek SCP System Control Processing Driver + + File: mediatek/mt8183/scp.img +@@ -1903,53 +587,6 @@ Licence: Redistributable. 
See LICENSE.sdma_firmware for details + + -------------------------------------------------------------------------- + +-Driver: adreno - Qualcomm Adreno GPU firmware +- +-File: qcom/a300_pfp.fw +-Link: a300_pfp.fw -> qcom/a300_pfp.fw +-File: qcom/a300_pm4.fw +-Link: a300_pm4.fw -> qcom/a300_pm4.fw +-File: qcom/a330_pfp.fw +-File: qcom/a330_pm4.fw +-File: qcom/a420_pfp.fw +-File: qcom/a420_pm4.fw +-File: qcom/a530_pfp.fw +-File: qcom/a530_pm4.fw +-File: qcom/a530v3_gpmu.fw2 +-File: qcom/apq8096/a530_zap.mbn +-Link: qcom/a530_zap.mdt -> apq8096/a530_zap.mbn +-File: qcom/a630_gmu.bin +-File: qcom/a630_sqe.fw +-File: qcom/sdm845/a630_zap.mbn +-File: qcom/a650_gmu.bin +-File: qcom/a650_sqe.fw +-File: qcom/sm8250/a650_zap.mbn +-File: qcom/a660_gmu.bin +-File: qcom/a660_sqe.fw +-File: qcom/leia_pfp_470.fw +-File: qcom/leia_pm4_470.fw +-File: qcom/sc8280xp/LENOVO/21BX/qcdxkmsuc8280.mbn +- +-Licence: Redistributable. See LICENSE.qcom and qcom/NOTICE.txt for details +- +-Binary files supplied originally from +-https://developer.qualcomm.com/hardware/dragonboard-410c/tools +- +--------------------------------------------------------------------------- +- +-Driver: adreno - Qualcomm Adreno GPU firmware +- +-File: qcom/yamato_pfp.fw +-File: qcom/yamato_pm4.fw +- +-Licence: Redistributable, BSD-3-Clause licence, See LICENSE.qcom_yamato for details +- +-Binary files generated from header files in EfikaMX kernel sources. A prefix of +-four zero bytes was prepended to make them work with the DRM MSM driver. See +-https://github.com/genesi/linux-legacy/tree/master/drivers/mxc/amd-gpu +- +--------------------------------------------------------------------------- +- + Driver: mlxsw_spectrum - Mellanox Spectrum switch + + File: mellanox/mlxsw_spectrum-13.1420.122.mfa2 +-- +2.40.1 + diff --git a/packages/linux-firmware/0009-linux-firmware-various-Remove-firmware-for-various-d.patch b/packages/linux-firmware/0009-linux-firmware-various-Remove-firmware-for-various-d.patch new file mode 100644 index 00000000..09372aca --- /dev/null +++ b/packages/linux-firmware/0009-linux-firmware-various-Remove-firmware-for-various-d.patch @@ -0,0 +1,324 @@ +From aa70d48430741bc74b64f9ceb0df1348e7a0aa6d Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Wed, 26 Jul 2023 11:23:46 +0000 +Subject: [PATCH] linux-firmware: various: Remove firmware for various devices + +This patch is a catch all for any specialized hardware that did not +losely fit into any of the other categories. Bottlerocket does not +provide drivers for any of these devices, so there is no use in shipping +firmware for them. + +The following list maps driver names as specified in WHENCE to kernel +config options to allow for easy adding of firmware should driver +enablement make that necessary. + +* dsp56k - CONFIG_ATARI_DSP56K +* yam - CONFIG_YAM +* mtk_scp - CONFIG_MTK_SCP +* imx-sdma - CONFIG_IMX_SDMA +* mlxsw_spectrum - CONFIG_MLXSW_SPECTRUM +* prestera - CONFIG_PRESTERA + +Signed-off-by: Leonard Foerster +--- + LICENCE.Marvell | 22 ------ + LICENCE.mediatek | 9 --- + LICENSE.sdma_firmware | 47 ------------ + WHENCE | 164 ------------------------------------------ + 4 files changed, 242 deletions(-) + delete mode 100644 LICENCE.Marvell + delete mode 100644 LICENCE.mediatek + delete mode 100644 LICENSE.sdma_firmware + +diff --git a/LICENCE.Marvell b/LICENCE.Marvell +deleted file mode 100644 +index fdf4cda..0000000 +--- a/LICENCE.Marvell ++++ /dev/null +@@ -1,22 +0,0 @@ +-Copyright © 2019. Marvell International Ltd. All rights reserved. 
+- +-Redistribution and use in binary form is permitted provided that the following +-conditions are met: +- +-1. Redistributions must reproduce the above copyright notice, this list of +-conditions and the following disclaimer in the documentation and/or other +-materials provided with the distribution. +- +-2. Redistribution and use shall be used only with Marvell silicon products. +-Any other use, reproduction, modification, translation, or compilation of the +-Software is prohibited. +- +-3. No reverse engineering, decompilation, or disassembly is permitted. +- +-TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED +-“AS IS” WITHOUT WARRANTY OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ANY EXPRESS +-OR IMPLIED WARRANTIES OF MERCHANTABILITY, ACCURACY, FITNESS OR SUFFICIENCY FOR A +-PARTICULAR PURPOSE, SATISFACTORY QUALITY, CORRESPONDENCE WITH DESCRIPTION, QUIET +-ENJOYMENT OR NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY RIGHTS. +-MARVELL, ITS AFFILIATES AND THEIR SUPPLIERS DISCLAIM ANY WARRANTY THAT THE +-DELIVERABLES WILL OPERATE WITHOUT INTERRUPTION OR BE ERROR-FREE. +diff --git a/LICENCE.mediatek b/LICENCE.mediatek +deleted file mode 100644 +index 6886c61..0000000 +--- a/LICENCE.mediatek ++++ /dev/null +@@ -1,9 +0,0 @@ +-MediaTek Inc. grants permission to use and redistribute aforementioned firmware +-files for the use with devices containing MediaTek chipsets, but not as part of +-the Linux kernel or in any other form which would require these files themselves +-to be covered by the terms of the GNU General Public License or the GNU Lesser +-General Public License. +- +-These firmware files are distributed in the hope that they will be useful, but +-are provided WITHOUT ANY WARRANTY, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTY +-OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. +diff --git a/LICENSE.sdma_firmware b/LICENSE.sdma_firmware +deleted file mode 100644 +index 0d3d562..0000000 +--- a/LICENSE.sdma_firmware ++++ /dev/null +@@ -1,47 +0,0 @@ +-Copyright 2017, NXP +-All rights reserved. +- +-Redistribution. Reproduction and redistribution in binary form, without +-modification, for use solely in conjunction with a NXP +-chipset, is permitted provided that the following conditions are met: +- +- . Redistributions must reproduce the above copyright notice and the following +- disclaimer in the documentation and/or other materials provided with the +- distribution. +- +- . Neither the name of NXP nor the names of its suppliers +- may be used to endorse or promote products derived from this Software +- without specific prior written permission. +- +- . No reverse engineering, decompilation, or disassembly of this Software is +- permitted. +- +-Limited patent license. NXP (.Licensor.) grants you +-(.Licensee.) a limited, worldwide, royalty-free, non-exclusive license under +-the Patents to make, have made, use, import, offer to sell and sell the +-Software. No hardware per se is licensed hereunder. +-The term .Patents. as used in this agreement means only those patents or patent +-applications owned solely and exclusively by Licensor as of the date of +-Licensor.s submission of the Software and any patents deriving priority (i.e., +-having a first effective filing date) therefrom. The term .Software. as used in +-this agreement means the firmware image submitted by Licensor, under the terms +-of this license, to git://git.kernel.org/pub/scm/linux/kernel/git/firmware/ +-linux-firmware.git. 
+-Notwithstanding anything to the contrary herein, Licensor does not grant and +-Licensee does not receive, by virtue of this agreement or the Licensor's +-submission of any Software, any license or other rights under any patent or +-patent application owned by any affiliate of Licensor or any other entity +-(other than Licensor), whether expressly, impliedly, by virtue of estoppel or +-exhaustion, or otherwise. +- +-DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +-THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/WHENCE b/WHENCE +index 3bb6523..748a81e 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -8,20 +8,6 @@ kernel. + + -------------------------------------------------------------------------- + +-Driver: dsp56k - Atari DSP56k support +- +-File: dsp56k/bootstrap.bin +-Source: dsp56k/bootstrap.asm +-Source: dsp56k/Makefile +-Source: dsp56k/concat-bootstrap.pl +- +-Licence: GPLv2 or later. See GPL-2 and GPL-3 for details. +- +-DSP56001 assembler, buildable with a56 from +-http://www.zdomain.com/a56.html +- +--------------------------------------------------------------------------- +- + Driver: cxgb4 - Chelsio Terminator 4/5/6 1/10/25/40/100G Ethernet adapter + + File: cxgb4/t4fw-1.14.4.0.bin +@@ -69,18 +55,6 @@ Found in hex form in kernel source. + + -------------------------------------------------------------------------- + +-Driver: yam - YAM driver for AX.25 +- +-File: yam/1200.bin +-File: yam/9600.bin +- +-Licence: +- * (C) F6FBB 1998 +- +-Found in hex form in kernel source. +- +--------------------------------------------------------------------------- +- + Driver: bnx2x: Broadcom Everest + + File: bnx2x/bnx2x-e1-7.13.1.0.fw +@@ -561,132 +535,6 @@ Version: HuC API/APB ver 8.5.0 for Meteorlake + License: Redistributable. See LICENSE.i915 for details + -------------------------------------------------------------------------- + +-Driver: mtk_scp - MediaTek SCP System Control Processing Driver +- +-File: mediatek/mt8183/scp.img +-Version: v2.0.13324 +-File: mediatek/mt8186/scp.img +-Version: v0.0.9 +-File: mediatek/mt8192/scp.img +-Version: v2.0.20536 +-File: mediatek/mt8195/scp.img +-Version: v2.0.11966 +- +-Licence: Redistributable. See LICENCE.mediatek for details. +- +--------------------------------------------------------------------------- +- +-Driver: imx-sdma - support for i.MX SDMA driver +- +-File: imx/sdma/sdma-imx6q.bin +-Version: 3.3 +-File: imx/sdma/sdma-imx7d.bin +-Version: 4.2 +- +-Licence: Redistributable. 
See LICENSE.sdma_firmware for details +- +--------------------------------------------------------------------------- +- +-Driver: mlxsw_spectrum - Mellanox Spectrum switch +- +-File: mellanox/mlxsw_spectrum-13.1420.122.mfa2 +-File: mellanox/mlxsw_spectrum-13.1530.152.mfa2 +-File: mellanox/mlxsw_spectrum-13.1620.192.mfa2 +-File: mellanox/mlxsw_spectrum-13.1702.6.mfa2 +-File: mellanox/mlxsw_spectrum-13.1703.4.mfa2 +-File: mellanox/mlxsw_spectrum-13.1910.622.mfa2 +-File: mellanox/mlxsw_spectrum-13.2000.1122.mfa2 +-File: mellanox/mlxsw_spectrum-13.2000.1886.mfa2 +-File: mellanox/mlxsw_spectrum-13.2000.2308.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2000.2308.mfa2 +-File: mellanox/mlxsw_spectrum-13.2000.2714.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2000.2714.mfa2 +-File: mellanox/mlxsw_spectrum-13.2007.1168.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2007.1168.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2007.1168.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.1036.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.1036.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.1036.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.1310.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.1310.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.1310.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.1312.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.1312.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.1312.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.2018.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.2018.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.2018.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.2304.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.2304.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.2304.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.2406.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.2406.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.2406.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.2438.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.2438.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.2438.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.2946.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.2946.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.2946.mfa2 +-File: mellanox/mlxsw_spectrum-13.2008.3326.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2008.3326.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2008.3326.mfa2 +-File: mellanox/mlxsw_spectrum-13.2010.1006.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.1006.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.1006.mfa2 +-File: mellanox/lc_ini_bundle_2010_1006.bin +-File: mellanox/mlxsw_spectrum-13.2010.1232.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.1232.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.1232.mfa2 +-File: mellanox/mlxsw_spectrum-13.2010.1406.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.1406.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.1406.mfa2 +-File: mellanox/mlxsw_spectrum-13.2010.1502.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.1502.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.1502.mfa2 +-File: mellanox/lc_ini_bundle_2010_1502.bin +-File: mellanox/mlxsw_spectrum-13.2010.3020.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.3020.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.3020.mfa2 +-File: mellanox/lc_ini_bundle_2010_3020.bin +-File: mellanox/mlxsw_spectrum-13.2010.3146.mfa2 +-File: mellanox/mlxsw_spectrum2-29.2010.3146.mfa2 +-File: mellanox/mlxsw_spectrum3-30.2010.3146.mfa2 +-File: mellanox/lc_ini_bundle_2010_3146.bin +- +-Licence: +- Copyright (c) 2017-2020 Mellanox Technologies, Ltd. All rights reserved. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions are met: +- +- 1. Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- 2. Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in the +- documentation and/or other materials provided with the distribution. +- 3. Neither the names of the copyright holders nor the names of its +- contributors may be used to endorse or promote products derived from +- this software without specific prior written permission. +- +- Alternatively, this software may be distributed under the terms of the +- GNU General Public License ("GPL") version 2 as published by the Free +- Software Foundation. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +- POSSIBILITY OF SUCH DAMAGE. +- +--------------------------------------------------------------------------- +- + Driver: ice - Intel(R) Ethernet Connection E800 Series + + File: intel/ice/ddp/ice-1.3.30.0.pkg +@@ -700,15 +548,3 @@ File: intel/ice/ddp-wireless_edge/ice_wireless_edge-1.3.10.0.pkg + License: Redistributable. See LICENSE.ice_enhanced for details + + -------------------------------------------------------------------------- +- +-Driver: prestera - Marvell driver for Prestera family ASIC devices +- +-File: mrvl/prestera/mvsw_prestera_fw-v2.0.img +-File: mrvl/prestera/mvsw_prestera_fw-v3.0.img +-File: mrvl/prestera/mvsw_prestera_fw-v4.0.img +-File: mrvl/prestera/mvsw_prestera_fw-v4.1.img +-File: mrvl/prestera/mvsw_prestera_fw_arm64-v4.1.img +- +-Licence: Redistributable. See LICENCE.Marvell for details. +- +------------------------------------------------- +-- +2.40.1 + diff --git a/packages/linux-firmware/0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch b/packages/linux-firmware/0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch new file mode 100644 index 00000000..d1082c14 --- /dev/null +++ b/packages/linux-firmware/0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch @@ -0,0 +1,122 @@ +From 820980a4ec6d39dcec84639fb4b7a80bb33f8a21 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Wed, 26 Jul 2023 11:28:35 +0000 +Subject: [PATCH] linux-firmware: amd-ucode: Remove amd microcode + +Bottlerocket ships AMD microcode as part of the kernel packages already. +There is no need to ship these microcode images twice. 
+ +Signed-off-by: Leonard Foerster +--- + LICENSE.amd-ucode | 64 ----------------------------------------------- + WHENCE | 23 ----------------- + 2 files changed, 87 deletions(-) + delete mode 100644 LICENSE.amd-ucode + +diff --git a/LICENSE.amd-ucode b/LICENSE.amd-ucode +deleted file mode 100644 +index ea47c57..0000000 +--- a/LICENSE.amd-ucode ++++ /dev/null +@@ -1,64 +0,0 @@ +-Copyright (C) 2010-2022 Advanced Micro Devices, Inc., All rights reserved. +- +-Permission is hereby granted by Advanced Micro Devices, Inc. ("AMD"), +-free of any license fees, to any person obtaining a copy of this +-microcode in binary form (the "Software") ("You"), to install, +-reproduce, copy and distribute copies of the Software and to permit +-persons to whom the Software is provided to do the same, subject to +-the following terms and conditions. Your use of any portion of the +-Software shall constitute Your acceptance of the following terms and +-conditions. If You do not agree to the following terms and conditions, +-do not use, retain or redistribute any portion of the Software. +- +-If You redistribute this Software, You must reproduce the above +-copyright notice and this license with the Software. +-Without specific, prior, written permission from AMD, You may not +-reference AMD or AMD products in the promotion of any product derived +-from or incorporating this Software in any manner that implies that +-AMD endorses or has certified such product derived from or +-incorporating this Software. +- +-You may not reverse engineer, decompile, or disassemble this Software +-or any portion thereof. +- +-THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED +-WARRANTY OF ANY KIND, INCLUDING BUT NOT LIMITED TO WARRANTIES OF +-MERCHANTABILITY, NONINFRINGEMENT, TITLE, FITNESS FOR ANY PARTICULAR +-PURPOSE, OR WARRANTIES ARISING FROM CONDUCT, COURSE OF DEALING, OR +-USAGE OF TRADE. IN NO EVENT SHALL AMD OR ITS LICENSORS BE LIABLE FOR +-ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR +-LOSS OF PROFITS, BUSINESS INTERRUPTION, OR LOSS OF DATA OR +-INFORMATION) ARISING OUT OF AMD'S NEGLIGENCE, GROSS NEGLIGENCE, THE +-USE OF OR INABILITY TO USE THE SOFTWARE, EVEN IF AMD HAS BEEN ADVISED +-OF THE POSSIBILITY OF SUCH DAMAGES. BECAUSE SOME JURISDICTIONS +-PROHIBIT THE EXCLUSION OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR +-INCIDENTAL DAMAGES OR THE EXCLUSION OF IMPLIED WARRANTIES, THE ABOVE +-LIMITATION MAY NOT APPLY TO YOU. +- +-Without limiting the foregoing, the Software may implement third party +-technologies for which You must obtain licenses from parties other +-than AMD. You agree that AMD has not obtained or conveyed to You, and +-that You shall be responsible for obtaining the rights to use and/or +-distribute the applicable underlying intellectual property rights +-related to the third party technologies. These third party +-technologies are not licensed hereunder. +- +-If You use the Software (in whole or in part), You shall adhere to all +-applicable U.S., European, and other export laws, including but not +-limited to the U.S. Export Administration Regulations ("EAR"), (15 +-C.F.R. Sections 730 through 774), and E.U. Council Regulation (EC) No +-1334/2000 of 22 June 2000. Further, pursuant to Section 740.6 of the +-EAR, You hereby certify that, except pursuant to a license granted by +-the United States Department of Commerce Bureau of Industry and +-Security or as otherwise permitted pursuant to a License Exception +-under the U.S. 
Export Administration Regulations ("EAR"), You will not +-(1) export, re-export or release to a national of a country in Country +-Groups D:1, E:1 or E:2 any restricted technology, software, or source +-code You receive hereunder, or (2) export to Country Groups D:1, E:1 +-or E:2 the direct product of such technology or software, if such +-foreign produced direct product is subject to national security +-controls as identified on the Commerce Control List (currently found +-in Supplement 1 to Part 774 of EAR). For the most current Country +-Group listings, or for additional information about the EAR or Your +-obligations under those regulations, please refer to the U.S. Bureau +-of Industry and Security?s website at ttp://www.bis.doc.gov/. +diff --git a/WHENCE b/WHENCE +index 748a81e..5c19692 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -147,29 +147,6 @@ License: Redistributable. See LICENCE.myri10ge_firmware for details. + + -------------------------------------------------------------------------- + +-Driver: microcode_amd - AMD CPU Microcode Update Driver for Linux +- +-File: amd-ucode/microcode_amd.bin +-Raw: amd-ucode/microcode_amd.bin +-Version: 2013-07-10 +-File: amd-ucode/microcode_amd_fam15h.bin +-Raw: amd-ucode/microcode_amd_fam15h.bin +-Version: 2018-05-24 +-File: amd-ucode/microcode_amd_fam16h.bin +-Raw: amd-ucode/microcode_amd_fam16h.bin +-Version: 2014-10-28 +-File: amd-ucode/microcode_amd_fam17h.bin +-Raw: amd-ucode/microcode_amd_fam17h.bin +-Version: 2023-04-13 +-File: amd-ucode/microcode_amd_fam19h.bin +-Raw: amd-ucode/microcode_amd_fam19h.bin +-Version: 2023-01-31 +-File: amd-ucode/README +- +-License: Redistributable. See LICENSE.amd-ucode for details +- +--------------------------------------------------------------------------- +- + Driver: i915 -- Intel Integrated Graphics driver + + File: i915/skl_dmc_ver1_23.bin +-- +2.40.1 + diff --git a/packages/linux-firmware/linux-firmware.spec b/packages/linux-firmware/linux-firmware.spec index 7453277c..497e1e59 100644 --- a/packages/linux-firmware/linux-firmware.spec +++ b/packages/linux-firmware/linux-firmware.spec @@ -16,6 +16,17 @@ URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{version}.tar.xz +Patch0001: 0001-linux-firmware-snd-remove-firmware-for-snd-audio-dev.patch +Patch0002: 0002-linux-firmware-video-Remove-firmware-for-video-broad.patch +Patch0003: 0003-linux-firmware-bt-wifi-Remove-firmware-for-Bluetooth.patch +Patch0004: 0004-linux-firmware-scsi-Remove-firmware-for-SCSI-devices.patch +Patch0005: 0005-linux-firmware-usb-remove-firmware-for-USB-Serial-PC.patch +Patch0006: 0006-linux-firmware-ethernet-Remove-firmware-for-ethernet.patch +Patch0007: 0007-linux-firmware-Remove-firmware-for-Accelarator-devic.patch +Patch0008: 0008-linux-firmware-gpu-Remove-firmware-for-GPU-devices.patch +Patch0009: 0009-linux-firmware-various-Remove-firmware-for-various-d.patch +Patch0010: 0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch + %description %{summary}. From 4b5e53977dbbd6b6873200ebead653155a6e82bc Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 8 Aug 2023 13:54:22 +0000 Subject: [PATCH 1070/1356] linux-firmware: Adjust Lincense definition As we want to adhere to SPDX identifiers the Licensing originally given by AL/Fedora is not suitable. Adjust the Licenses to cover the firmware we actually package. 
Signed-off-by: Leonard Foerster --- packages/linux-firmware/linux-firmware.spec | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/linux-firmware/linux-firmware.spec b/packages/linux-firmware/linux-firmware.spec index 497e1e59..1039b62f 100644 --- a/packages/linux-firmware/linux-firmware.spec +++ b/packages/linux-firmware/linux-firmware.spec @@ -11,7 +11,16 @@ Name: %{_cross_os}linux-firmware Version: 20230625 Release: 1%{?dist} Summary: Firmware files used by the Linux kernel -License: GPL+ and GPLv2+ and MIT and Redistributable, no modification permitted +# The following list of SPDX identifiers was constructed with help of scancode +# tooling and has turned up the following licenses for different drivers by +# checking the different LICENCE/LICENSE files and the licenses in WHENCE: +# * BSD-Source-Code - myri10ge +# * LicenseRef-scancode-chelsio-linux-firmware - cxgb4 +# * LicenseRef-scancode-qlogic-firmware - netxen_nic +# * LicenseRef-scancode-intel - i915, ice +# * LicenseRef-scancode-proprietary-license - bnx2x, qed +# * LicenseRef-scancode-free-unknown - tg3 +License: GPL-1.0-or-later AND GPL-2.0-or-later AND BSD-Source-Code AND LicenseRef-scancode-chelsio-linux-firmware AND LicenseRef-scancode-qlogic-firmware AND LicenseRef-scancode-intel AND LicenseRef-scancode-proprietary-license AND LicenseRef-scancode-free-unknown URL: https://www.kernel.org/ Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{version}.tar.xz From 8e31357844632173feccc4668073d0f192881c9a Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 11 Aug 2023 14:02:26 +0000 Subject: [PATCH 1071/1356] microcode: Update amd and intel ucode and remove indirection Update microcode for Intel and AMD processors in face of recent processor vulnerabilities. In the process cut out the indirection step through Amazon Linux. They do not change the microcode between getting it from upstream and us consuming it, so it is an extra step of indirection. Signed-off-by: Leonard Foerster --- packages/microcode/Cargo.toml | 8 ++++---- packages/microcode/microcode.spec | 16 +++++++--------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index 3cb5d305..65475428 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -11,9 +11,9 @@ path = "../packages.rs" # Use latest-srpm-urls.sh to get these. 
[[package.metadata.build-package.external-files]] -url = "https://cdn.amazonlinux.com/blobstore/6d7f707779f6aff41c89bad00f7abe69dc70919cee29a8d3e5060f8070efe71d/linux-firmware-20200421-79.git78c0348.amzn2.src.rpm" -sha512 = "d5a62eca6ddd7ff322574f17359681d03a733acc51c334127f291af5d5e39fcdf821c073ddcd977b2ca088cd95d35dc31db2001ca4c312a62dcbd4ea935434fd" +url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20230804.tar.xz" +sha512 = "b7fdffd49530223394a0896608a746395cbe9d1a3ca7e4e744bc8381e937845e085f08e2b56854a233426164072f4c365b281db2f0dbb47192a97a94ada8fae6" [[package.metadata.build-package.external-files]] -url = "https://cdn.amazonlinux.com/blobstore/76e8f9f15ec2b27c70aff3ca15a28df51790b25c73fc8dc1bf1f28a9069b15e8/microcode_ctl-2.1-47.amzn2.0.9.src.rpm" -sha512 = "e1347139d1edbd52d2619d970ba0f03500ba7367d071bb30ab3d209e44b3ff63000fcaa681f7352c79f7d5d2f0753130161b42b0eab7aab97b5b4fc4bfaa1b3b" +url = "https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-20230808.tar.gz" +sha512 = "8316eb9d35b315e630c6c9fab1ba601b91e72cc42926ef14e7c2b77e7025d276ae06c143060f44cd1a873d3879c067d11ad82e1886c796e6be6bf466243ad85b" diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec index 1d1439eb..710d34ef 100644 --- a/packages/microcode/microcode.spec +++ b/packages/microcode/microcode.spec @@ -3,8 +3,8 @@ # These are specific to the upstream source RPM, and will likely need to be # updated for each new version. -%global amd_ucode_archive linux-firmware-20200421.tar.gz -%global intel_ucode_archive microcode-20210608-1-amzn.tgz +%global amd_ucode_version 20230804 +%global intel_ucode_version 20230808 Name: %{_cross_os}microcode Version: 0.0 @@ -18,9 +18,8 @@ License: LicenseRef-scancode-amd-linux-firmware-export AND LicenseRef-scancode-i # the subpackage definitions. URL: https://github.com/bottlerocket-os/bottlerocket/tree/develop/packages/microcode -# We use Amazon Linux 2 as our upstream for microcode updates. -Source0: https://cdn.amazonlinux.com/blobstore/6d7f707779f6aff41c89bad00f7abe69dc70919cee29a8d3e5060f8070efe71d/linux-firmware-20200421-79.git78c0348.amzn2.src.rpm -Source1: https://cdn.amazonlinux.com/blobstore/76e8f9f15ec2b27c70aff3ca15a28df51790b25c73fc8dc1bf1f28a9069b15e8/microcode_ctl-2.1-47.amzn2.0.9.src.rpm +Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{amd_ucode_version}.tar.xz +Source1: https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-%{intel_ucode_version}.tar.gz # Lets us install "microcode" to pull in the AMD and Intel updates. Requires: %{_cross_os}microcode-amd @@ -75,11 +74,9 @@ Requires: %{_cross_os}microcode-intel-license %{summary}. %prep -rpm2cpio %{SOURCE0} | cpio -iu %{amd_ucode_archive} -rpm2cpio %{SOURCE1} | cpio -iu %{intel_ucode_archive} mkdir amd intel -tar -C amd -xof %{amd_ucode_archive} -tar -C intel -xof %{intel_ucode_archive} +tar -C amd --strip-components=1 -xof %{SOURCE0} +tar -C intel --strip-components=1 -xof %{SOURCE1} cp {amd/,}LICENSE.amd-ucode cp intel/intel-ucode-with-caveats/* intel/intel-ucode cp intel/license LICENSE.intel-ucode @@ -110,6 +107,7 @@ install -p -m 0644 intel/intel-ucode/* %{buildroot}%{_cross_libdir}/firmware/int %dir %{_cross_libdir}/firmware %dir %{_cross_libdir}/firmware/intel-ucode %{_cross_libdir}/firmware/intel-ucode/??-??-?? 
+%exclude %{_cross_libdir}/firmware/intel-ucode/??-??-??_DUPLICATE %files intel-license %license LICENSE.intel-ucode LicenseRef-scancode-intel-mcu-2018 From 4d0e05ea854f613f90b7853c1e5a9a748f880c5e Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 21 Aug 2023 14:52:23 +0000 Subject: [PATCH 1072/1356] microcode: Clean up latest-srpm-urls.sh and its references With the move to upstream sources for microcode the helper script `latest-srpm-urls.sh` is not applicable anymore. Remove it and references to it. Signed-off-by: Leonard Foerster --- packages/microcode/Cargo.toml | 2 +- packages/microcode/latest-srpm-urls.sh | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) delete mode 100755 packages/microcode/latest-srpm-urls.sh diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index 65475428..e53448b0 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -8,7 +8,7 @@ build = "../build.rs" [lib] path = "../packages.rs" -# Use latest-srpm-urls.sh to get these. +# Check the two upstream repositories for the latest releases [[package.metadata.build-package.external-files]] url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20230804.tar.xz" diff --git a/packages/microcode/latest-srpm-urls.sh b/packages/microcode/latest-srpm-urls.sh deleted file mode 100755 index a3d4c510..00000000 --- a/packages/microcode/latest-srpm-urls.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker run --rm amazonlinux:2 sh -c 'yum install -q -y yum-utils && yumdownloader -q --source --urls linux-firmware microcode_ctl | grep ^http' From 67c150cf94affe832d526a7b662a2aeb390c0168 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 29 Aug 2023 20:26:10 +0000 Subject: [PATCH 1073/1356] Update root.json hash in docs With the switch to the new TUF repo root, this updates all references in the docs and examples to use the new hash. --- BUILDING.md | 2 +- tools/pubsys/Infra.toml.example | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index d4f5f1b2..684c36d6 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -214,7 +214,7 @@ Next, you need the Bottlerocket root role, which is used by tuftool to verify th This will download and verify the root role itself: ```shell curl -O "https://cache.bottlerocket.aws/root.json" -sha512sum -c <<<"b81af4d8eb86743539fbc4709d33ada7b118d9f929f0c2f6c04e1d41f46241ed80423666d169079d736ab79965b4dd25a5a6db5f01578b397496d49ce11a3aa2 root.json" +sha512sum -c <<<"a3c58bc73999264f6f28f3ed9bfcb325a5be943a782852c7d53e803881968e0a4698bd54c2f125493f4669610a9da83a1787eb58a8303b2ee488fa2a3f7d802f root.json" ``` Next, set your desired parameters, and download the kmod kit: diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example index 4fd5e48e..9b5b5e8e 100644 --- a/tools/pubsys/Infra.toml.example +++ b/tools/pubsys/Infra.toml.example @@ -14,7 +14,7 @@ root_role_sha512 = "0123456789abcdef" # For reference, this is the Bottlerocket root role: #root_role_url = "https://cache.bottlerocket.aws/root.json" -#root_role_sha512 = "b81af4d8eb86743539fbc4709d33ada7b118d9f929f0c2f6c04e1d41f46241ed80423666d169079d736ab79965b4dd25a5a6db5f01578b397496d49ce11a3aa2" +#root_role_sha512 = "a3c58bc73999264f6f28f3ed9bfcb325a5be943a782852c7d53e803881968e0a4698bd54c2f125493f4669610a9da83a1787eb58a8303b2ee488fa2a3f7d802f" # pubsys assumes a single publication key that signs the snapshot, targets, # and timestamp roles. 
Here you specify where that key lives so we can sign From 4d522d51202e146b0b1b17f6d49ee1701b728273 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 29 Aug 2023 11:31:08 +0000 Subject: [PATCH 1074/1356] kernel-5.10: cherry-pick fix for CVE-2023-20588 ("DIV0") The fix for CVE-2023-20588 is currently only available in the kernel.org upstream 5.10 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Bring in an extra commit (x86/bugs: Increase the x86 bugs vector size to two u32s) as a dependency. Contextual changes are necessary to make these patches apply as we currently carry Amazon Linux' patches mitigating GDS and SRSO instead of upstream variants of these patches. Signed-off-by: Leonard Foerster --- ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 ++++++++ ...ot-leak-quotient-data-after-a-divisi.patch | 111 ++++++++++++++++++ ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 83 +++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 6 + 4 files changed, 248 insertions(+) create mode 100644 packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch create mode 100644 packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch create mode 100644 packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch diff --git a/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch new file mode 100644 index 00000000..7207e627 --- /dev/null +++ b/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch @@ -0,0 +1,48 @@ +From d573bee81157742dfb6710646d365bcd37a0f92c Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 8 Jul 2023 10:21:35 +0200 +Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s + +Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea + +There was never a doubt in my mind that they would not fit into a single +u32 eventually. 
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 073a28a9b50662991e7d6956c2cf2fc5d54f28cd) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 +- + tools/arch/x86/include/asm/cpufeatures.h | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 0b0b9453b19f..9b06e142bad1 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 19 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h +index 54ba20492ad1..51a8fdb487c7 100644 +--- a/tools/arch/x86/include/asm/cpufeatures.h ++++ b/tools/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 19 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +-- +2.40.1 + diff --git a/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch new file mode 100644 index 00000000..d6a61771 --- /dev/null +++ b/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch @@ -0,0 +1,111 @@ +From 188ef20eb7f347966659092d75051f0cd4b572bf Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 5 Aug 2023 00:06:43 +0200 +Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 + +commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. + +Under certain circumstances, an integer division by 0 which faults, can +leave stale quotient data from a previous division operation on Zen1 +microarchitectures. + +Do a dummy division 0/1 before returning from the #DE exception handler +in order to avoid any leaks of potentially sensitive data. 
+ +Signed-off-by: Borislav Petkov (AMD) +Cc: +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit b6fc2fbf89089ecfb8eb9a89a7fc91d444f4fec7) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/include/asm/processor.h | 2 ++ + arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ + arch/x86/kernel/traps.c | 2 ++ + 4 files changed, 25 insertions(+) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 9b06e142bad1..630196281a48 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -435,4 +435,6 @@ + #define X86_BUG_RAS_POISONING X86_BUG(29) /* CPU is affected by RAS poisoning */ + #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ + ++/* BUG word 2 */ ++#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 12714134f5eb..f20dc0c73cae 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -810,9 +810,11 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow); + #ifdef CONFIG_CPU_SUP_AMD + extern u16 amd_get_nb_id(int cpu); + extern u32 amd_get_nodes_per_socket(void); ++extern void amd_clear_divider(void); + #else + static inline u16 amd_get_nb_id(int cpu) { return 0; } + static inline u32 amd_get_nodes_per_socket(void) { return 0; } ++static inline void amd_clear_divider(void) { } + #endif + + static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 3d99a823ffac..842357ee7724 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -76,6 +76,10 @@ static const int amd_zenbleed[] = + AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); + ++static const int amd_div0[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1168,6 +1172,11 @@ static void init_amd(struct cpuinfo_x86 *c) + check_null_seg_clears_base(c); + + zenbleed_check(c); ++ ++ if (cpu_has_amd_erratum(c, amd_div0)) { ++ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); ++ setup_force_cpu_bug(X86_BUG_DIV0); ++ } + } + + #ifdef CONFIG_X86_32 +@@ -1293,3 +1302,13 @@ void amd_check_microcode(void) + { + on_each_cpu(zenbleed_check_cpu, NULL, 1); + } ++ ++/* ++ * Issue a DIV 0/1 insn to clear any division data from previous DIV ++ * operations. 
++ */ ++void noinstr amd_clear_divider(void) ++{ ++ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) ++ :: "a" (0), "d" (0), "r" (1)); ++} +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 3780c728345c..d8142b5738ac 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -198,6 +198,8 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); ++ ++ amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +-- +2.40.1 + diff --git a/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch new file mode 100644 index 00000000..6ef00d99 --- /dev/null +++ b/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch @@ -0,0 +1,83 @@ +From ea19dbd49d7dcdfa1a807ce1ea48164f10129113 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Fri, 11 Aug 2023 23:38:24 +0200 +Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt + +commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. + +Initially, it was thought that doing an innocuous division in the #DE +handler would take care to prevent any leaking of old data from the +divider but by the time the fault is raised, the speculation has already +advanced too far and such data could already have been used by younger +operations. + +Therefore, do the innocuous division on every exit to userspace so that +userspace doesn't see any potentially old data from integer divisions in +kernel space. + +Do the same before VMRUN too, to protect host data from leaking into the +guest too. + +Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") +Signed-off-by: Borislav Petkov (AMD) +Cc: +Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 69712baf249570a1419e75dc1a103a44e375b2cd) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/entry-common.h | 1 + + arch/x86/kernel/cpu/amd.c | 1 + + arch/x86/kernel/traps.c | 2 -- + arch/x86/kvm/svm/svm.c | 1 + + 4 files changed, 3 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h +index 4a382fb6a9ef..5443851d3aa6 100644 +--- a/arch/x86/include/asm/entry-common.h ++++ b/arch/x86/include/asm/entry-common.h +@@ -78,6 +78,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + static __always_inline void arch_exit_to_user_mode(void) + { + mds_user_clear_cpu_buffers(); ++ amd_clear_divider(); + } + #define arch_exit_to_user_mode arch_exit_to_user_mode + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 842357ee7724..64e97f243441 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1312,3 +1312,4 @@ void noinstr amd_clear_divider(void) + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); + } ++EXPORT_SYMBOL_GPL(amd_clear_divider); +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index d8142b5738ac..3780c728345c 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -198,8 +198,6 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); +- +- amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +diff --git a/arch/x86/kvm/svm/svm.c 
b/arch/x86/kvm/svm/svm.c +index 5ddc75ade8f0..d0a1c0420c92 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -3381,6 +3381,7 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) + + static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) + { ++ amd_clear_divider(); + } + + static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) +-- +2.40.1 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 45579c4c..54bb16e9 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -29,6 +29,12 @@ Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch +# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving +# upstream to 5.10.192 or later. +Patch5011: 5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch +Patch5012: 5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch +Patch5013: 5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 4d394c771b5da05d3e156c3414e17503e114428a Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 29 Aug 2023 11:37:56 +0000 Subject: [PATCH 1075/1356] kernel-5.15: cherry-pick fix for CVE-2023-20588 ("DIV0") The fic for CVE-2023-20588 is currently only available in the kernel.org upstream 5.15 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Bring in an extra commit (x86/bugs: Increase the x86 bugs vector size to two u32s) as a dependency. Contextual changes are necessary to make these patches apply as we currently carry Amazon Linux' patches mitigating GDS and SRSO instead of upstream variants of these patches. Signed-off-by: Leonard Foerster --- ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 ++++++++ ...ot-leak-quotient-data-after-a-divisi.patch | 111 ++++++++++++++++++ ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 82 +++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 6 + 4 files changed, 247 insertions(+) create mode 100644 packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch create mode 100644 packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch create mode 100644 packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch diff --git a/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch new file mode 100644 index 00000000..fccdce71 --- /dev/null +++ b/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch @@ -0,0 +1,48 @@ +From 40f837f02c448b37fb8967e5c50878c8a4e9459a Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 8 Jul 2023 10:21:35 +0200 +Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s + +Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea + +There was never a doubt in my mind that they would not fit into a single +u32 eventually. 
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 236dd7133394bfe30275191e3aefcc6b3b09962b) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 +- + tools/arch/x86/include/asm/cpufeatures.h | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index ad6984f941f7..3800d0ec048d 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 20 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h +index 3781a7f489ef..da6d66e1fbb1 100644 +--- a/tools/arch/x86/include/asm/cpufeatures.h ++++ b/tools/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 20 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +-- +2.40.1 + diff --git a/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch new file mode 100644 index 00000000..fc264f5e --- /dev/null +++ b/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch @@ -0,0 +1,111 @@ +From 7292d6bb18710a2c1f283f77f3f69196536bdc2b Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 5 Aug 2023 00:06:43 +0200 +Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 + +commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. + +Under certain circumstances, an integer division by 0 which faults, can +leave stale quotient data from a previous division operation on Zen1 +microarchitectures. + +Do a dummy division 0/1 before returning from the #DE exception handler +in order to avoid any leaks of potentially sensitive data. 
+ +Signed-off-by: Borislav Petkov (AMD) +Cc: +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit a74878207b02060c5feaf88b5566208ed08eb78d) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/include/asm/processor.h | 2 ++ + arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ + arch/x86/kernel/traps.c | 2 ++ + 4 files changed, 25 insertions(+) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 3800d0ec048d..8d64a1e26589 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -454,4 +454,6 @@ + #define X86_BUG_RAS_POISONING X86_BUG(29) /* CPU is affected by RAS poisoning */ + #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ + ++/* BUG word 2 */ ++#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 3e3bd5b7d5db..aeef8a6c2088 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -803,9 +803,11 @@ extern u16 get_llc_id(unsigned int cpu); + #ifdef CONFIG_CPU_SUP_AMD + extern u32 amd_get_nodes_per_socket(void); + extern u32 amd_get_highest_perf(void); ++extern void amd_clear_divider(void); + #else + static inline u32 amd_get_nodes_per_socket(void) { return 0; } + static inline u32 amd_get_highest_perf(void) { return 0; } ++static inline void amd_clear_divider(void) { } + #endif + + static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 3daceadf5d1f..892eb16a9ea2 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -75,6 +75,10 @@ static const int amd_zenbleed[] = + AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); + ++static const int amd_div0[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1140,6 +1144,11 @@ static void init_amd(struct cpuinfo_x86 *c) + check_null_seg_clears_base(c); + + zenbleed_check(c); ++ ++ if (cpu_has_amd_erratum(c, amd_div0)) { ++ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); ++ setup_force_cpu_bug(X86_BUG_DIV0); ++ } + } + + #ifdef CONFIG_X86_32 +@@ -1281,3 +1290,13 @@ void amd_check_microcode(void) + { + on_each_cpu(zenbleed_check_cpu, NULL, 1); + } ++ ++/* ++ * Issue a DIV 0/1 insn to clear any division data from previous DIV ++ * operations. 
++ */ ++void noinstr amd_clear_divider(void) ++{ ++ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) ++ :: "a" (0), "d" (0), "r" (1)); ++} +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index ca47080e3774..3361d32d090f 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -202,6 +202,8 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); ++ ++ amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +-- +2.40.1 + diff --git a/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch new file mode 100644 index 00000000..b2de61d2 --- /dev/null +++ b/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch @@ -0,0 +1,82 @@ +From 1524872707f69fe6bea94d26238e8f6d9302b5d6 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Fri, 11 Aug 2023 23:38:24 +0200 +Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt + +commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. + +Initially, it was thought that doing an innocuous division in the #DE +handler would take care to prevent any leaking of old data from the +divider but by the time the fault is raised, the speculation has already +advanced too far and such data could already have been used by younger +operations. + +Therefore, do the innocuous division on every exit to userspace so that +userspace doesn't see any potentially old data from integer divisions in +kernel space. + +Do the same before VMRUN too, to protect host data from leaking into the +guest too. + +Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") +Signed-off-by: Borislav Petkov (AMD) +Cc: +Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/entry-common.h | 1 + + arch/x86/kernel/cpu/amd.c | 1 + + arch/x86/kernel/traps.c | 2 -- + arch/x86/kvm/svm/svm.c | 2 ++ + 4 files changed, 4 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h +index 43184640b579..a12fdf01dc26 100644 +--- a/arch/x86/include/asm/entry-common.h ++++ b/arch/x86/include/asm/entry-common.h +@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + static __always_inline void arch_exit_to_user_mode(void) + { + mds_user_clear_cpu_buffers(); ++ amd_clear_divider(); + } + #define arch_exit_to_user_mode arch_exit_to_user_mode + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 892eb16a9ea2..f485e6c3ae90 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1300,3 +1300,4 @@ void noinstr amd_clear_divider(void) + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); + } ++EXPORT_SYMBOL_GPL(amd_clear_divider); +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 3361d32d090f..ca47080e3774 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -202,8 +202,6 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); +- +- amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 0611dac70c25..944a08cc3b6b 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ 
b/arch/x86/kvm/svm/svm.c
+@@ -1452,6 +1452,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+
++ amd_clear_divider();
++
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_unmap_ghcb(svm);
+
+--
+2.40.1
+
diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec
index 4e2dc548..085eb6cc 100644
--- a/packages/kernel-5.15/kernel-5.15.spec
+++ b/packages/kernel-5.15/kernel-5.15.spec
@@ -22,6 +22,12 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch
 # Increase default of sysctl net.unix.max_dgram_qlen to 512.
 Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch
 
+# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving
+# upstream to 5.15.128 or later.
+Patch5001: 5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch
+Patch5002: 5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch
+Patch5003: 5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch
+
 BuildRequires: bc
 BuildRequires: elfutils-devel
 BuildRequires: hostname

From 206ced13287a69290cedebece468982d7bf07e1f Mon Sep 17 00:00:00 2001
From: Leonard Foerster
Date: Tue, 29 Aug 2023 11:47:59 +0000
Subject: [PATCH 1076/1356] kernel-6.1: cherry-pick fix for CVE-2023-20588 ("DIV0")

The fix for CVE-2023-20588 is currently only available in the kernel.org upstream 6.1 stable kernel, but not yet in an Amazon Linux kernel release. Cherry-pick it from the upstream kernel. Bring in an extra commit (x86/bugs: Increase the x86 bugs vector size to two u32s) as a dependency.

Contextual changes are necessary to make these patches apply as we currently carry Amazon Linux' patches mitigating GDS and SRSO instead of upstream variants of these patches.

Signed-off-by: Leonard Foerster
---
 ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 ++++++++
 ...ot-leak-quotient-data-after-a-divisi.patch | 111 ++++++++++++++++++
 ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 82 +++++++++++++
 packages/kernel-6.1/kernel-6.1.spec | 6 +
 4 files changed, 247 insertions(+)
 create mode 100644 packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch
 create mode 100644 packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch
 create mode 100644 packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch

diff --git a/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch
new file mode 100644
index 00000000..2052baa3
--- /dev/null
+++ b/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch
@@ -0,0 +1,48 @@
+From 3bf59e709af08ffd0e321755b5699942474c1962 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Sat, 8 Jul 2023 10:21:35 +0200
+Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s
+
+Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea
+
+There was never a doubt in my mind that they would not fit into a single
+u32 eventually.
+ +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit dfede4cb8ef732039b7a479d260bd89d3b474f14) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 +- + tools/arch/x86/include/asm/cpufeatures.h | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index b69f948be454..32221013c45d 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 20 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h +index b71f4f2ecdd5..9ecc62861194 100644 +--- a/tools/arch/x86/include/asm/cpufeatures.h ++++ b/tools/arch/x86/include/asm/cpufeatures.h +@@ -14,7 +14,7 @@ + * Defines x86 CPU feature bits + */ + #define NCAPINTS 20 /* N 32-bit words worth of info */ +-#define NBUGINTS 1 /* N 32-bit bug flags */ ++#define NBUGINTS 2 /* N 32-bit bug flags */ + + /* + * Note: If the comment begins with a quoted string, that string is used +-- +2.40.1 + diff --git a/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch new file mode 100644 index 00000000..e0ef9e05 --- /dev/null +++ b/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch @@ -0,0 +1,111 @@ +From 35131bf2a0cc0d522f294c21be7d9c2a88c06035 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Sat, 5 Aug 2023 00:06:43 +0200 +Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 + +commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. + +Under certain circumstances, an integer division by 0 which faults, can +leave stale quotient data from a previous division operation on Zen1 +microarchitectures. + +Do a dummy division 0/1 before returning from the #DE exception handler +in order to avoid any leaks of potentially sensitive data. 
+ +Signed-off-by: Borislav Petkov (AMD) +Cc: +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit f2615bb47be4f53be92c81a6a8aa286c92ef04d9) +Signed-off-by: Leonard Foerster +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/include/asm/processor.h | 2 ++ + arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ + arch/x86/kernel/traps.c | 2 ++ + 4 files changed, 25 insertions(+) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 32221013c45d..e2d980757511 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -467,4 +467,6 @@ + #define X86_BUG_RAS_POISONING X86_BUG(30) /* CPU is affected by RAS poisoning */ + #define X86_BUG_GDS X86_BUG(31) /* CPU is affected by Gather Data Sampling */ + ++/* BUG word 2 */ ++#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index d8277eec1bcd..7dc733062313 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -800,9 +800,11 @@ extern u16 get_llc_id(unsigned int cpu); + #ifdef CONFIG_CPU_SUP_AMD + extern u32 amd_get_nodes_per_socket(void); + extern u32 amd_get_highest_perf(void); ++extern void amd_clear_divider(void); + #else + static inline u32 amd_get_nodes_per_socket(void) { return 0; } + static inline u32 amd_get_highest_perf(void) { return 0; } ++static inline void amd_clear_divider(void) { } + #endif + + #define for_each_possible_hypervisor_cpuid_base(function) \ +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 7f4eb8b027cc..7a93bb12302d 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -75,6 +75,10 @@ static const int amd_zenbleed[] = + AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); + ++static const int amd_div0[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), ++ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1115,6 +1119,11 @@ static void init_amd(struct cpuinfo_x86 *c) + check_null_seg_clears_base(c); + + zenbleed_check(c); ++ ++ if (cpu_has_amd_erratum(c, amd_div0)) { ++ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); ++ setup_force_cpu_bug(X86_BUG_DIV0); ++ } + } + + #ifdef CONFIG_X86_32 +@@ -1256,3 +1265,13 @@ void amd_check_microcode(void) + { + on_each_cpu(zenbleed_check_cpu, NULL, 1); + } ++ ++/* ++ * Issue a DIV 0/1 insn to clear any division data from previous DIV ++ * operations. 
++ */ ++void noinstr amd_clear_divider(void) ++{ ++ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) ++ :: "a" (0), "d" (0), "r" (1)); ++} +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index d3fdec706f1d..80b719ff60ed 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); ++ ++ amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +-- +2.40.1 + diff --git a/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch new file mode 100644 index 00000000..0e1a58b6 --- /dev/null +++ b/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch @@ -0,0 +1,82 @@ +From 20eb241125391039b9a7248b82e8e6c892522931 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Fri, 11 Aug 2023 23:38:24 +0200 +Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt + +commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. + +Initially, it was thought that doing an innocuous division in the #DE +handler would take care to prevent any leaking of old data from the +divider but by the time the fault is raised, the speculation has already +advanced too far and such data could already have been used by younger +operations. + +Therefore, do the innocuous division on every exit to userspace so that +userspace doesn't see any potentially old data from integer divisions in +kernel space. + +Do the same before VMRUN too, to protect host data from leaking into the +guest too. + +Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") +Signed-off-by: Borislav Petkov (AMD) +Cc: +Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/entry-common.h | 1 + + arch/x86/kernel/cpu/amd.c | 1 + + arch/x86/kernel/traps.c | 2 -- + arch/x86/kvm/svm/svm.c | 2 ++ + 4 files changed, 4 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h +index 674ed46d3ced..11203a9fe0a8 100644 +--- a/arch/x86/include/asm/entry-common.h ++++ b/arch/x86/include/asm/entry-common.h +@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + static __always_inline void arch_exit_to_user_mode(void) + { + mds_user_clear_cpu_buffers(); ++ amd_clear_divider(); + } + #define arch_exit_to_user_mode arch_exit_to_user_mode + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 7a93bb12302d..b76e85f8cdb8 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1275,3 +1275,4 @@ void noinstr amd_clear_divider(void) + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); + } ++EXPORT_SYMBOL_GPL(amd_clear_divider); +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 80b719ff60ed..d3fdec706f1d 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) + { + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, + FPE_INTDIV, error_get_trap_addr(regs)); +- +- amd_clear_divider(); + } + + DEFINE_IDTENTRY(exc_overflow) +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index fc1649b5931a..9d549826b23f 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ 
b/arch/x86/kvm/svm/svm.c +@@ -3940,6 +3940,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in + + guest_state_enter_irqoff(); + ++ amd_clear_divider(); ++ + if (sev_es_guest(vcpu->kvm)) + __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); + else +-- +2.40.1 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index eea8b89b..42397792 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -25,6 +25,12 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch +# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving +# upstream to 6.1.48 or later +Patch5001: 5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch +Patch5002: 5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch +Patch5003: 5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 71245badef63a743605a15040f060bc6303ba542 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 29 Aug 2023 19:04:05 -0700 Subject: [PATCH 1077/1356] Revert "chore: explicity set cargo feature resolver version to 2" `cargo make build-package` breaks due to this change. This needs re-evaluating. This reverts commit 3a4dadc3c9e10a2ed2eac76377c1643bb71a29c5. --- tools/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 223aaf87..03fb9b38 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,5 +1,4 @@ [workspace] -resolver = "2" members = [ "infrasys", "buildsys", From a85761ce21505840757a447d8ebbda78edc86aa9 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 13 Jun 2023 19:43:37 +0000 Subject: [PATCH 1078/1356] Add dependabot config This adds a base dependabot config that does a few things: 1. Enables updates for GitHub Actions 2. Disables updates for most dependencies as we try to maintain those manually 3. Allows updates for dependencies under the `/tools` directory since our tooling doesn't need to be as strict 4. Still allows updates for any dependencies if a security issue is identified 5. Sets an appropriate label to match our label scheme instead of the default dependabot ones Signed-off-by: Sean McGinnis --- .github/dependabot.yaml | 45 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 .github/dependabot.yaml diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 00000000..173040d7 --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,45 @@ +version: 2 +updates: + + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + labels: + - "area/dependencies" + + # We maintain updates for most dependencies. This disables updates other than + # security ones. 
+ - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "daily" + labels: + - "area/dependencies" + open-pull-requests-limit: 0 + + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + labels: + - "area/dependencies" + open-pull-requests-limit: 0 + + # Enable updates for the `tools` dependencies + - package-ecosystem: "cargo" + directory: "/tools" + ignore: + # For AWS SDK for Rust, we'll update when we bump tough/coldsnap + - dependency-name: "aws-config" + - dependency-name: "aws-endpoint" + - dependency-name: "aws-http" + - dependency-name: "aws-hyper" + - dependency-name: "aws-sig*" + - dependency-name: "aws-sdk*" + - dependency-name: "aws-smithy*" + schedule: + interval: "weekly" + labels: + - "area/dependencies" From 50c89d91ead9eb32ac06da986ae60a82e40503e4 Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Tue, 29 Aug 2023 20:50:23 +0000 Subject: [PATCH 1079/1356] readme: possible values for OCI-defaults rlimits --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7e8b6853..53562f17 100644 --- a/README.md +++ b/README.md @@ -752,9 +752,9 @@ capability | setting | default value ##### OCI Defaults: Resource Limits -Each of the `resource-limits` settings below contain two numeric fields: `hard-limit` and `soft-limit`, which are **32-bit unsigned integers**. +Each of the `resource-limits` settings below contain two fields: `hard-limit` and `soft-limit`. -Please see the [`getrlimit` linux manpage](https://man7.org/linux/man-pages/man7/capabilities.7.html) for meanings of `hard-limit` and `soft-limit`. +Please see the [`getrlimit` linux manpage](https://man7.org/linux/man-pages/man2/getrlimit.2.html) for meanings of `hard-limit` and `soft-limit`. 
The full list of resource limits that can be configured in Bottlerocket are: From 7238cee939665e91795e44493bf4e3ab0cbaab7f Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 1 Aug 2023 12:29:34 -0700 Subject: [PATCH 1080/1356] sources, variants: add aws-k8s-1.28 variant --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 53562f17..57a071ff 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.25` * `aws-k8s-1.26` * `aws-k8s-1.27` +* `aws-k8s-1.28` * `aws-k8s-1.23-nvidia` * `aws-k8s-1.24-nvidia` * `aws-k8s-1.25-nvidia` From 84322f98b548185aeecf3dfffe288a50d264413a Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 1 Aug 2023 16:23:03 -0700 Subject: [PATCH 1081/1356] sources, variants: add aws-k8s-1.28-nvidia variant --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 57a071ff..dd305adb 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.25-nvidia` * `aws-k8s-1.26-nvidia` * `aws-k8s-1.27-nvidia` +* `aws-k8s-1.28-nvidia` The following variants support ECS: From 23b57b670e78455e6eb9da32e2938c1afa03886e Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 1 Aug 2023 16:41:14 -0700 Subject: [PATCH 1082/1356] sources, variants: add vmware-k8s-1.28 variant --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index dd305adb..aac6ea1e 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: * `vmware-k8s-1.25` * `vmware-k8s-1.26` * `vmware-k8s-1.27` +* `vmware-k8s-1.28` The following variants are designed to be Kubernetes worker nodes on bare metal: From c934b9972000b0a5c922bfb1132bad56c7d0b851 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Tue, 1 Aug 2023 16:49:04 -0700 Subject: [PATCH 1083/1356] sources, variants: add metal-k8s-1.28 variant --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index aac6ea1e..93717883 100644 --- a/README.md +++ b/README.md @@ -95,6 +95,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: * `metal-k8s-1.25` * `metal-k8s-1.26` * `metal-k8s-1.27` +* `metal-k8s-1.28` The following variants are no longer supported: From f1acc4a6a3d408d26f48c9862016d26987798615 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Thu, 31 Aug 2023 10:52:47 -0700 Subject: [PATCH 1084/1356] chore: specify feature resolver="1" in all rust workspaces This gets rid of a warning that comes with rust 1.72.0. This does not impact any of our workspaces since we've always been defaulted to using resolver version 1. We can't use resolver v2 yet since that breaks the 'cargo make build-package' task. See rust-lang/cargo#10112 for more details. --- tools/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/Cargo.toml b/tools/Cargo.toml index 03fb9b38..e6efc568 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "1" members = [ "infrasys", "buildsys", From 3dd801fcc662ab9a6b5061a50f08d8200c1dd0b3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 31 Aug 2023 13:34:25 +0000 Subject: [PATCH 1085/1356] linux-firmware: Add script to check for latest upstream release Add a script to check for latest upstream release. 
We have similar convenience scripts to check for the latest version of srpm available in Amazon Linux repositories for packages we base on Amazon Linux. Signed-off-by: Leonard Foerster --- packages/linux-firmware/latest-upstream-tags.sh | 4 ++++ 1 file changed, 4 insertions(+) create mode 100755 packages/linux-firmware/latest-upstream-tags.sh diff --git a/packages/linux-firmware/latest-upstream-tags.sh b/packages/linux-firmware/latest-upstream-tags.sh new file mode 100755 index 00000000..3177af68 --- /dev/null +++ b/packages/linux-firmware/latest-upstream-tags.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +echo "Latest upstream tag for linux-firmware:" +git ls-remote --tags --refs https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git | tail -1 From b543f77c3892bdc0ec0ea83ce90c17ed49cc4738 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 31 Aug 2023 13:39:30 +0000 Subject: [PATCH 1086/1356] microcode: Add script to check for latest upstream releases Add a script to check for latest upstream releases for both upstream sources of microcode (microcode-ctl for Intel; linux-firmware for AMD). We have similar convenience scripts to check for the latest version of srpm available in Amazon Linux repositories for packages we base on Amazon Linux. Signed-off-by: Leonard Foerster --- packages/microcode/latest-upstream-tags.sh | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100755 packages/microcode/latest-upstream-tags.sh diff --git a/packages/microcode/latest-upstream-tags.sh b/packages/microcode/latest-upstream-tags.sh new file mode 100755 index 00000000..717c7ba8 --- /dev/null +++ b/packages/microcode/latest-upstream-tags.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Latest upstream tag for Intel ucode (microcode-ctl):" +git ls-remote --tags --refs https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files.git | tail -1 + +echo "Latest upstream tag for AMD ucode (linux-firmware):" +git ls-remote --tags --refs https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git | tail -1 From 9dacebec85f1b218fccdb878ead6e6d93ea4d934 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 31 Aug 2023 19:42:58 +0000 Subject: [PATCH 1087/1356] kernel-5.15, -6.1: drop i8042 modules from shared config This effectively removes i8042 hardware support from aws variants, matching the Amazon Linux upstream configuration for EC2. i8042 is retained for vmware variants, where it's required for the interactive console to accept keyboard input, and for metal variants where it might also be useful in some environments. Signed-off-by: Ben Cressey --- packages/kernel-5.15/config-bottlerocket | 17 ----------------- packages/kernel-5.15/config-bottlerocket-metal | 17 +++++++++++++++++ packages/kernel-5.15/config-bottlerocket-vmware | 16 ++++++++++++++++ packages/kernel-6.1/config-bottlerocket | 17 ----------------- packages/kernel-6.1/config-bottlerocket-metal | 17 +++++++++++++++++ packages/kernel-6.1/config-bottlerocket-vmware | 16 ++++++++++++++++ 6 files changed, 66 insertions(+), 34 deletions(-) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 15c84258..3e963c6e 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -119,23 +119,6 @@ CONFIG_DECOMPRESS_ZSTD=y # CONFIG_MODULE_COMPRESS_NONE is not set CONFIG_MODULE_COMPRESS_XZ=y -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. 
-CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set - # Add virtio drivers for development setups running as guests in qemu CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index abef8fba..4022b1be 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -126,3 +126,20 @@ CONFIG_SCSI_SMARTPQI=y # Support for virtio scsi boot devices for other cloud providers CONFIG_SCSI_VIRTIO=y + +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. +CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set diff --git a/packages/kernel-5.15/config-bottlerocket-vmware b/packages/kernel-5.15/config-bottlerocket-vmware index e69de29b..ec1cc1a5 100644 --- a/packages/kernel-5.15/config-bottlerocket-vmware +++ b/packages/kernel-5.15/config-bottlerocket-vmware @@ -0,0 +1,16 @@ +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. +CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 809e36e4..f95e720c 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -124,23 +124,6 @@ CONFIG_DECOMPRESS_ZSTD=y # CONFIG_MODULE_COMPRESS_NONE is not set CONFIG_MODULE_COMPRESS_XZ=y -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. 
-CONFIG_SERIO_I8042=m
-CONFIG_KEYBOARD_ATKBD=m
-CONFIG_MOUSE_PS2=m
-# CONFIG_MOUSE_PS2_ALPS is not set
-# CONFIG_MOUSE_PS2_BYD is not set
-# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
-# CONFIG_MOUSE_PS2_SYNAPTICS is not set
-# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set
-# CONFIG_MOUSE_PS2_CYPRESS is not set
-# CONFIG_MOUSE_PS2_TRACKPOINT is not set
-# CONFIG_MOUSE_PS2_ELANTECH is not set
-# CONFIG_MOUSE_PS2_SENTELIC is not set
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-# CONFIG_MOUSE_PS2_FOCALTECH is not set
-
 # Add virtio drivers for development setups running as guests in qemu
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal
index b2f4972c..82831b74 100644
--- a/packages/kernel-6.1/config-bottlerocket-metal
+++ b/packages/kernel-6.1/config-bottlerocket-metal
@@ -124,3 +124,20 @@ CONFIG_SCSI_SMARTPQI=y
 
 # Support for virtio scsi boot devices for other cloud providers
 CONFIG_SCSI_VIRTIO=y
+
+# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for
+# them before mounting the root device.
+CONFIG_SERIO_I8042=m
+CONFIG_KEYBOARD_ATKBD=m
+CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_BYD is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set
+# CONFIG_MOUSE_PS2_CYPRESS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_PS2_FOCALTECH is not set
diff --git a/packages/kernel-6.1/config-bottlerocket-vmware b/packages/kernel-6.1/config-bottlerocket-vmware
index e69de29b..ec1cc1a5 100644
--- a/packages/kernel-6.1/config-bottlerocket-vmware
+++ b/packages/kernel-6.1/config-bottlerocket-vmware
@@ -0,0 +1,16 @@
+# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for
+# them before mounting the root device.
+CONFIG_SERIO_I8042=m
+CONFIG_KEYBOARD_ATKBD=m
+CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_BYD is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set
+# CONFIG_MOUSE_PS2_CYPRESS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_PS2_FOCALTECH is not set

From d4c8c0f636a3c826237faf4d15e90e06843b6783 Mon Sep 17 00:00:00 2001
From: Leonard Foerster
Date: Fri, 1 Sep 2023 07:33:42 +0000
Subject: [PATCH 1088/1356] microcode: Add amd ucode mitigating AMD Inception

The AMD ucode to mitigate the Inception vulnerability on Zen4 AMD cpus has been added to the upstream linux-firmware repository a few days after the last regular release. Pick this up in order to ship the fixed microcode before we get a chance for the next regular linux-firmware release.

This patch can be reverted once we update to an upstream version that includes it already (most likely the September 2023 release).
Signed-off-by: Leonard Foerster --- ...ux-firmware-Update-AMD-cpu-microcode.patch | 486 ++++++++++++++++++ packages/microcode/microcode.spec | 19 + 2 files changed, 505 insertions(+) create mode 100644 packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch diff --git a/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch b/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch new file mode 100644 index 00000000..b19a577b --- /dev/null +++ b/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch @@ -0,0 +1,486 @@ +From f2eb058afc57348cde66852272d6bf11da1eef8f Mon Sep 17 00:00:00 2001 +From: John Allen +Date: Tue, 8 Aug 2023 19:02:39 +0000 +Subject: [PATCH] linux-firmware: Update AMD cpu microcode + +* Update AMD cpu microcode for processor family 19h + +Key Name = AMD Microcode Signing Key (for signing microcode container files only) +Key ID = F328AE73 +Key Fingerprint = FC7C 6C50 5DAF CC14 7183 57CA E4BE 5339 F328 AE73 + +Signed-off-by: John Allen +Signed-off-by: Josh Boyer +--- + WHENCE | 2 +- + amd-ucode/README | 13 +++++++++++++ + amd-ucode/microcode_amd_fam19h.bin | Bin 16804 -> 39172 bytes + amd-ucode/microcode_amd_fam19h.bin.asc | 16 ++++++++-------- + 4 files changed, 22 insertions(+), 9 deletions(-) + +diff --git a/WHENCE b/WHENCE +index 99cee970..54aadb0c 100644 +--- a/WHENCE ++++ b/WHENCE +@@ -3924,7 +3924,7 @@ Raw: amd-ucode/microcode_amd_fam17h.bin + Version: 2023-07-19 + File: amd-ucode/microcode_amd_fam19h.bin + Raw: amd-ucode/microcode_amd_fam19h.bin +-Version: 2023-07-18 ++Version: 2023-08-08 + File: amd-ucode/README + + License: Redistributable. See LICENSE.amd-ucode for details +diff --git a/amd-ucode/README b/amd-ucode/README +index 1d39da3b..fac11524 100644 +--- a/amd-ucode/README ++++ b/amd-ucode/README +@@ -37,6 +37,19 @@ Microcode patches in microcode_amd_fam17h.bin: + Family=0x17 Model=0x01 Stepping=0x02: Patch=0x0800126e Length=3200 bytes + + Microcode patches in microcode_amd_fam19h.bin: ++ Family=0x19 Model=0x11 Stepping=0x01: Patch=0x0a10113e Length=5568 bytes ++ Family=0x19 Model=0x11 Stepping=0x02: Patch=0x0a10123e Length=5568 bytes ++ Family=0x19 Model=0xa0 Stepping=0x02: Patch=0x0aa00212 Length=5568 bytes + Family=0x19 Model=0x01 Stepping=0x01: Patch=0x0a0011d1 Length=5568 bytes + Family=0x19 Model=0x01 Stepping=0x00: Patch=0x0a001079 Length=5568 bytes + Family=0x19 Model=0x01 Stepping=0x02: Patch=0x0a001234 Length=5568 bytes ++ Family=0x19 Model=0xa0 Stepping=0x01: Patch=0x0aa00116 Length=5568 bytes ++ ++NOTE: For Genoa (Family=0x19 Model=0x11) and Bergamo (Family=0x19 Model=0xa0), ++either AGESA version >= 1.0.0.8 OR a kernel with the following commit is ++required: ++a32b0f0db3f3 ("x86/microcode/AMD: Load late on both threads too") ++ ++When late loading the patches for Genoa or Bergamo, there may be one spurious ++NMI observed per physical core. These NMIs are benign and don't cause any ++functional issue but will result in kernel messages being logged. 
+diff --git a/amd-ucode/microcode_amd_fam19h.bin b/amd-ucode/microcode_amd_fam19h.bin +index 50470c3f461a068e832a1ebe33683a041d4515fe..02a5d051d58b8028275ee6a0b091f11f8d1b6e27 100644 +GIT binary patch +delta 20542 +zcmZ^~b8z56_wE_n&cqW;Y)@?4$;7sud}B^*+nU(6ZQHi*`|jS|{Z-xCr>d*Y>F(<4 +zdj9JAoYRGYph0zD+#<4qApe!$e+DXY-hadYOQ`<|(f&t{_MaRKx%~fG1_3LNgJVVl +z{U0-+<#F8j05KFFDheE=pAP8%yY)Z8e^2|ba?!lG*_H})h9c;ajFKQ+n`rnHq2st{7`Xymh9xn(55h{1PelZ4( +z8)Cd2J()0bW4j{$oYD|O%1JLSP6mi8dD@uqn+Nk4Ni+EdNRK_hB4=x$;+<&Mo+g{& +z*KXyM!$#1!qASd{&9SB02gb{Wk>5TSb+4Id$e=iKOW#2M> +z50LG^X$0N~_F-(Rr6Da*9EC=6h$n7EYmWMyr4wGa})a(=Jk9V +zwIwD<`_dsUurwAYS;82Il!+DiDt+=zEwb11U_*cb|Me&$(i2a816XMfU9IWl(u?Jp +zY`(;-_)v85_Y16@$F{@bkS&gXmDI?HTK8L8XYeeBWzf#@Jc|quRbU2%jC<$A=U#+? +z_2ZOt`PhKYu>rN`KbDs#C|}#Hx&o6vyvxk^H}J55Il(V6w#>o_OTAehMkYV`=U@R6 +z7X$bj5vX<)>iP|q1QoZa4Ie}Kpg78Q<63ABGe8AN(6d6DMh&giP_mo#p{L|)ZfQ9R +zR-gR5JiUHf?4qi)HS(^WLgR9Og<7g{lda}@ZH;STPW$X=M=)sb@;B;?(3p9mi#;H@ +z8MBIc-}rXWi7sVTH5WB@HE4}WoEd{G^zO-r!E&t+S`tB?GaDTxXjS_=WwLsLII{?drAYw~FF;OqM*BAHf +znwA@Y9-qk=K#Yvk_{5*dO9Ue2$V^uZ1C^8e;M7(sId7r<#QZ6)UShs0yXv_cBmfe7 +zCu22d9X*v~1D|ICM2=A4$zel%rTd^2##O8`S(ZNhz+TS74M}ZWNrUz +zF56-OcG@Px00YtGO)|MbTX*aXG=Lb;C%~*zC)*-QlBEct__EWii+AUjlSK$tpYs8e +zzX%dk4Y2hQoZKm#fq$g%-*OcZjUat_nLm~hT;59{Z`!wT&2iUT!N7ePfnsyN%l}Gq +zix(BBJDj_u2p83=?enFz%HHe)QT@5{T&{2H5`Bb-?J2*6Ai3T--T`@34X|B0m0n`? +z^v+7O)QD6KyNT2MkVkYc5iFwW!FBEXexZENvaPAwazdm*(n3(PA5a65b?&sw8Yh1A +zzcoZ!B=*`WYzT#Xlf$nIidpcZhqxVTt(IzLmci}vIqn{MI2GkG)@I?p3A-r_RK(S1t7ff}cHKwZ{;0UbFahDD=zPAYar|ZTypdzX_A|zF +z*5akl>`DQIf?M23a?#r9m~~GAv&zv%@)qOE@&Pd;-ep2LFWVQ11F+xiHV@>ecUpa7 +z8QavpLww2=q%!#yJ%dwZX0pN;FZB?Z?XcnX*Gq977Lvks$^Rws686S<}lr7#EWalB)=PJ)BDn +zLCKC0t2OzmZ}&)e0jx}S56QSn{Hp36IUcukTw|^wT7=W9`W1F1+#^BVMqCBRUSYCp +zy}ySZ3zVaT`{2QQ08gR89x(~99I;lA%vy#o7iStT-*LFQyA6?h-zYQnP +zkXlrU0I&-+SAk&I*)MwIlQv9G!t<4j#ri#EMaah$E2rB2+_N-&7b}olRXU09K;IXs8Cj0xt&z+M0Aj#o?e31`cUIfqC(( +zCX^V18{Iz;CncA&5K-lFT=Eiia$$+vdc-J}ChXWM8B&Gg__bJ3FPeyO|9Cwas&tL- +zYM;5g4n(TQ-zTfBY(OZ97@=h|pJPEeenE>Ake9iZkM^3nJlBF-W?^DRAA$!rr=7^R +zo&-4(qbHDh%dE+2?kQBhFyF;Fdz9P2C3Rmd`C~Rf<1)hIVQYHb0r#gc!M~btXc3Cu +zSGA*GA-Ynt#pGUW*@j@2pxohIllJ!ht7UEa=t^=e-|rNZ>l)2raWc)X+bN?c@_(a> +z0mfer3(%~RLMJeoV@gIR5FUp+c8^5tHm6Jt+IWhf9wEf$gYp{G!+0k8XtB+$X?cms +z8BbsC&)6N^-O6w~202`>b(;dPfre%oH`V9jMx$q8=*}!V&k0rIL0q9FGq^C4PVH2@`Nr9vXg67xO0c^<=|_%E0_<1l$Y<|x5{(;R;(eJAtYEcoId_ +zgYOQTst;WL38@Ce_gl!5M{`(MFWc7nR~0=Ee|kfC`FL_O@C{taZQmoo0*6)lmUpyC +zRiSZytHqsSv&imuw%Yrg#k1!gA}d3Bmkn22`Uh-fk@y^qlN_1$;TOC@^-9u+1}s1EC>$6MSdCR)bFlb@$Ak*MQ>Rr7&Zslb&w{$)EgQ@4qW~W;WzyDD<@dl8@|CHcg +zY~U(0Bsar9zJBrOD0*aj1_qrN(x66s)16!FCBP}CeeW|?R5k1wd9>U>`R{~RGT;~p +zdtYb?;GpLwc@&w+Mg_DDU^zGTzwj&?^hQ7dv!p4>0_}S*Yw5FY0Z9h9%%bz2Ki@mQ +z^(2Hg7DvkI?t>Owr92Hq=}Y@`c-$(TG73h2m>!TKD)^R57j?750K^QgOgPXe5TG`- +zn!`-=sBoMoA(j1JU1vr49uXyQR=S#K{7vq?$1+I1r#63nF_P<#!6hcw$&tKO=C +zG!Qr&s?|4DtT7rjFC3{Q5fp6u4Lpnu7b%QEJl`e319IOspTSns%dfW)7$c*$l|}&# +z^5p2NIHLGk$dj*`>OaEJP@^&)pLYzu!;_JaCVys%;J4s(0$yynQibtt1ap_APB&G# +zNOp!YF)hi6(d@9Hj>^sKjTQ)skP=(5>qxaXFuZs*AP-N*&Xel4xaS?PUD>C+U%mSW +z{a(|Xhb+uRVE4!hN)6S&9Z0wBE-L7k^{|`7DQO!x-4bvM%cSxnsec34y|cuKwDAco +zdJ}~q>#3wafPr3m$W<#CPm4;Q*^#cGB`&yvuQwb`bK8%GRyN@rHY1cDlHOOl!UZ;E8#GjWp +zCSAS}4=T*_Qj9$r8eHpnd?&FpMlr&`+%qxK>hYme0QC$ni9f~R208gV)>(+IvVal%8CLE +z6f(Gu?R7|>kV64>7-gc%yUe~Cl5KYkGx-LY3Nz+o>@ox#p=o#N4U|T%J^bI)(Ht;a +zEtpkQK)^KyxZbyP1~bNK7+=nHw_XOKWVpdUDl)NNNp2s +zZY%Vj57cK@WeL%07M&?M{9b&6A;x0$a+FGSrWS>0oaU)FpBB1)R8IrUqU5aXvKo4g +zcx#A}{<2hZu+v!;7-oRSd+z(98)*okUd@pgXk~UJ?uK(`eK!r!($%3%PwROc7OmLT 
+zGSz~`9ac&HqWu#oWa44lPd>1BW%8@DoJzN8oFFGdU%}`J;gDGH`i_9<-MsjYhMyUY +zEC5d|-gOCF{haW+Mq#3TejGZtFpDJz;>92E81|Kc>#FXSN>z4z>gTml-!80UeitQHTA@~8J7F|P;r%!O|4&!rr7Nlu$IXA+PbVClv8)Qh)caDLKL2&l;) +z$6ktRlkp{$QGrwJ@M!gc45P)WKjIVv&bOEtP>o#QJ2_UPajjg-<#2kARov}byiD!_ +zn8m?1*T68@rgwW=8CT%Kd(A1r7O_8TjSPquFG=1b_@fp|IG++)Bo2!EgwBGJjT^>a +zFD1e)y>kB^Q~S@g4nju6SxI&(xRL9oE*ly?U$20>j*v2>;@TD}PF1VeM!RSN<^RG8 +zy-V0GAW&``;uxZJ8Tj1Rt|l0p%kteqw6~2hV^w@CkGGoeNBh=c>iWW@`|zQRmRg-N +zp+NY?qpXgUey^UJDI#!5EFrA!{BHDz|Ld(Zwochpx>@eEVQz7ES3(Q$r?@R*gY=TK +z?zX6P;yC_nm&{sa|3-q(3?R(}!Wbl*{H(ZD#-bftA$o +zE>Py-xSN;}sZ14V%VVfXT}ZurEXQnw;)Rk@sx{G7!alz1$<$RO(H`;}m#nRUc-YVDor-^rEr1ec>cLA443R3G`5`o;1NGd2 +z*+5?o0(DGDmWE0ZSNHV`i>o>wnW=;fuWcPE%6hZ +zkVR_)?;nMxMa=UKYD3y@E;Zb}rk@Mvo}_Ta+S+e!X1IR9#d~EqH5r+XD}~qT{X})Pds$P6J&YQAE=}yS7$Q^Vxd{P+B(#G^h=a +zz5^o*FKB*Cmv?_f0h}_cFOVnSp=K5w;fTOOajvO`uD$4CZFKs$S1c*#vYu-V{(AYm +z7v%5`&R@}g$6a+Oy@PzFQ#4n|Rl;a>u#Xs<8}6{OI$h=u@}m?vz^{1LVu`p$N;;69 +zCljHT$JTC${QNV$L6+KMiS>7?_i47|uTSa8Y-kyqa@sDK0S+%Vh|;$3$uGAb$%I^jmW54SDR=NDjB>AEAJrl0T`+%c9N8{@D)(S5TE)oB>{p +zOvq3EsbmGjcFFGoT>C@v@D`Hbu(~v2`%|*bsAS;lk6lGIIgk5Uem;n_h;J-N3pW<6 +zJB-n$F1S5)UXz|=?^}C?ZfJq44%IvFN)+Ga%wf|IK$1+`D&&u?Ii7l^TzyUc9~xEG +zf3A_=Urh=G1{>DE0n^05cQ%DNWNW9v)whS@ASaY*?IevXX)1?3Tm)s|3F-gRy&g;u +z5K`j*06(<<6ZriX7yehFvoP5mZ9-Jlu=|?@sl24HsH)Db+kgmIaWTnTJ#QJUH)E9d +zimS=8_q)vIT1w0z0%2;2oTmpX7x!$SlsyB3_?*e#QS>_I=OBG|ZBM}cp2-^pMW4By6a>V(|J!fUu^o%hrr5mJg~onFsGKCn2E +z3RD}jeb0d3pgeU{WZ8wiI}TAU8|CPiI`S*q$n@-sW__Q2sphb2mHr +zmb{SUrtl~C|Eqp2t+ZhA8(P)V=YM#|f-BRuqOJRbYr+`trZUzVJI!+Pa<7hc17fy2 +zTi=#m(IWWF1EfC8mmMhr4y +zFI6L6u|r5syTv|R-bixMKyjZAQG~K{JwN?ny!v`b0yLyp^#}b{oA|ls7kV`1L6X20 +z)m=K&$!<8eob^B%kAUfL`zhUP3)qovXxNiENOjAWu#~<`7(&D_lpv{zX{!C;q`sF5 +zjUAgP@a4}}F;m?2>`)QBQc?zi3Kwzjl=3j`Qw=DEztGZ;AR3uB@3j1LSup6jGk7v& +zI|21G00{4AHprbVY>PD@EF?+SZ}+-IO~UWjL;)T#00D*-WG +z--Ex}yK4zviU2D9%3vy+>#nY@x|(vfu4(t;kGJly6A>-1)Y$}YR*pKJ>5s)am)Xpn +z%uJn+!!&xLO+--@dmA;h7PeD>--A&g-JScL6)mm1SFYb3?0(S+ +zUzuucW^9-Wpo&~*BfJq7s%bp_tyk_IKsmv3+6izbCc}1kQs9WEa`x>MDAr4op_ev% +zWyWk!fuhT=EK%k91Ev^`G&8U!@f$Da2Z#1o0AY1qx@9$RF{Kh(``na(xe +zdz_0+^@e_=h;KD}`?$D9ee`g~87rfBcLLBZg5Y1a4~dwtV32HcOfNE&j +zccl^PxFmuSnA0y_5m#B)-k&UVTUfE-V$sZdHy^>+c)^ECXyh|bTlayLSDqjBB%N*r +zE*Z@ZVvP=Ib(EP}%rn9b`0}J4dN+$LN9WdNo$Y4*kexwk;HfB%FVPCRiablCCGrL< +zlSJw1aFO-P;nGBHJ`d^0{|F9-0NWo#Uih6W0KBHGxel;{KUXMNXqb46T}hsf@krrmd{{Q +z>v8rQ<)puSuXTt2=yKT^*A5pepvR%s{LS#L(h%UH&G=({VOF&PYGm&&lNOsMZ?7dVZ_DE;M-8&RuC7e{Hjk +z`aW%(y-=*(Qt4)_weFnS8atidBQ{lJ$yI^iRPZU|f>_!rz4Z0~1{mShMd4G#zsu)7 +z7fq&U$+3+x16!>s&NNh|#70N5=5XldFTRNNU1#!kR@|r|F|#|%e}ke~>{?b1?i@fv +zB`{3L)jbaRFP|OP<`LAg1`Id;<#+@?hAXXdx~5{YrORlAhfL4Zu?+h){udEyzKJ&d +zb_6Q9#M3wb^E164c+TxF!Rf`2+TY{uf-%+|K4H>*zL*HzXLh!FA6bTg40{m<_o}j) +z5vhwT#S?@tS;k%!6P-;;XN4WW4o|wXuU+d6&nDHCKSVTKw?esXZkxcU`RTnYmG1xV +z9wIWTH!Gmw!*!G;$=1A$la+_ErXJo43u{Z(#RlwrYA~`JQ1|R7=!f#PS905j_)$02 +z&m*f#NewC5Dh->ZZoT*d>oO>XFP<3VqJ2F5!rBD~V3 +zqcg2X2ghvCjYTyz=;m}W@HZcw%NFm@(<;Pypo=bM5mmVW0qm{zj~?w}{*jt-@BMtLhO +zFkDn`GKTm}rT%69%qy{J_|GG5IL^g?&T94?3stN#H*^jBl`eX9Jy6}FmfsSvZwuh_ +z<&UQ>l7pH_y_xxHAOZ6Pwrk;S^Hf$x|3)stJkECe)X;h+hOT%q +z<^BX{unqhMo-vE~!scL%6LZuzLJ$3#o3JWz{~p3)K0;2XedfDkvz->oc1OW{=V8#O +z>QXjr#sA1u3)n3fOgLL=HQ$n+%=$3_5vhQ|bkDQg{wu1vbj|{;XO)7Oh +zpjo!3h%~xW@Y~T_r@{)SPM;~Sc-_)uR&0J=g9mvYY&UzFCM{w)A!Q1F%ZiG>2ExZ+ +zm1jqg@|!jPc+|&wPGL0SO}8c@26C1Vl%9|82HN+|E%#T~=IVEiTy?CoTze)9H#uHD +zE(|6Ny5+4-(7SCPy0P0y`=i5;E9t==zz&Ulvy4uaM(YBplZ_zJn1~P`@6e9JEvWls +zauEY-e6>4;Ta*OyE*q;(vJ?T=*dbPLLVhWN0%Bt|@Xus^hY}~=!k-uLs9UASN8`b- +zDpqLEBeLRy>x9FFGR*)HcJCJ5#TBX0=s_P`d74*h!5Ne9q4it-&F?ii++6)=VDi3j 
+z3904AXSr}0oMA%aB_miCiJn^G76*P|5aOj6fs`plsT95caqp%qnn$0I^*R(5Y0%en +z5mVmi&m;9O8T|Jzt4F5wWSls2tO8GSut#*NFn1iztp#+m1)0xZ97TkAfggL~NMCZc +zm?N3Y>er_<>=A!Ph9Y$t@#dCs0rVlv2fYdl+Ea6zbr7a~|l%(mNlTmn`t_@WwWi<{ +zrasf3C!lwY;A(~1X?r-P9e>#)-zm4tQUsp1kN?ShtCapy^*nw;5^C3B0Nj8`h>A}fd_J#XNiD{iovTtTh|AG!~aYR!;0WFcbW@SATN^wod;b&%@TKg2`F^`Sik*vs52DV%CW7S?`K%Q;Qter +z0ms?8Px5pp*-gix7XCTY{Yi{rf>W_PPdxiX04Z?hG)z6ku?D+34OHMza+>D}qm#Zj +z2YH(#KrmP;mBk7hG*(aai0F1di1-#1PTj0>?c7#_L8s36w_}~<3gvKl*+fhcIMeg7 +z7Buf3R8`sW>T?*y@c>MPJqN0~_U8GTbFx>;+fcr-S5bkIY+FBK5cQyeA2VidjgFA% +zbXY3`T_uGv7F}etz!LFc=}EC1obh3jpNah}t92qB-*k%>v7pz8hScXm?WJj|U1rAO +z+?vMULZu7`w0U@tLifpOHHFF69Xk6Y=Gm9ID4|hh??{sy6 +z%&NZva=nwQfk3lnvzy#u`obKF4MWdCq+5Z7=-y0+WAn-dxSWpufEFyq?}t+xl$5t+ +zNc{=^W&Vw9M8wv0_!k!Te1A3YHl0kq7ObaEkF@CS_K*4kjae8+tPx+pNcgR{KC)wO +z(BoY7x^8m=Hov{Plg;D2oxBd@Ue&AwsApr{3?#9qxMYGjwT@5qpN5y7cIF3X-D7Cc#!46kAW~!@dz91Sh&n@S#R$9OH`OeJ +z{K~l3Pvo|IRY=*XGCfC$yP`Q}f;g1cc@_NJjB}q81ZU$*#jLYT;I<|QnIA?l45!xNDUB- +zwNdB;u#!@_t*l$2JFdH)!ZAospjEcEhO)98W~tzC)1dy^av_CTt#vl4?HZp7=m-SJ +zdHdrtb4gF2{_(N@8y;*kbj+VeE#b=eS+R>ZyC{vNz; +zGdFqCj#dJX<-5WGuO=jzT}`PKNDsQXVXqIspduM^qO~mkORT>Li(h^tT=P9Xm>1=Pu3_e!y*P-k;0;~w)C +zBOH%Zz^-tO`rGlbwyoGnj0V0y=&2VU`CayTcLy*X_3Kka9oz+IG|dm+rn$hCf1c|A +zv;oymCC$s%q+9ePt?tra3Cr1+Acdg^WL$HCw3eh!hB3nq77Y8ueG()1WPrQwyRBXuyU85Sy$VLb(!FB}-uZ-{>9Z^+1cKQFuWBl)Vy0xO%t)>DyB6d3+r{H!a=Qx9sjY-&G^oC`su_ +zIx{?wq~I^ZxlqY@3bYc36V0-=U9nw+v6OPc(}|1F2to*Q0GKKHg|a)>Mcvrv{ESc6 +zdGcRKtjMK;a(}`_Ls#v28O-u#RFXIy%Mp(S6eE6HZ|Vh#^*lNPI9qw3x++N7weihf +zkx}Nlw@c!Ps<~|Q_77}L#=?4s8OfzMS>D}dGarn?)mGU+D!W8idT3k; +zu|~ov%xLg4Wr*oEDY~J7b9*}tTU%)xb7nSC)0!*05a^OU055i@G_>G0q&F#sl9C^M +zv|4DIQWYJxwukdC+WZOpLvQ#lX>c{RF{wKl$9s2$S`2p3zwy%j5MMxIz^LO}g6P$9 +zV(^}ljOm%i0V$XFrHDJz+|0d%PK-pZ&W}%Jafa5CNj1UwIY&R{+6v#{*Fh7c%w2TD +ziL@Xp+JY{Je!Vr&$cP@{O)A0GK<@CE+6W&d{%#+A-FbsStEdPx?*7XyE}%6}c_v2~ +z&I#Q#Fq-fh5bPV&wSQC?Fd~DwV|8L@H%7qBmfRmuLzh=e*M346 +z$|H&JKT_ZYLcpLNX0f=gWGVaDJwm5LciRYn;2AQ5$K06`QRVl7Y|MJa{X*ds+;)nVB!Hq;#1CGfKa$mnQ^Xh)rIKB8L}dkz((l``OFDyg#=&rmGOUQjX9 +zmjy>G*<-;%(R{oBScIf2cEoHk0^67)0=htWN^i%%i|)P%0nqTBThb!Gk0eislIST! +zFiL4(p(8#jb1m|_MWnN`YLs-Tijvi1a^Gres#vHGPzH1)y_JKJaiyF^bc#{AFohEc +zZCX~vjV40dF^XYlBch@Ie+c+r->7#9Slg(R9(?m9{RXtBn#>)BZ3Nn0@e$9Ls2dsq +zy2P3h8$((CJIens-=bsj)8zE?C-QOiNE;Zv(9)tvKBH@9@xj@v +z_2N_*0aD)hpV9fPlkq(MmScS-|GC&omu%$&UI`!6i|1GhqC;CT;Asuw5UA&|d3>n2 +z)A_}(b9-qpenNJew;M7}|3M>L!~|NH7I(~E&Q%)}W)bup3zv!!F|mYz=?Yns&$?Ti +zZQx9nbcJeOSx~2!jA-RRkbrG!5WHha{cWD0Xm}lPW3$~ZC$(~Y{z>c=s;uOS)N&RK +z4cUoL(iiYYRNmbiz`K_3+AIZom{+$Tr=Z>Zu{fr`C4WI~L=rLXCrjF=LF>7}|IZ^r;J}Q@$2Yd|`JvG@W&m +zpHoq?(niOKHXr~~_x4vez?J}dZI)4yTG1c!ZrbVJ9R`jzAc0nO5HZfV3?j@ux-Ue? 
+z1ql`(o?x-A+$YmY=TYaS{px*PbKQ +zHj8CME77zO%$wfk(eRf(Bgm|`>{bFb@k(M+N8|k8FJ#&9eO1;%;1X7PbI>i!=Bx=0wy*W3WjC>> +z$=DPlLwm3tO`w7{ldJVR +zKFz;}tZ8WFY*jjZ+Y`l){AaT_nWBOj!iep>N_CCeT2gBGC!&GDrWus3_4_?X=!fB6 +zCPJb-@O-tJza#wqX`vd&m4_kI;NK$ZY}1&CTLu>1sED8~EVNVp4Knt`cNo(r%2qL; +z(BzYa-j&N;7x-k^{<6gT$2F3Q0^YV;@GKE0`ozBtLM9f&<)<+!&Pq--1mR2nE3N9^>QwE +zJCSGoS4(&-19?l~rZm)Q>I7NX-Gj4il6{2MI4XwXEJfC!;fyKviF5Pb7bIF~DUyl~ +z@WaE+p^Aeuj`);fw3F~&#*4;j5tjFe9bA{8J=i%I6ATeLzP%N#V;BZP$&ZrbJnIIZ +zR7;T#N12YEkykfq#;|KC9KGI}Q2~0eR9r +zuqOeM>UBuzLLr00`bi=4mzAqS9e87wyPoc+JFlKqD`R7Qsj2cfatdmK(p8hkmWZ`N12d4nr`qodZ?>n@2$3*H|8zj*wF7CQyCntXeIQ&jORXE>F-~hK#!GVt +z*-tWZD<)Gv(gxoc`IRn9SNtFNUd+pC_=a9SU<+q5uvS{mKZ8nU+RYy4K8znw&Ty4eH1&j$s1x{dz!J^N+s0@kpE|Z--~YCg%-p5u&{#4v +zRZK`}Hm2>M@I5SY)sEU`jg%HcS@}}SvxHIOfvQydWgX;xwR39|T&X@QoQb8XTYXzs +zQYrHcTtF@<`M3Aqx0rjEdcCy9P*sY_z1!M|#-|VoQ5UWY=}7`!sO&-*k&>i45D=|_ +zGScD{AQ!Y4XA|+cE;vjz?WK`W7Hj8X_)HDV20e=Mt5C7SAY`EeJ_Fisq~*cZSN32@ +zCT~a_4#|W7UwGz^l*I5}Q8R{uG0?9X+(kD|!UR8oS4vSp +zpzS9sz`eQ!K=_kwS0phCA&WR#{iqsf42cjmB_q_)I{_{WYaaR&4|QptVGW}~s-eq~ +zv7lUDIX7SATfxLV4T;0b(Y!n|@^$ZR$wfytt*xYHxQ0ed#Fr0(32hGqykYsR3f&%P +zb77TS^|^VLHjW;1Pw9^)=mkX<$K71{f4cP3OfMzbG4f+nY-#3@302a8=kE;a6F%z0Hz2f^2;#6RZ$q(H^f9t1;z +zwKwGSD)B$7hBLmQWjJHC1f~{Uu7a03-$o?-S+dO4!YvJ-*+i6a6(6*Tf^H^@52q4? +zuD#XeqWDX`|1SDgC)fIxLYb)NZg}==xW`fI!T?dI+5lF0wEwL5DfjlR2}9w!cS!&% +zLgUG2B+r5|wswrtnnMwx!plzWir17Y=jvRvnolMOm6 +zY~V^6iASwSC}mY3Q@xxbG_P5oh^VWX-=_Bp=>eCP6}B_qP0qVNF1!EfNZf{ued)b| +zRR`bA!D&>-qJPCIi=e>SlCciJ(oPY}<`ro)?6B*8de+((6nR9C4}U%Tb9AGZ+>9g? +zwK#5)+Hk-m^`auv6LmaQNw!MrtFD;t3FPaVXv+Wx4^vcSf6;V3p;U&CKVmI>Cx@(i +z&&hv4l}Qnaf<$Xh*Z4c*N^4uYxm8|NJFTY?x}@JxA=^r)@?U+{VW35;6RF^4cSiP4 +zW1-)4)jsPsUp^FPB<#SyFPV+aV^=q9Ou~SgB7pxS#&Zq{+Q?PfqAS#HgV+Pt1v+$9 +zF+bZL@~OWPi9RG9JEIs{j9Ms%q`7l$-C}6-!^L*sc!ve=aT12i9K}F!#j&P!Qv6fu +zjC0YXXWb*lM8zxviK&XdjjsUi`D4_1yNt|~P3$Fwmfz&5^d_y{B6~3}Q)mTxHuzuo +zf(-v2hXh-}rsGG{XM>Uo=&Ag~0kem!5PSyxQtAr1tYe!^MX@ZBxADo47(@OHuAOuc +z5M0gR7}K_k>j=#!=8?60EeLqy$S7Rm)uH=_!W5<<9i9F>Q^Pz=^a?dMxZ${AA=j&d +zjO$=fc%vFrCaVqTRgUH_j;BExZTE-B61ENkSv2sQAs_2ciiV79r~|Jipc2wu+2o%d +z!=AesN4zEw^%GGml>Je611urQwhg$UC!XTyTz#ptk!fj_dDyVcziRR8uS%jf5UMjg +zJVRPDwL$olI2H=Zv(_vftJV3nn9QzL5-3P*!I*wyI*$1}(192a@n1c)Qej)Y3xYMBv5CTTrQUOKm0*=`qYocx@-fP&BpY%|DVU4A4*#K`;FXozC +zLrH`X$c0`txEkcuDtCk^53#L^n$@L8N;=z`2ZS$BbWb&$h-i$nMAeD#MZ`Qs)2>TP +zV$GyO%~PbGc7*V~i(?B?J|laVIxvsEo)d71l1dB%UkEmr$JyNZ>Nc}h +z?T-F)?w}xBf0Tj=oBBZZ5wf$!Dtp199mG#SK!hS8RB7~ +z{)1Q$Ls5F^ygnlF@o_yKU1168ces5_0C}D=Tah9K*O*i`oFkZ*Yg?LS`_>Z5o5krX +z*eygoR^Lvpleh%Kyuv)mQPs!IR7%(`1tiTx3|`ncX43xx1N+x%yL}+*GYRWAd# +zFCJnhx>zG{Mr9~TzI0IhUX4GNV#gMh-g&2uO=TMGG#-3P`AvKZZ@FvM7#gmNA|Id+ +zs#!6Qn;00KfZa1A@Qhsl`~z5-IE`>J|Mu7ZMz!x;F=n1gBqyF5X+kIz!43KEUlFQj +z$H_;?fxKIxS5HEBlNHLYzD3;Kns!D<2a1UP*iEh(U4pc3Ozz96dl~_*O+NdRehUto +ztbvM2zh)4s801nG19LF3s|+^Ex3#8h3myOdd!pPG2F98cc_m^44X(2K?=NzR7i3Q+=}49~N)uGg#lN*L`3l51K?F>6PyZU0ex!(w-|>=$ +z*UX^WJNyIhwV5g_U(eW1>N*Jv;(op8ApKCP*iz;;nF$p#|MGR&6-A_>%x^KTs*aI7 +z0*u`emL76Ln_SsQ%8VaK5j+$8iA9Vq=ck&O1aiVm>mv)a8KubScehrdWeJD1I*D

rww=MrnvD5Mk}X +zhE=K-H;s>w6p#I9R2io7J-Nq$LE$)DHdMOhgV-%)80(alWYiv=?jmtS +ztqNI3j7Xrf-aGQyaRjAUxs0ibvC0*bUo*sM&R@^m&8f!e_P^6S>sz? +z9rcz@kmBr^=LT00P)ek+r3OH;7Jx|a3>I!+M9L%WZngQ80JEH!{*5IGNKw9GT0*hR +z`6m3%{+<|j9N{Jk5wPAMe7GaJCQecuuVY`$AJRQBqCe}AxDSy6lD5TrFXu!D5*F3- +zD#|j)a||~URbqL2RGXz$jnnj$$J(UpCNKINoxd24;n}tchATsV`LHDr2KsOP`_I=C +zB<(E=#Od!X(zS&p2n)Oj!5xc7yn=`ATa7;q%8-Li@c-n8n9{wy7csejVX4*Y+#aGE +zQ|vJ8A0}h7!3Tk=OZgD4aS;*7Lc#~@zRe4D#ZI*{8iK(8B!O6<|0f9q{of>zo0FTk +zSWpbiO$#i3;Gx{FvJt<%O(vV~6Ju@tfc2oQ1w~D!o9eh9cfm-L1caA)^0fSj;mR{; +z!cv;@5nYV!;^vl%`iFw5x)0|HI(k2zm7xqKC!-gjlw3;Pgc}PnL$wX+djm?WX%bq< +zoJh*xB8?cFxuBoKrHRH9(clyQ*!&yf!QnziP4GlE*?DFoNVr)ZC+UYPK7k_1N+(Qx +zDBM0Ks6z8I96t9Si;;UfC2Os4qaq%<%*sK6iuYLLIxDVXyeT^`QW*0bXzU(p|NIII +zy*obYMz6p7G_%?Dr*uopw(;G8U*b-I4#)H%LE~Q_$sKI6EoVFI9GcLJ!OO}W^Mn|8`deIOl&%_mZxY$o +z%wFEyYXy6S~MNBq%QZ;8rn101ILgJ(0e9elIfU)j;>Mv +z$-6wi>H)v7Q7-3}Ka_nI_C6@EYMFRi8+cXvyOfBas{jE(l33G7Br3T(QaRt_spYIr +z?N}2nePS3#T^N9|VMim2y|yn9KlJOHx*Vq(Mi21>K_nQ8VcqU~_eNHamwa>q`$ +zQu){BOY#e8aoE2i^~^wfS%ynwiue5A5(dz4-GBU0CajuYED<#-P( +z8{Ejix~AKgw|#W|#crO0wpSt99(mI35g$F+b|#@OID-1!>@4|ulUbMJw21_Ce(1~O +z=X>%WJ#5XN-Ybe5DQqt`Ymxf}As+pa=ppG%NqaQk7gp1&z#kd`F +z!Td2Coc%%+DjV3_e~WniuK;fkknlvN`q{PgR|r{d<`CedIIuKXGi^pt7c^~fW7>cW +zl#G#)XsRm1#-MWoYQblJD5~y6rL16hOAFP^007X5_ldassfF*{!`MsZ62k8hps|WM +zPgaJ1eF4lQS+&G~`DAp10khqki&j)C!noDeUffPVwA&+E&L>!bi^R!T%@)jB5i396 +z{qU#dJo4`A7I%3zm}Wh)sOEXq)Y>Q3x!J6At$u#?=dEqxfiVPs-|j2WNaNsB-w_8? +z{mJsb53jKnGoH&ib1|Qn&~d=U%?zl#D7j@8e1Y4V9*~ +ziG&uZN~Z)>hGK+^%U|$E&NU-eJ8xpn?PbwxQ?S{;HnTT&1aDg^Aj|WqKRIJ +z>KKY@irSpt?G9Ccr!{Ua@CDHcfdB<855x+uh39Tu%?GGE`?g~3@X^FQ1D(?X-0(&JfIXX +zq9Dx3L8)ecMIKHhxg$>m2j6H1Y9h1mvtRd5cBqTPvD2kd$t5nXfxvjCy0g`4{)Ums +zMoM5w>`eq<<%dU#nj|Y0(yF5TUt?7P +zRz5{LRW+tmYy-h#TOCa3$Xf!xD1cPHI(={Y#dJu#Jb3jYv%dvbCX{LnS@wrkV2$O+-^DP@!!Qa=eI)>vJ`vXKGgQwD#3Hu%7a +z!hzp^O0|!Od=IT^mt|SH>=fvhU +zk2A%u0+$Xu^9K?@J&J-pI~%+nse6Xu0)%YlqBl3sO1o1g+GIjQoUV;>_7!t%ec-O- +z_xIv$E{Zpx%!IUQ-dxTJ_M`hVL}pe6O$_sY$|({WSAIJXVPX+fg>kw?Gr9&`q~ymo!S0fIq%s;N!frOLX1f6@T{SZ(m?pFy|-DWXJ +zYWF2YiLAxU;xryc-RT6(6y2-aBu@1{wRL)06O=5AkOldP^a;AE7E>yi;&VdqgH8E?A#}ije2K&;t1coh;-K>H +zf)`B;a(lu;rI5V-HrG#}rDyhaqj);{09EcD;n6LoJfphTmX$jvhmV`CvQ~$~n~Iyq +zaKkcL(Z5rb;!<-L|4$FBoPJxIxtG{~P*7ktk3O67&-f<(39kph7^B=0JwMFFLyC~@ +zH6T%$8Deq>=vZ3##O3)3%&Y{*VxBwZf@CkhgPLii^T}8jXUf`IrgPJsVE5XC(od4T +z_J>l`_Kg!);6fZeY@AT5KM(FlG^&W&oO8>iqN}Z05I#?lv(?*e*I{?Eh6HwhBnGQt +z`b{3u$g~G;A+iHfa}IzUzSU8WX)62-K-)eG*eNCmdlIuSLZ_ZrDX|Wfokcy*XQ=5m +z3Yz#TusjRRCCVEFoDrh^H@jH +zCgKjiR1&F|N3~5aJm}#iLm?@Ds_f|gfBV=klmV7q%?xF2CYg+V;K@`9O%9X6ivxL0 +zj<2&-xz}$;=9Ov*??j4EEXp>3Q9e*p>I&X#nM& +z9kvMaEvD=4{e(EnJ-ie5&=L9Q;c&I!#GofsE&1f*n37S3!y^Nblq+z5O{<7>!w +zW&W0ct0arZh4}EagdsF5p-E_>rGsX+=^BxkN<~Ad6fHg^PfVwT22)w(C6+&K>0Rcx +zph7@KUA^_?$8pZrf&9IHWfU;@=o?8fFwEp53ZjuDRO-fJXbz~)j@4IolVZA?wQYmyTcQI?Xa%*^d9+5A?s_f +z8y?|I=KI4SR;30E;mh~8OAVPf$$jEX^0UfN&-n^0^rS-x63P)2+TNuU2_n?d=XBiy +zoDoQrMyjsP&QlNNV9*4Q+8~I=?jv(LfmGr&iSw91hj83D)V(l5BP950BHQ+(>`pP* +zbk8iSOS$qo+stl%Mp`Omzahi{I4BdHgS5Ntwsl_iJMn>wUA}$UY^)lTy~@hh?$FXT +z-s+m*-0;yel5w>=`_!@#Xl%IT9*q(&{$kfACS(b(T|Siw=B7G(>J~a>Po}G~-2BbQ +z0;-5yACeoSPnkpBgmoOpdji~H!fwq6=>zfP~~6Z$p* +zH-^O3ahaL#g2$l4+C)GaG7Y;cLSf4s^xA5*I`#ya+6}i>b)}r^You*3rH8vfwW(=U +zNz=<|-&;3-_=$WT>6Z{%NLT!_)K8+9p}J-LdZt7h(%l0eucNt<1(mf>P+Op|J0dtE +z;6TL+$Ph|bIl?}oj#WYBzfYE5A%s7t@BPmNFmQEw7vb_*hGbq +zRKr{MUQz*ya%RXNdxd>bFfx)@GsME|7HZyTES=hahGThA@nduW5W)sGh$1x<1Ybf) +z3g}6Uv-D^I$NZ}k?;Kgvz{MWwm0rx>z+B`&vO;`1?vgKUV$qfbrJGvh4V5eUBZV5) +zP^$KN8AGwUJca>;2Wo 
+z;u!OP?vpE-3alwVtU{>ZecpkFR5Tt*Zi%H!Fy>#4AGehQulOa;Two+Pd$TU=;LROi{aq%J(B(`|mfpnq3rO^_)rdAuc@aatZXXb?t>t#P&wt9(!AFesoJf +z?^+VNm6r$lMI$koHXZ|4h7+rBEy;b>ij3`lERk;?rG)u`)>zZWDi5RPJUv4+jH81< +zGuu#Fn5Y;AjHB8uFsrZ-%CAqW$h;m@j?|n`5kZZ>*&uF9JkQADMkd$YqY?4~m;F4B +zMy;fbp5kvB&^06%FU+3UXujbOOZ^7}mjS^O_XQ#;8OifQd-&PLEL#&tVbJExuB^X* +z@n|MOjt;wm+RC;*QH7fv`Apt$hAL9UIwajtWLujJP%TmQXiNjFEYqXA%cY#&-)B(B +zrk#@ZMLANg%omz}Ov=CLC5TxqGpB3@Tp#TaqRXUgtZ-ROtSDg1$)ZEvO!3%Yg7SGa +zUly@SK +zO6>DqWadm%eRncLZ=>^KBt18Rc4*k0kRnR>Jfd8SIFt(sCf-TaGKHi|(Ab1V8rq9j +z*=-aZM-jx5ia>JC8Cy@0Wr}KDaGvZ>CgTGmn#lCW@IZ(!vM}2?#+9qd{cV4L(YyW& +zJBhn$xqiBrhLKv|4G%}EyL39MiDxOa4BwhJ7GC;hj(=|^RO|Rs~T~lu)$~)&LvZnQC{eOiLbkBO%8Y0 +zNci&?(za1apd49%2jsHn)9mOe1KFW)*tT|NvGa!ehE`_^R0j0m!F{4+t>ySyn57LNwv8(dz{k8OqjfOD!p=3fO&$k +zIO-r~6gzf4Heao$nuX?n`;h)UAg27ph0rt>tG?wCjB}2*qs2;6;98|`dPa~o#tcDn +zDiA1z3+WepBR};M%vb3QHH-LvzuRUP(Car>z`s`}oD|&yqSucR&U% +z=hcRpAc4j?E=i^OJpaH6sp^uA9BI3=eAyz@X9m(ZoolEF_D0r!JebH%F-`6_`h?2< +zR9yoRD)!W)q}vjuOhA*Z_UgROyx~(bW#Iq1N%tSA_Vsewl0AUjo_5FVwg$yokkaM% +z>o0;XzIdUmi(;zd7p#X_}{=4LSCp=4SSs7UNz#H45dU4lexOn +z3je`hZa-yr<)(a1&)RmhE0aw`Klqv4EE=vpeMMx8%H6Lzc+pQdO^5v5 +zEmS=jE4^Oz*j?%5rHPRW+Yp+!eZ!kPKXWXSh8>ky4i?-c)7`#FMSXC#=lFdG-C}$;h7_1s%9wgVi+gMATta03 +zry)_qnStosRd0OiOqebT=_K3-MEZF9IA01VVVFCTA-p|~3#JD|nlGnNx&iY|dVf?j +z#@MhHaxLjWr3fgvl9Rr<8mNU(H@=9+4Zc!AOBivp_Z^@B2FjMb_9tuwYC0{dMJnK2 +YVQu*TZ8nl?f%`zS*L1OeLEkX&JxSZWhyVZp + +delta 28 +hcmZqK#I&TDk;}!`kpTo87$#a7O{|gH{LazO5ddxw2oeAQ + +diff --git a/amd-ucode/microcode_amd_fam19h.bin.asc b/amd-ucode/microcode_amd_fam19h.bin.asc +index a32b4d61..8cff9013 100644 +--- a/amd-ucode/microcode_amd_fam19h.bin.asc ++++ b/amd-ucode/microcode_amd_fam19h.bin.asc +@@ -1,11 +1,11 @@ + -----BEGIN PGP SIGNATURE----- + +-iQEzBAABCgAdFiEE/HxsUF2vzBRxg1fK5L5TOfMornMFAmS3F00ACgkQ5L5TOfMo +-rnNEhQgAizSV8IFpvaYNytaJKLA4uevrZneGPV4czjCXnnj1yHpfQmCTyZQnoLnx +-7gyzf7K5271zO51FBQ5z2Nm48a3XPUhMbQLNP4BZdekLiA3bRpMtSyHct6zD0ULm +-xaFaOQ7MR1tGADhlon1bDvtnOuixUhwrZhEIlR9MzQAzERKDMOAVTbxn9ZhMfYiT +-LhA791Blyyi+6Z9uh7BpaA8l8uvoxt+uuvlBTjQMR3ER/TEjgcsoy+XhhK4QKS0V +-wJCtcDle/3pF+N6SAFWiXbNZ+P8p19afhcYddDl97xtpzA6/8b20a2eHkrqnu/Ds +-jTozF9kmhiifYMYpXtXgSOwI3GRZbQ== +-=t+j1 ++iQEzBAABCgAdFiEE/HxsUF2vzBRxg1fK5L5TOfMornMFAmTEYrcACgkQ5L5TOfMo ++rnN4IQf/QKbOezXZ4OYzaPANvsZQEAzLNfuylC/aQMwrPaO7daz5/zmCN4HU5XkH ++dDT8DYfPg+fQHIgxAw0/L24xPOm5Op/QuLVDyDqVr4qvL8+65eeI+JqxD/wXMXYN ++V34kkLM2p8iuyY1Nc8IDLXu4X75KGNPbKZlMRKMU3Pr7ai5O4ihmiAM+N6qv1KEJ ++YToNN6vrg0qt1cv0SLM8sa4e7L1+oblUrg/o0FViYE8pxsU3ZRRVSJMUg+lKjvl/ ++1ZPGKOdD80fcNJ+ItYGHNNs3eCc3WgW7Kc/E668eH75Yu9Zt7ewWZX8Sg/mygleY ++OzMwhbPJg4bF4zm7C/Pku7i1T2Omcg== ++=km2X + -----END PGP SIGNATURE----- +-- +2.40.1 + diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec index 710d34ef..d2394adb 100644 --- a/packages/microcode/microcode.spec +++ b/packages/microcode/microcode.spec @@ -21,6 +21,8 @@ URL: https://github.com/bottlerocket-os/bottlerocket/tree/develop/packages/micro Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{amd_ucode_version}.tar.xz Source1: https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-%{intel_ucode_version}.tar.gz +Patch1: 0001-linux-firmware-Update-AMD-cpu-microcode.patch + # Lets us install "microcode" to pull in the AMD and Intel updates. 
Requires: %{_cross_os}microcode-amd Requires: %{_cross_os}microcode-intel @@ -77,6 +79,23 @@ Requires: %{_cross_os}microcode-intel-license mkdir amd intel tar -C amd --strip-components=1 -xof %{SOURCE0} tar -C intel --strip-components=1 -xof %{SOURCE1} +# CVE-2023-20569 - "AMD Inception" +# This is adding new microcode for Zen3/Zen4 AMD cpus. The patch was taken +# directly from the linux-firmware repository, but has not been part of a +# release there, yet. +# Unfortunately the setup here with two separate sources being brought into +# separate directories and the patch only affecting one of the two is not conducive +# of using the standard way of applying git binary patches through `autosetup -S git ...` +# Hence we have to extract some of the parts from that macro to let the patch +# apply. +# +# As soon as we update to a release that includes this patch everything from here... +pushd amd +%global __scm git +%__scm_setup_git +%autopatch -p1 +popd +# ... to here can be dropped cp {amd/,}LICENSE.amd-ucode cp intel/intel-ucode-with-caveats/* intel/intel-ucode cp intel/license LICENSE.intel-ucode From e897cfb696b8f875f9e21322b864c6e141787f75 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Wed, 30 Aug 2023 21:15:13 +0000 Subject: [PATCH 1089/1356] Ignore 'aws-credential-types' in dependabot config --- .github/dependabot.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 173040d7..ff27748f 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -33,6 +33,7 @@ updates: ignore: # For AWS SDK for Rust, we'll update when we bump tough/coldsnap - dependency-name: "aws-config" + - dependency-name: "aws-credential-types" - dependency-name: "aws-endpoint" - dependency-name: "aws-http" - dependency-name: "aws-hyper" From 54b60cee27ab0f7ee9cd124327c9336de9f2b1ee Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 1 Sep 2023 19:11:25 +0000 Subject: [PATCH 1090/1356] docs: mark log4j-hotpatch-enabled as deprecated Signed-off-by: Arnaldo Garcia Rincon --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 93717883..6a127048 100644 --- a/README.md +++ b/README.md @@ -696,7 +696,7 @@ For AWS variants, these settings allow you to set up CloudFormation signaling to Bottlerocket allows you to opt-in to use additional [OCI hooks](https://github.com/opencontainers/runtime-spec/blob/main/runtime.md#lifecycle) for your orchestrated containers. Once you opt-in to use additional OCI hooks, any new orchestrated containers will be configured with them, but existing containers won't be changed. -* `settings.oci-hooks.log4j-hotpatch-enabled`: Enables the [hotdog OCI hooks](https://github.com/bottlerocket-os/hotdog), which are used to inject the [Log4j Hot Patch](https://github.com/corretto/hotpatch-for-apache-log4j2) into containers. Defaults to `false`. +* `settings.oci-hooks.log4j-hotpatch-enabled`: **Deprecated**. This setting is no longer supported by Bottlerocket starting from v1.15.0. Though it is still available for backwards compatibility, enabling it has no effect beyond printing a deprecation warning to the system logs. 
#### OCI Defaults settings From 3cc981e2dce1cb56cbdc65d25b39f5485796eecb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 19:58:28 +0000 Subject: [PATCH 1091/1356] build(deps): bump serde_yaml from 0.8.26 to 0.9.21 in /tools Bumps [serde_yaml](https://github.com/dtolnay/serde-yaml) from 0.8.26 to 0.9.21. - [Release notes](https://github.com/dtolnay/serde-yaml/releases) - [Commits](https://github.com/dtolnay/serde-yaml/compare/0.8.26...0.9.21) --- updated-dependencies: - dependency-name: serde_yaml dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 8 ++++---- tools/infrasys/Cargo.toml | 2 +- tools/pubsys-config/Cargo.toml | 2 +- tools/testsys-config/Cargo.toml | 2 +- tools/testsys/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index a83367d2..71b69a42 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -1728,7 +1728,7 @@ dependencies = [ "log", "pubsys-config", "serde_json", - "serde_yaml 0.8.26", + "serde_yaml 0.9.21", "sha2", "shell-words", "simplelog", @@ -2381,7 +2381,7 @@ dependencies = [ "log", "parse-datetime", "serde", - "serde_yaml 0.8.26", + "serde_yaml 0.9.21", "snafu", "toml", "url", @@ -3125,7 +3125,7 @@ dependencies = [ "serde", "serde_json", "serde_plain", - "serde_yaml 0.8.26", + "serde_yaml 0.9.21", "snafu", "term_size", "testsys-config", @@ -3146,7 +3146,7 @@ dependencies = [ "maplit", "serde", "serde_plain", - "serde_yaml 0.8.26", + "serde_yaml 0.9.21", "snafu", "testsys-model", "toml", diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 39c5f51f..55e22b8c 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -17,7 +17,7 @@ aws-types = "0.55" aws-sdk-cloudformation = "0.28" aws-sdk-s3 = "0.28" serde_json = "1" -serde_yaml = "0.8" +serde_yaml = "0.9" sha2 = "0.10" shell-words = "1" simplelog = "0.12" diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml index d999ee1e..ba060eeb 100644 --- a/tools/pubsys-config/Cargo.toml +++ b/tools/pubsys-config/Cargo.toml @@ -13,7 +13,7 @@ lazy_static = "1" log = "0.4" parse-datetime = { path = "../../sources/parse-datetime", version = "0.1" } serde = { version = "1", features = ["derive"] } -serde_yaml = "0.8" +serde_yaml = "0.9" snafu = "0.7" toml = "0.5" url = { version = "2", features = ["serde"] } diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index 66790089..f6b69472 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -15,6 +15,6 @@ maplit="1" testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} serde = { version = "1", features = ["derive"] } serde_plain = "1" -serde_yaml = "0.8" +serde_yaml = "0.9" snafu = "0.7" toml = "0.5" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 22dd85ca..08a96dc6 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -25,7 +25,7 @@ fastrand = "1" serde = { version = "1", features = ["derive"] } serde_json = "1" serde_plain = "1" -serde_yaml = "0.8" +serde_yaml = "0.9" snafu = "0.7" term_size = "0.3" testsys-config = { path = "../testsys-config/", version = "0.1" } From 22f46347e1b56f7c7b1f95e5858bdf3afa67146b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 19:58:20 
+0000 Subject: [PATCH 1092/1356] build(deps): bump reqwest from 0.11.19 to 0.11.20 in /tools Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.19 to 0.11.20. - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.19...v0.11.20) --- updated-dependencies: - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 71b69a42..75c16061 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -2550,9 +2550,9 @@ checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" -version = "0.11.19" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20b9b67e2ca7dd9e9f9285b759de30ff538aab981abaaf7bc9bd90b84a0126c3" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ "base64 0.21.2", "bytes", From 70f95ce8d182d550712d97baa8622cff15e17f1d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 19:58:11 +0000 Subject: [PATCH 1093/1356] build(deps): bump assert-json-diff from 1.1.0 to 2.0.2 in /tools Bumps [assert-json-diff](https://github.com/davidpdrsn/assert-json-diff) from 1.1.0 to 2.0.2. - [Release notes](https://github.com/davidpdrsn/assert-json-diff/releases) - [Changelog](https://github.com/davidpdrsn/assert-json-diff/blob/main/CHANGELOG.md) - [Commits](https://github.com/davidpdrsn/assert-json-diff/compare/v1.1.0...v2.0.2) --- updated-dependencies: - dependency-name: assert-json-diff dependency-type: direct:production update-type: version-update:semver-major ... 
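assert-json-diff is only a dev-dependency of infrasys (see the Cargo.toml hunk below), so the semver-major bump mainly touches the test suite. As a rough sketch of how the 2.x macros are typically called in a test -- the test name and JSON payloads here are made up for illustration, not taken from infrasys:

    use assert_json_diff::{assert_json_eq, assert_json_include};
    use serde_json::json;

    #[test]
    fn stack_parameters_match() {
        // Strict equality of two JSON values.
        assert_json_eq!(
            json!({ "StackName": "example-stack" }),
            json!({ "StackName": "example-stack" })
        );

        // `actual` may carry extra fields; only the `expected` subset must match.
        assert_json_include!(
            actual: json!({ "StackName": "example-stack", "Region": "us-west-2" }),
            expected: json!({ "StackName": "example-stack" })
        );
    }
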
Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 17 ++--------------- tools/infrasys/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 75c16061..f0a49e0e 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -132,11 +132,10 @@ dependencies = [ [[package]] name = "assert-json-diff" -version = "1.1.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259cbe96513d2f1073027a259fc2ca917feb3026a5a8d984e3628e490255cc0" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ - "extend", "serde", "serde_json", ] @@ -1251,18 +1250,6 @@ dependencies = [ "libc", ] -[[package]] -name = "extend" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "fastrand" version = "1.9.0" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml index 55e22b8c..8579f62e 100644 --- a/tools/infrasys/Cargo.toml +++ b/tools/infrasys/Cargo.toml @@ -26,4 +26,4 @@ tokio = { version = "1", default-features = false, features = ["macros", "rt-mul url = "2" [dev-dependencies] -assert-json-diff = "1" +assert-json-diff = "2" From f5e6f10a213259466e3d0e1025a27a2ab383ed2a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 21:08:41 +0000 Subject: [PATCH 1094/1356] build(deps): bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/build.yml | 4 ++-- .github/workflows/cache.yml | 2 +- .github/workflows/golangci-lint.yaml | 2 +- .github/workflows/nightly.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 38f192f8..f3a7db9b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,7 +33,7 @@ jobs: variants: ${{ steps.get-variants.outputs.variants }} aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/actions/list-variants id: get-variants @@ -56,7 +56,7 @@ jobs: delay=$((1 + $RANDOM % 32)) echo "Waiting ${delay} seconds before execution" sleep $delay - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Preflight step to set up the runner uses: ./.github/actions/setup-node - if: contains(matrix.variant, 'nvidia') diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 2c7399cc..fd93f220 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -21,7 +21,7 @@ jobs: labels: bottlerocket_ubuntu-latest_16-core continue-on-error: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Preflight step to set up the runner uses: ./.github/actions/setup-node with: diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index b1b1f4fc..b1e5a0b5 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-go@v3 with: go-version: 1.19 - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: lint-host-ctr uses: golangci/golangci-lint-action@v3 with: diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 279a61c1..31c13bff 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -28,7 +28,7 @@ jobs: variants: ${{ steps.get-variants.outputs.variants }} aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/actions/list-variants id: get-variants @@ -46,7 +46,7 @@ jobs: fail-fast: false name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Preflight step to set up the runner uses: ./.github/actions/setup-node - if: contains(matrix.variant, 'nvidia') From 1f8618661319524b69b27f3eb1e5252e8526a3c4 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 5 Sep 2023 17:25:27 +0000 Subject: [PATCH 1095/1356] kernel: enable Intel VMD driver for metal variants Enabling the Intel Volume Management Device driver for metal variants lets Bottlerocket boot on hosts that have a root disk in a separate PCI domain. 
Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket-metal | 4 ++++ packages/kernel-5.15/config-bottlerocket-metal | 4 ++++ packages/kernel-6.1/config-bottlerocket-metal | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 167620f0..01429a37 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -106,3 +106,7 @@ CONFIG_SCSI_SMARTPQI=y # Support for virtio scsi boot devices for other cloud providers CONFIG_SCSI_VIRTIO=y + +# Intel Volume Management Device driver, to support boot disks in a separate +# PCI domain. +CONFIG_VMD=y diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 4022b1be..954257b7 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -143,3 +143,7 @@ CONFIG_MOUSE_PS2=m # CONFIG_MOUSE_PS2_SENTELIC is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_PS2_FOCALTECH is not set + +# Intel Volume Management Device driver, to support boot disks in a separate +# PCI domain. +CONFIG_VMD=y diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal index 82831b74..100c8de0 100644 --- a/packages/kernel-6.1/config-bottlerocket-metal +++ b/packages/kernel-6.1/config-bottlerocket-metal @@ -141,3 +141,7 @@ CONFIG_MOUSE_PS2=m # CONFIG_MOUSE_PS2_SENTELIC is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_PS2_FOCALTECH is not set + +# Intel Volume Management Device driver, to support boot disks in a separate +# PCI domain. +CONFIG_VMD=y From 7848a69766fa6975321450e26638479d23e84e17 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Fri, 1 Sep 2023 16:56:01 -0700 Subject: [PATCH 1096/1356] tools: fix clippy warnings Fix Clippy warnings in the tools workspace. 
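The three warnings addressed in the hunks below are common Clippy simplifications. For reference, a minimal standalone sketch of the same idioms, with illustrative values rather than the actual buildsys/pubsys/testsys data:

    use std::collections::HashMap;

    fn main() {
        // unwrap_or_default() replaces unwrap_or_else(|| "".to_string()) on an Option.
        let kernel_parameters: Option<Vec<String>> = None;
        let joined = kernel_parameters.map(|v| v.join(" ")).unwrap_or_default();
        assert_eq!(joined, "");

        // Entry::or_default() replaces or_insert(HashMap::new()) for map-of-map inserts.
        let mut by_region: HashMap<String, HashMap<String, String>> = HashMap::new();
        by_region
            .entry("us-west-2".to_string()) // region name is illustrative
            .or_default()
            .insert("ssm-key".to_string(), "value".to_string());

        // Iterator::chain() accepts any IntoIterator, so an Option can be chained
        // directly instead of .chain(Some(..).into_iter()).
        let fields: Vec<(String, String)> = vec![("variant".to_string(), "aws-dev".to_string())]
            .into_iter()
            .chain(Some(("image-id".to_string(), "ami-example".to_string())))
            .collect();
        assert_eq!(fields.len(), 2);
    }

Behavior is unchanged in each case; the shorter forms lean on defaults the standard library already provides instead of spelling them out.
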
--- tools/buildsys/src/builder.rs | 4 +--- tools/pubsys/src/aws/ssm/template.rs | 2 +- tools/testsys/src/crds.rs | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs index 8faf4bc1..0949d354 100644 --- a/tools/buildsys/src/builder.rs +++ b/tools/buildsys/src/builder.rs @@ -207,9 +207,7 @@ impl VariantBuilder { ); args.build_arg( "KERNEL_PARAMETERS", - kernel_parameters - .map(|v| v.join(" ")) - .unwrap_or_else(|| "".to_string()), + kernel_parameters.map(|v| v.join(" ")).unwrap_or_default(), ); if let Some(image_features) = image_features { diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs index bde1ccde..ac60583e 100644 --- a/tools/pubsys/src/aws/ssm/template.rs +++ b/tools/pubsys/src/aws/ssm/template.rs @@ -196,7 +196,7 @@ impl From<&Vec> for RenderedParametersMap { for parameter in parameters.iter() { parameter_map .entry(parameter.ssm_key.region.to_string()) - .or_insert(HashMap::new()) + .or_default() .insert( parameter.ssm_key.name.to_owned(), parameter.value.to_owned(), diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs index 069a7eef..43875d93 100644 --- a/tools/testsys/src/crds.rs +++ b/tools/testsys/src/crds.rs @@ -486,7 +486,7 @@ pub(crate) trait CrdCreator: Sync { .additional_fields(&test_type.to_string()) .into_iter() // Add the image id in case it is needed for cluster creation - .chain(Some(("image-id".to_string(), image_id.clone())).into_iter()) + .chain(Some(("image-id".to_string(), image_id.clone()))) .collect::>(), )?, hardware_csv: &crd_input From dfaeddb313a78535e6a96bb4d38bd78a5e99e2d1 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 5 Sep 2023 15:12:23 +0000 Subject: [PATCH 1097/1356] kernel-5.10: Allow handling of compressed firmware Firmware binary blobs can add up to quite a lot of disk space usage. With Bottlerocket we do aim to keep our images small, so enable the kernel's firmware loading infrastructure to handle compressed firmware files. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/config-bottlerocket-metal | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal index 01429a37..f6a0a5a1 100644 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ b/packages/kernel-5.10/config-bottlerocket-metal @@ -110,3 +110,6 @@ CONFIG_SCSI_VIRTIO=y # Intel Volume Management Device driver, to support boot disks in a separate # PCI domain. CONFIG_VMD=y + +# Support handling of compressed firmware +CONFIG_FW_LOADER_COMPRESS=y From c80f32c2174699e237e3673051771d465fdc07fa Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 5 Sep 2023 15:16:04 +0000 Subject: [PATCH 1098/1356] kernel-5.15: Allow handling of compressed firmware Firmware binary blobs can add up to quite a lot of disk space usage. With Bottlerocket we do aim to keep our images small, so enable the kernel's firmware loading infrastructure to handle compressed firmware files. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.15/config-bottlerocket-metal | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal index 954257b7..6e38510c 100644 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ b/packages/kernel-5.15/config-bottlerocket-metal @@ -147,3 +147,6 @@ CONFIG_MOUSE_PS2=m # Intel Volume Management Device driver, to support boot disks in a separate # PCI domain. CONFIG_VMD=y + +# Support handling of compressed firmware +CONFIG_FW_LOADER_COMPRESS=y From 6bf2be6c43116ad5976ffc53549cba6b4c44c661 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 5 Sep 2023 15:17:41 +0000 Subject: [PATCH 1099/1356] kernel-6.1: Allow handling of compressed firmware Firmware binary blobs can add up to quite a lot of disk space usage. With Bottlerocket we do aim to keep our images small, so enable the kernel's firmware loading infrastructure to handle compressed firmware files. Previous to linux kernel series 5.19 option `CONFIG_FW_LOADER_COMPRESS` was only supporting the XZ compression algorithm. Since then also ZSTD compression is available which makes additional options necessary to select the right algorithm. For now select the default XZ compression to align all our supported kernels on the same. Once we are only supporting kernels >=6.1 we may chose to switch compression algorithm. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket-metal | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal index 100c8de0..94d46c91 100644 --- a/packages/kernel-6.1/config-bottlerocket-metal +++ b/packages/kernel-6.1/config-bottlerocket-metal @@ -145,3 +145,7 @@ CONFIG_MOUSE_PS2=m # Intel Volume Management Device driver, to support boot disks in a separate # PCI domain. CONFIG_VMD=y + +# Support handling of compressed firmware +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y From a7fedff7947379302dc40455c83feb63c35688b8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 5 Sep 2023 15:24:39 +0000 Subject: [PATCH 1100/1356] linux-firmware: Enable firmware compression Recoup some image size by enabling firmware compression, now that our kernels support it. None of the compressed firmware files has the execute bit set, so we can skip removing it when assembling the package. Signed-off-by: Leonard Foerster --- packages/linux-firmware/linux-firmware.spec | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/packages/linux-firmware/linux-firmware.spec b/packages/linux-firmware/linux-firmware.spec index 1039b62f..dd8b3024 100644 --- a/packages/linux-firmware/linux-firmware.spec +++ b/packages/linux-firmware/linux-firmware.spec @@ -48,17 +48,9 @@ Patch0010: 0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch mkdir -p %{buildroot}/%{fwdir} mkdir -p %{buildroot}/%{fwdir}/updates -# Here we have potential to shave off some extra space by using `install-xz` of -# `install-zst` to compress firmware images on disk. However, that functionality -# relies on kernels being configured with `CONFIG_FW_LOADER_COMPRESS_[ZSTD|XZ]` -# which we currently do not have. -make DESTDIR=%{buildroot}/ FIRMWAREDIR=%{fwdir} install - - -# Remove executable bits from random firmware -pushd %{buildroot}/%{fwdir} -find . -type f -executable -exec chmod -x {} \; -popd +# Use xz compression for firmware files to reduce size on disk. 
This relies on +# kernel support through FW_LOADER_COMPRESS (and FW_LOADER_COMPRESS_XZ for kernels >=5.19) +make DESTDIR=%{buildroot}/ FIRMWAREDIR=%{fwdir} install-xz %files %dir %{fwdir} From dc4843cec87327b60e2a436750360051fc954390 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 5 Sep 2023 20:57:37 +0000 Subject: [PATCH 1101/1356] dependabot: Remove /tools configuration The tools directory is being removed in the very near future as things move over to twoliter. Dependencies will be updated there, so any updates now are questionable since they add to the churn as we try to live migrate over to the new location. This removes the tools-specific configuration from the dependabot config to try to reduce the noise. Signed-off-by: Sean McGinnis --- .github/dependabot.yaml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index ff27748f..98b4e6a7 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -26,21 +26,3 @@ updates: labels: - "area/dependencies" open-pull-requests-limit: 0 - - # Enable updates for the `tools` dependencies - - package-ecosystem: "cargo" - directory: "/tools" - ignore: - # For AWS SDK for Rust, we'll update when we bump tough/coldsnap - - dependency-name: "aws-config" - - dependency-name: "aws-credential-types" - - dependency-name: "aws-endpoint" - - dependency-name: "aws-http" - - dependency-name: "aws-hyper" - - dependency-name: "aws-sig*" - - dependency-name: "aws-sdk*" - - dependency-name: "aws-smithy*" - schedule: - interval: "weekly" - labels: - - "area/dependencies" From 023695caa00ac3c3499b633cf9a9e2c271bddb89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 18:47:53 +0000 Subject: [PATCH 1102/1356] build(deps): bump webpki from 0.22.0 to 0.22.1 in /tools Bumps [webpki](https://github.com/briansmith/webpki) from 0.22.0 to 0.22.1. - [Commits](https://github.com/briansmith/webpki/commits) --- updated-dependencies: - dependency-name: webpki dependency-type: indirect ... Signed-off-by: dependabot[bot] --- tools/Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index f0a49e0e..121d4d8b 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -3769,9 +3769,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", From a012ab06a8a59d8cb13e4f1bb66959fe27b22fe3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 8 Sep 2023 13:23:41 +0000 Subject: [PATCH 1103/1356] kernel-5.10: update to 5.10.192 Rebase to Amazon Linux upstream version 5.10.192-182.736.amzn2. Drop downstream backports of fixes for CVE-2023-20593 and CVE-2023-20588 as these are now included by the upstream base version. 
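The rebase itself is mostly mechanical: point Source0 and the external-files metadata at the new SRPM and update its checksum (see the Cargo.toml and spec hunks below). As a small sketch of recomputing the sha512 value with the sha2 crate -- assuming the SRPM named in the new url has already been downloaded locally; this is only an illustration, independent of whatever verification buildsys performs:

    use sha2::{Digest, Sha512};
    use std::{fs, io};

    fn main() -> io::Result<()> {
        // File name taken from the new Source0 URL; adjust the path to wherever it was saved.
        let mut srpm = fs::File::open("kernel-5.10.192-182.736.amzn2.src.rpm")?;
        let mut hasher = Sha512::new();
        io::copy(&mut srpm, &mut hasher)?;
        // Hex digest to compare against the sha512 field in packages/kernel-5.10/Cargo.toml.
        println!("{:x}", hasher.finalize());
        Ok(())
    }
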
Signed-off-by: Leonard Foerster --- ...-the-errata-checking-functionality-u.patch | 184 ------------------ .../5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 172 ---------------- ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 ----- ...ot-leak-quotient-data-after-a-divisi.patch | 111 ----------- ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 83 -------- packages/kernel-5.10/Cargo.toml | 4 +- packages/kernel-5.10/kernel-5.10.spec | 15 +- 7 files changed, 4 insertions(+), 613 deletions(-) delete mode 100644 packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch delete mode 100644 packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch delete mode 100644 packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch delete mode 100644 packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch delete mode 100644 packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch diff --git a/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch deleted file mode 100644 index b5b84593..00000000 --- a/packages/kernel-5.10/5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch +++ /dev/null @@ -1,184 +0,0 @@ -From 191b8f9b0e3708e8325d8d28e1005a1fbe5e3991 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:31:32 +0200 -Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up - -Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a - -Avoid new and remove old forward declarations. - -No functional changes. - -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++-------------------- - 1 file changed, 67 insertions(+), 72 deletions(-) - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 89a9b7754476..6eea37f827b1 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -28,11 +28,6 @@ - - #include "cpu.h" - --static const int amd_erratum_383[]; --static const int amd_erratum_400[]; --static const int amd_erratum_1054[]; --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); -- - /* - * nodes_per_socket: Stores the number of nodes per socket. - * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX -@@ -40,6 +35,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); - */ - static u32 nodes_per_socket = 1; - -+/* -+ * AMD errata checking -+ * -+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -+ * have an OSVW id assigned, which it takes as first argument. Both take a -+ * variable number of family-specific model-stepping ranges created by -+ * AMD_MODEL_RANGE(). -+ * -+ * Example: -+ * -+ * const int amd_erratum_319[] = -+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -+ */ -+ -+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -+#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } -+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -+ -+static const int amd_erratum_400[] = -+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -+ -+static const int amd_erratum_383[] = -+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -+ -+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ -+static const int amd_erratum_1054[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -+ -+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -+{ -+ int osvw_id = *erratum++; -+ u32 range; -+ u32 ms; -+ -+ if (osvw_id >= 0 && osvw_id < 65536 && -+ cpu_has(cpu, X86_FEATURE_OSVW)) { -+ u64 osvw_len; -+ -+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -+ if (osvw_id < osvw_len) { -+ u64 osvw_bits; -+ -+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -+ osvw_bits); -+ return osvw_bits & (1ULL << (osvw_id & 0x3f)); -+ } -+ } -+ -+ /* OSVW unavailable or ID unknown, match family-model-stepping range */ -+ ms = (cpu->x86_model << 4) | cpu->x86_stepping; -+ while ((range = *erratum++)) -+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -+ (ms >= AMD_MODEL_RANGE_START(range)) && -+ (ms <= AMD_MODEL_RANGE_END(range))) -+ return true; -+ -+ return false; -+} -+ - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) - { - u32 gprs[8] = { 0 }; -@@ -1153,73 +1215,6 @@ static const struct cpu_dev amd_cpu_dev = { - - cpu_dev_register(amd_cpu_dev); - --/* -- * AMD errata checking -- * -- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -- * have an OSVW id assigned, which it takes as first argument. Both take a -- * variable number of family-specific model-stepping ranges created by -- * AMD_MODEL_RANGE(). -- * -- * Example: -- * -- * const int amd_erratum_319[] = -- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -- */ -- --#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } --#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } --#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) --#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) --#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) --#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -- --static const int amd_erratum_400[] = -- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -- --static const int amd_erratum_383[] = -- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -- --/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ --static const int amd_erratum_1054[] = -- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -- --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) --{ -- int osvw_id = *erratum++; -- u32 range; -- u32 ms; -- -- if (osvw_id >= 0 && osvw_id < 65536 && -- cpu_has(cpu, X86_FEATURE_OSVW)) { -- u64 osvw_len; -- -- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -- if (osvw_id < osvw_len) { -- u64 osvw_bits; -- -- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -- osvw_bits); -- return osvw_bits & (1ULL << (osvw_id & 0x3f)); -- } -- } -- -- /* OSVW unavailable or ID unknown, match family-model-stepping range */ -- ms = (cpu->x86_model << 4) | cpu->x86_stepping; -- while ((range = *erratum++)) -- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -- (ms >= AMD_MODEL_RANGE_START(range)) && -- (ms <= AMD_MODEL_RANGE_END(range))) -- return true; -- -- return false; --} -- - void set_dr_addr_mask(unsigned long mask, int dr) - { - if (!boot_cpu_has(X86_FEATURE_BPEXT)) --- -2.25.1 - diff --git a/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch deleted file mode 100644 index b25384ba..00000000 --- a/packages/kernel-5.10/5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 93df00f9d48d48466ddbe01a06eaaf3311ecfb53 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 15 Jul 2023 13:41:28 +0200 -Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix - -Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98 - -Add a fix for the Zen2 VZEROUPPER data corruption bug where under -certain circumstances executing VZEROUPPER can cause register -corruption or leak data. - -The optimal fix is through microcode but in the case the proper -microcode revision has not been applied, enable a fallback fix using -a chicken bit. 
- -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/include/asm/microcode.h | 1 + - arch/x86/include/asm/microcode_amd.h | 2 + - arch/x86/include/asm/msr-index.h | 1 + - arch/x86/kernel/cpu/amd.c | 60 ++++++++++++++++++++++++++++ - arch/x86/kernel/cpu/common.c | 2 + - 5 files changed, 66 insertions(+) - -diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h -index 509cc0262fdc..394605e59f2b 100644 ---- a/arch/x86/include/asm/microcode.h -+++ b/arch/x86/include/asm/microcode.h -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include - - struct ucode_patch { - struct list_head plist; -diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h -index a645b25ee442..403a8e76b310 100644 ---- a/arch/x86/include/asm/microcode_amd.h -+++ b/arch/x86/include/asm/microcode_amd.h -@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family); - extern void load_ucode_amd_ap(unsigned int family); - extern int __init save_microcode_in_initrd_amd(unsigned int family); - void reload_ucode_amd(unsigned int cpu); -+extern void amd_check_microcode(void); - #else - static inline void __init load_ucode_amd_bsp(unsigned int family) {} - static inline void load_ucode_amd_ap(unsigned int family) {} - static inline int __init - save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } - static inline void reload_ucode_amd(unsigned int cpu) {} -+static inline void amd_check_microcode(void) {} - #endif - #endif /* _ASM_X86_MICROCODE_AMD_H */ -diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h -index f71a177b6b18..3fab152809ab 100644 ---- a/arch/x86/include/asm/msr-index.h -+++ b/arch/x86/include/asm/msr-index.h -@@ -497,6 +497,7 @@ - #define MSR_AMD64_DE_CFG 0xc0011029 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 - #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) -+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 - - #define MSR_AMD64_BU_CFG2 0xc001102a - #define MSR_AMD64_IBSFETCHCTL 0xc0011030 -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 6eea37f827b1..3d99a823ffac 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -71,6 +71,11 @@ static const int amd_erratum_383[] = - static const int amd_erratum_1054[] = - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); - -+static const int amd_zenbleed[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -1030,6 +1035,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c) - } - } - -+static bool cpu_has_zenbleed_microcode(void) -+{ -+ u32 good_rev = 0; -+ -+ switch (boot_cpu_data.x86_model) { -+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; -+ case 0x60 ... 0x67: good_rev = 0x0860010b; break; -+ case 0x68 ... 0x6f: good_rev = 0x08608105; break; -+ case 0x70 ... 0x7f: good_rev = 0x08701032; break; -+ case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; -+ -+ default: -+ return false; -+ break; -+ } -+ -+ if (boot_cpu_data.microcode < good_rev) -+ return false; -+ -+ return true; -+} -+ -+static void zenbleed_check(struct cpuinfo_x86 *c) -+{ -+ if (!cpu_has_amd_erratum(c, amd_zenbleed)) -+ return; -+ -+ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) -+ return; -+ -+ if (!cpu_has(c, X86_FEATURE_AVX)) -+ return; -+ -+ if (!cpu_has_zenbleed_microcode()) { -+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); -+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } else { -+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } -+} -+ - static void init_amd(struct cpuinfo_x86 *c) - { - early_init_amd(c); -@@ -1120,6 +1166,8 @@ static void init_amd(struct cpuinfo_x86 *c) - msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); - - check_null_seg_clears_base(c); -+ -+ zenbleed_check(c); - } - - #ifdef CONFIG_X86_32 -@@ -1233,3 +1281,15 @@ void set_dr_addr_mask(unsigned long mask, int dr) - break; - } - } -+ -+static void zenbleed_check_cpu(void *unused) -+{ -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ zenbleed_check(c); -+} -+ -+void amd_check_microcode(void) -+{ -+ on_each_cpu(zenbleed_check_cpu, NULL, 1); -+} -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index e2dee6010846..f41781d06a5f 100644 ---- a/arch/x86/kernel/cpu/common.c -+++ b/arch/x86/kernel/cpu/common.c -@@ -2165,6 +2165,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info) - - perf_check_microcode(); - -+ amd_check_microcode(); -+ - store_cpu_caps(&curr_info); - - if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, --- -2.25.1 - diff --git a/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch deleted file mode 100644 index 7207e627..00000000 --- a/packages/kernel-5.10/5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch +++ /dev/null @@ -1,48 +0,0 @@ -From d573bee81157742dfb6710646d365bcd37a0f92c Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 8 Jul 2023 10:21:35 +0200 -Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s - -Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea - -There was never a doubt in my mind that they would not fit into a single -u32 eventually. 
- -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit 073a28a9b50662991e7d6956c2cf2fc5d54f28cd) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 +- - tools/arch/x86/include/asm/cpufeatures.h | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 0b0b9453b19f..9b06e142bad1 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 19 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used -diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h -index 54ba20492ad1..51a8fdb487c7 100644 ---- a/tools/arch/x86/include/asm/cpufeatures.h -+++ b/tools/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 19 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used --- -2.40.1 - diff --git a/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch deleted file mode 100644 index d6a61771..00000000 --- a/packages/kernel-5.10/5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 188ef20eb7f347966659092d75051f0cd4b572bf Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 5 Aug 2023 00:06:43 +0200 -Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 - -commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. - -Under certain circumstances, an integer division by 0 which faults, can -leave stale quotient data from a previous division operation on Zen1 -microarchitectures. - -Do a dummy division 0/1 before returning from the #DE exception handler -in order to avoid any leaks of potentially sensitive data. 
- -Signed-off-by: Borislav Petkov (AMD) -Cc: -Signed-off-by: Linus Torvalds -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit b6fc2fbf89089ecfb8eb9a89a7fc91d444f4fec7) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 ++ - arch/x86/include/asm/processor.h | 2 ++ - arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ - arch/x86/kernel/traps.c | 2 ++ - 4 files changed, 25 insertions(+) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 9b06e142bad1..630196281a48 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -435,4 +435,6 @@ - #define X86_BUG_RAS_POISONING X86_BUG(29) /* CPU is affected by RAS poisoning */ - #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ - -+/* BUG word 2 */ -+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ - #endif /* _ASM_X86_CPUFEATURES_H */ -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index 12714134f5eb..f20dc0c73cae 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -810,9 +810,11 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow); - #ifdef CONFIG_CPU_SUP_AMD - extern u16 amd_get_nb_id(int cpu); - extern u32 amd_get_nodes_per_socket(void); -+extern void amd_clear_divider(void); - #else - static inline u16 amd_get_nb_id(int cpu) { return 0; } - static inline u32 amd_get_nodes_per_socket(void) { return 0; } -+static inline void amd_clear_divider(void) { } - #endif - - static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 3d99a823ffac..842357ee7724 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -76,6 +76,10 @@ static const int amd_zenbleed[] = - AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), - AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); - -+static const int amd_div0[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -1168,6 +1172,11 @@ static void init_amd(struct cpuinfo_x86 *c) - check_null_seg_clears_base(c); - - zenbleed_check(c); -+ -+ if (cpu_has_amd_erratum(c, amd_div0)) { -+ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); -+ setup_force_cpu_bug(X86_BUG_DIV0); -+ } - } - - #ifdef CONFIG_X86_32 -@@ -1293,3 +1302,13 @@ void amd_check_microcode(void) - { - on_each_cpu(zenbleed_check_cpu, NULL, 1); - } -+ -+/* -+ * Issue a DIV 0/1 insn to clear any division data from previous DIV -+ * operations. 
-+ */ -+void noinstr amd_clear_divider(void) -+{ -+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) -+ :: "a" (0), "d" (0), "r" (1)); -+} -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index 3780c728345c..d8142b5738ac 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -198,6 +198,8 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -+ -+ amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) --- -2.40.1 - diff --git a/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch deleted file mode 100644 index 6ef00d99..00000000 --- a/packages/kernel-5.10/5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch +++ /dev/null @@ -1,83 +0,0 @@ -From ea19dbd49d7dcdfa1a807ce1ea48164f10129113 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Fri, 11 Aug 2023 23:38:24 +0200 -Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt - -commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. - -Initially, it was thought that doing an innocuous division in the #DE -handler would take care to prevent any leaking of old data from the -divider but by the time the fault is raised, the speculation has already -advanced too far and such data could already have been used by younger -operations. - -Therefore, do the innocuous division on every exit to userspace so that -userspace doesn't see any potentially old data from integer divisions in -kernel space. - -Do the same before VMRUN too, to protect host data from leaking into the -guest too. - -Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") -Signed-off-by: Borislav Petkov (AMD) -Cc: -Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit 69712baf249570a1419e75dc1a103a44e375b2cd) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/entry-common.h | 1 + - arch/x86/kernel/cpu/amd.c | 1 + - arch/x86/kernel/traps.c | 2 -- - arch/x86/kvm/svm/svm.c | 1 + - 4 files changed, 3 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h -index 4a382fb6a9ef..5443851d3aa6 100644 ---- a/arch/x86/include/asm/entry-common.h -+++ b/arch/x86/include/asm/entry-common.h -@@ -78,6 +78,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - static __always_inline void arch_exit_to_user_mode(void) - { - mds_user_clear_cpu_buffers(); -+ amd_clear_divider(); - } - #define arch_exit_to_user_mode arch_exit_to_user_mode - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 842357ee7724..64e97f243441 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -1312,3 +1312,4 @@ void noinstr amd_clear_divider(void) - asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) - :: "a" (0), "d" (0), "r" (1)); - } -+EXPORT_SYMBOL_GPL(amd_clear_divider); -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index d8142b5738ac..3780c728345c 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -198,8 +198,6 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -- -- amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) -diff --git a/arch/x86/kvm/svm/svm.c 
b/arch/x86/kvm/svm/svm.c -index 5ddc75ade8f0..d0a1c0420c92 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ b/arch/x86/kvm/svm/svm.c -@@ -3381,6 +3381,7 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) - - static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) - { -+ amd_clear_divider(); - } - - static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) --- -2.40.1 - diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 3fafe00f..3e18783a 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/e6326ee4512d019820a49568094b3393f82a963a90b4201cbb45eea26a66ce02/kernel-5.10.186-179.751.amzn2.src.rpm" -sha512 = "6753ecfd149bf30a7ac8661ac2e711aa73a1b3ed9122e9545d2053c09b430c8ea8ca142f9500a096fc770007e989c417496e578eddf363442a262af2a5c17ee1" +url = "https://cdn.amazonlinux.com/blobstore/4cbf281b8513ad2257aae8ad983a75fd76cb9c613fe7025822f0f16879cb2e2b/kernel-5.10.192-182.736.amzn2.src.rpm" +sha512 = "8c1885a9f3a7c00d55b5c1bdadc5d95f1f64b321eabb602d69ce78706ce7f7241022cb094f161aebeebac74d4a08479c07d4a3db7bacb2896cf10ede962de3ec" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 54bb16e9..20fd6920 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.186 +Version: 5.10.192 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/e6326ee4512d019820a49568094b3393f82a963a90b4201cbb45eea26a66ce02/kernel-5.10.186-179.751.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/4cbf281b8513ad2257aae8ad983a75fd76cb9c613fe7025822f0f16879cb2e2b/kernel-5.10.192-182.736.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal @@ -24,17 +24,6 @@ Patch1003: 1003-af_unix-increase-default-max_dgram_qlen-to-512.patch Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch -# Cherry-picked fix for CVE-2023-20593 ("Zenbleed"). Can be dropped when moving -# upstream to 5.10.187 or later. -Patch5001: 5001-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch -Patch5002: 5002-x86-cpu-amd-Add-a-Zenbleed-fix.patch - -# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving -# upstream to 5.10.192 or later. -Patch5011: 5011-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch -Patch5012: 5012-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch -Patch5013: 5013-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From e5b0de0566a515698b1f995edc9fa377d3a8fd00 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 11 Sep 2023 11:31:24 +0000 Subject: [PATCH 1104/1356] kernel-5.10: Cleanup AL downstream patches We inherited some unnecessary and confusing patch as part of including Amazon Linux' latest kernel updates. Clean this up. Details in the patch itself. 
Signed-off-by: Leonard Foerster --- ...-nf_tables-drop-map-element-referenc.patch | 75 +++++++++++++++++++ packages/kernel-5.10/kernel-5.10.spec | 3 + 2 files changed, 78 insertions(+) create mode 100644 packages/kernel-5.10/5001-Revert-netfilter-nf_tables-drop-map-element-referenc.patch diff --git a/packages/kernel-5.10/5001-Revert-netfilter-nf_tables-drop-map-element-referenc.patch b/packages/kernel-5.10/5001-Revert-netfilter-nf_tables-drop-map-element-referenc.patch new file mode 100644 index 00000000..1768d2d5 --- /dev/null +++ b/packages/kernel-5.10/5001-Revert-netfilter-nf_tables-drop-map-element-referenc.patch @@ -0,0 +1,75 @@ +From e499042e36f6b00fcf68452c2d7bfdcf124203c5 Mon Sep 17 00:00:00 2001 +From: Leonard Foerster +Date: Mon, 11 Sep 2023 11:17:18 +0000 +Subject: [PATCH] Revert "netfilter: nf_tables: drop map element references + from preparation phase" + +This reverts commit 9ff6253cea9cf567bc899164405f437212eb59f2. + +This reverts an AL downstream patch that is not adding any value and is +cluttering the patch queue unnecessarily. This seems to have started as +a downstream backport of the original commit 628bd3e49cba1 introduced +upstream in v6.4.13, but ended picking up an extra function +`nft_setelem_validate` which was introduced in d46fc894147cf in v6.3.20. +That additional patch has not been backported to the 5.10 series as it +fixes a bug introduced only in 5.13. + +When the original patch was introduced in upstream stable 5.10 series as +a136b7942ad2a in 5.10.188 that single additional function stayed around +in AL as this patch I am reverting here. The function it adds is never +referenced in the assembled linux tree, so remove it in an attempy to +improve our code hygiene. + +Letting it stay in would also confuse here, as before this revert we +have two patches with the same name and same commit message, but +completely disjunct diffs claiming to be a backport of 628bd3e49cba1. 
+ +Signed-off-by: Leonard Foerster +--- + net/netfilter/nf_tables_api.c | 30 ------------------------------ + 1 file changed, 30 deletions(-) + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 96fd4e68973b..2669999d1bc9 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3311,36 +3311,6 @@ static int nft_table_validate(struct net *net, const struct nft_table *table) + return 0; + } + +-int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set, +- const struct nft_set_iter *iter, +- struct nft_set_elem *elem) +-{ +- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); +- struct nft_ctx *pctx = (struct nft_ctx *)ctx; +- const struct nft_data *data; +- int err; +- +- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && +- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) +- return 0; +- +- data = nft_set_ext_data(ext); +- switch (data->verdict.code) { +- case NFT_JUMP: +- case NFT_GOTO: +- pctx->level++; +- err = nft_chain_validate(ctx, data->verdict.chain); +- if (err < 0) +- return err; +- pctx->level--; +- break; +- default: +- break; +- } +- +- return 0; +-} +- + static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nft_chain *chain, + const struct nlattr *nla); +-- +2.40.1 + diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 20fd6920..2c8a449d 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -24,6 +24,9 @@ Patch1003: 1003-af_unix-increase-default-max_dgram_qlen-to-512.patch Patch2000: 2000-kbuild-move-module-strip-compression-code-into-scrip.patch Patch2001: 2001-kbuild-add-support-for-zstd-compressed-modules.patch +# Fixup unused code inherited from AL +Patch5001: 5001-Revert-netfilter-nf_tables-drop-map-element-referenc.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From bf4fcedf5106b33bbff7237601640610b1f361d3 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 8 Sep 2023 13:24:20 +0000 Subject: [PATCH 1105/1356] kernel-5.15: update to 5.15.128 Rebase to Amazon Linux upstream version 5.15.128-80.144.amzn2. Drop downstream backports of fixes for CVE-2023-20588 as these are now included by the upstream base version. 
Signed-off-by: Leonard Foerster --- ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 -------- ...ot-leak-quotient-data-after-a-divisi.patch | 111 ------------------ ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 82 ------------- packages/kernel-5.15/Cargo.toml | 4 +- packages/kernel-5.15/kernel-5.15.spec | 10 +- 5 files changed, 4 insertions(+), 251 deletions(-) delete mode 100644 packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch delete mode 100644 packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch delete mode 100644 packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch diff --git a/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch deleted file mode 100644 index fccdce71..00000000 --- a/packages/kernel-5.15/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 40f837f02c448b37fb8967e5c50878c8a4e9459a Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 8 Jul 2023 10:21:35 +0200 -Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s - -Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea - -There was never a doubt in my mind that they would not fit into a single -u32 eventually. - -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit 236dd7133394bfe30275191e3aefcc6b3b09962b) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 +- - tools/arch/x86/include/asm/cpufeatures.h | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index ad6984f941f7..3800d0ec048d 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 20 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used -diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h -index 3781a7f489ef..da6d66e1fbb1 100644 ---- a/tools/arch/x86/include/asm/cpufeatures.h -+++ b/tools/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 20 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used --- -2.40.1 - diff --git a/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch deleted file mode 100644 index fc264f5e..00000000 --- a/packages/kernel-5.15/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 7292d6bb18710a2c1f283f77f3f69196536bdc2b Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 5 Aug 2023 00:06:43 +0200 -Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 - -commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. 
- -Under certain circumstances, an integer division by 0 which faults, can -leave stale quotient data from a previous division operation on Zen1 -microarchitectures. - -Do a dummy division 0/1 before returning from the #DE exception handler -in order to avoid any leaks of potentially sensitive data. - -Signed-off-by: Borislav Petkov (AMD) -Cc: -Signed-off-by: Linus Torvalds -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit a74878207b02060c5feaf88b5566208ed08eb78d) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 ++ - arch/x86/include/asm/processor.h | 2 ++ - arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ - arch/x86/kernel/traps.c | 2 ++ - 4 files changed, 25 insertions(+) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 3800d0ec048d..8d64a1e26589 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -454,4 +454,6 @@ - #define X86_BUG_RAS_POISONING X86_BUG(29) /* CPU is affected by RAS poisoning */ - #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ - -+/* BUG word 2 */ -+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ - #endif /* _ASM_X86_CPUFEATURES_H */ -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index 3e3bd5b7d5db..aeef8a6c2088 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -803,9 +803,11 @@ extern u16 get_llc_id(unsigned int cpu); - #ifdef CONFIG_CPU_SUP_AMD - extern u32 amd_get_nodes_per_socket(void); - extern u32 amd_get_highest_perf(void); -+extern void amd_clear_divider(void); - #else - static inline u32 amd_get_nodes_per_socket(void) { return 0; } - static inline u32 amd_get_highest_perf(void) { return 0; } -+static inline void amd_clear_divider(void) { } - #endif - - static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 3daceadf5d1f..892eb16a9ea2 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -75,6 +75,10 @@ static const int amd_zenbleed[] = - AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), - AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); - -+static const int amd_div0[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -1140,6 +1144,11 @@ static void init_amd(struct cpuinfo_x86 *c) - check_null_seg_clears_base(c); - - zenbleed_check(c); -+ -+ if (cpu_has_amd_erratum(c, amd_div0)) { -+ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); -+ setup_force_cpu_bug(X86_BUG_DIV0); -+ } - } - - #ifdef CONFIG_X86_32 -@@ -1281,3 +1290,13 @@ void amd_check_microcode(void) - { - on_each_cpu(zenbleed_check_cpu, NULL, 1); - } -+ -+/* -+ * Issue a DIV 0/1 insn to clear any division data from previous DIV -+ * operations. 
-+ */ -+void noinstr amd_clear_divider(void) -+{ -+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) -+ :: "a" (0), "d" (0), "r" (1)); -+} -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index ca47080e3774..3361d32d090f 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -202,6 +202,8 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -+ -+ amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) --- -2.40.1 - diff --git a/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch deleted file mode 100644 index b2de61d2..00000000 --- a/packages/kernel-5.15/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 1524872707f69fe6bea94d26238e8f6d9302b5d6 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Fri, 11 Aug 2023 23:38:24 +0200 -Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt - -commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. - -Initially, it was thought that doing an innocuous division in the #DE -handler would take care to prevent any leaking of old data from the -divider but by the time the fault is raised, the speculation has already -advanced too far and such data could already have been used by younger -operations. - -Therefore, do the innocuous division on every exit to userspace so that -userspace doesn't see any potentially old data from integer divisions in -kernel space. - -Do the same before VMRUN too, to protect host data from leaking into the -guest too. - -Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") -Signed-off-by: Borislav Petkov (AMD) -Cc: -Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/include/asm/entry-common.h | 1 + - arch/x86/kernel/cpu/amd.c | 1 + - arch/x86/kernel/traps.c | 2 -- - arch/x86/kvm/svm/svm.c | 2 ++ - 4 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h -index 43184640b579..a12fdf01dc26 100644 ---- a/arch/x86/include/asm/entry-common.h -+++ b/arch/x86/include/asm/entry-common.h -@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - static __always_inline void arch_exit_to_user_mode(void) - { - mds_user_clear_cpu_buffers(); -+ amd_clear_divider(); - } - #define arch_exit_to_user_mode arch_exit_to_user_mode - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 892eb16a9ea2..f485e6c3ae90 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -1300,3 +1300,4 @@ void noinstr amd_clear_divider(void) - asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) - :: "a" (0), "d" (0), "r" (1)); - } -+EXPORT_SYMBOL_GPL(amd_clear_divider); -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index 3361d32d090f..ca47080e3774 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -202,8 +202,6 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -- -- amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) -diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index 0611dac70c25..944a08cc3b6b 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ 
b/arch/x86/kvm/svm/svm.c -@@ -1452,6 +1452,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) - struct vcpu_svm *svm = to_svm(vcpu); - struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); - -+ amd_clear_divider(); -+ - if (sev_es_guest(vcpu->kvm)) - sev_es_unmap_ghcb(svm); - --- -2.40.1 - diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index be8a8543..3cab212a 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/d73ac4b2ddb2c5ed91308adfcd7ccf4d7ba53882d31c9a6461e1661766159b62/kernel-5.15.122-77.145.amzn2.src.rpm" -sha512 = "37742f1923dcafa20e9144d9754e5238a85956bbb814caa89dbc4db2549e62b64c9e3af9ceaf0bc32d71560eef9a60d86f35ae3df86c5893094fd86b63b58ffb" +url = "https://cdn.amazonlinux.com/blobstore/d301b89106ee983f8cd5cd0b4d6b4111ea97b7e51ca2892a6d9bbec4eaf18c4f/kernel-5.15.128-80.144.amzn2.src.rpm" +sha512 = "ac8fce4c8f293dd123e64ec6f3cf553e2d9b0462de5b48e0caebeecb1091a6d72dde35571264da1ed05984845778e758552636faf42d89ac6af41feec1f8b5da" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 085eb6cc..2eff2f74 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.122 +Version: 5.15.128 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/d73ac4b2ddb2c5ed91308adfcd7ccf4d7ba53882d31c9a6461e1661766159b62/kernel-5.15.122-77.145.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/d301b89106ee983f8cd5cd0b4d6b4111ea97b7e51ca2892a6d9bbec4eaf18c4f/kernel-5.15.128-80.144.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal @@ -22,12 +22,6 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch -# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving -# upstream to 5.15.128 or later. -Patch5001: 5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch -Patch5002: 5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch -Patch5003: 5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 19ab678dd44bb5ab901d0771133877b291f4ab80 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 8 Sep 2023 13:24:49 +0000 Subject: [PATCH 1106/1356] kernel-6.1: update to 6.1.49 Rebase to Amazon Linux upstream version 6.1.49-69.116.amzn2023. Drop downstream backports of fixes for CVE-2023-20588 as these are now included by the upstream base version. 
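As a hedged illustration only (not part of this change): one way to double-check that the rebased tree really carries the CVE-2023-20588 mitigation before dropping the downstream 5001-5003 patches is to grep the unpacked 6.1.49 source for the mitigation hook, and on affected Zen1 hardware the running kernel is also expected to report the erratum in its bug flags. The ./linux path below is an assumed unpack location.

    # assumes the kernel source from the new SRPM has been unpacked into ./linux
    grep -rn "amd_clear_divider" \
        linux/arch/x86/kernel/cpu/amd.c \
        linux/arch/x86/include/asm/entry-common.h
    # on affected Zen1 parts, "div0" is expected to appear in the bugs line
    grep -m1 '^bugs' /proc/cpuinfo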
Signed-off-by: Leonard Foerster --- ...e-the-x86-bugs-vector-size-to-two-u3.patch | 48 -------- ...ot-leak-quotient-data-after-a-divisi.patch | 111 ------------------ ...MD-Fix-the-DIV-0-initial-fix-attempt.patch | 82 ------------- packages/kernel-6.1/Cargo.toml | 4 +- packages/kernel-6.1/kernel-6.1.spec | 10 +- 5 files changed, 4 insertions(+), 251 deletions(-) delete mode 100644 packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch delete mode 100644 packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch delete mode 100644 packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch diff --git a/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch b/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch deleted file mode 100644 index 2052baa3..00000000 --- a/packages/kernel-6.1/5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 3bf59e709af08ffd0e321755b5699942474c1962 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 8 Jul 2023 10:21:35 +0200 -Subject: [PATCH] x86/bugs: Increase the x86 bugs vector size to two u32s - -Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea - -There was never a doubt in my mind that they would not fit into a single -u32 eventually. - -Signed-off-by: Borislav Petkov (AMD) -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit dfede4cb8ef732039b7a479d260bd89d3b474f14) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 +- - tools/arch/x86/include/asm/cpufeatures.h | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index b69f948be454..32221013c45d 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 20 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used -diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h -index b71f4f2ecdd5..9ecc62861194 100644 ---- a/tools/arch/x86/include/asm/cpufeatures.h -+++ b/tools/arch/x86/include/asm/cpufeatures.h -@@ -14,7 +14,7 @@ - * Defines x86 CPU feature bits - */ - #define NCAPINTS 20 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used --- -2.40.1 - diff --git a/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch b/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch deleted file mode 100644 index e0ef9e05..00000000 --- a/packages/kernel-6.1/5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 35131bf2a0cc0d522f294c21be7d9c2a88c06035 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Sat, 5 Aug 2023 00:06:43 +0200 -Subject: [PATCH] x86/CPU/AMD: Do not leak quotient data after a division by 0 - -commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. 
- -Under certain circumstances, an integer division by 0 which faults, can -leave stale quotient data from a previous division operation on Zen1 -microarchitectures. - -Do a dummy division 0/1 before returning from the #DE exception handler -in order to avoid any leaks of potentially sensitive data. - -Signed-off-by: Borislav Petkov (AMD) -Cc: -Signed-off-by: Linus Torvalds -Signed-off-by: Greg Kroah-Hartman -(cherry picked from commit f2615bb47be4f53be92c81a6a8aa286c92ef04d9) -Signed-off-by: Leonard Foerster ---- - arch/x86/include/asm/cpufeatures.h | 2 ++ - arch/x86/include/asm/processor.h | 2 ++ - arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ - arch/x86/kernel/traps.c | 2 ++ - 4 files changed, 25 insertions(+) - -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 32221013c45d..e2d980757511 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -467,4 +467,6 @@ - #define X86_BUG_RAS_POISONING X86_BUG(30) /* CPU is affected by RAS poisoning */ - #define X86_BUG_GDS X86_BUG(31) /* CPU is affected by Gather Data Sampling */ - -+/* BUG word 2 */ -+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ - #endif /* _ASM_X86_CPUFEATURES_H */ -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index d8277eec1bcd..7dc733062313 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -800,9 +800,11 @@ extern u16 get_llc_id(unsigned int cpu); - #ifdef CONFIG_CPU_SUP_AMD - extern u32 amd_get_nodes_per_socket(void); - extern u32 amd_get_highest_perf(void); -+extern void amd_clear_divider(void); - #else - static inline u32 amd_get_nodes_per_socket(void) { return 0; } - static inline u32 amd_get_highest_perf(void) { return 0; } -+static inline void amd_clear_divider(void) { } - #endif - - #define for_each_possible_hypervisor_cpuid_base(function) \ -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 7f4eb8b027cc..7a93bb12302d 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -75,6 +75,10 @@ static const int amd_zenbleed[] = - AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), - AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); - -+static const int amd_div0[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); -+ - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) - { - int osvw_id = *erratum++; -@@ -1115,6 +1119,11 @@ static void init_amd(struct cpuinfo_x86 *c) - check_null_seg_clears_base(c); - - zenbleed_check(c); -+ -+ if (cpu_has_amd_erratum(c, amd_div0)) { -+ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); -+ setup_force_cpu_bug(X86_BUG_DIV0); -+ } - } - - #ifdef CONFIG_X86_32 -@@ -1256,3 +1265,13 @@ void amd_check_microcode(void) - { - on_each_cpu(zenbleed_check_cpu, NULL, 1); - } -+ -+/* -+ * Issue a DIV 0/1 insn to clear any division data from previous DIV -+ * operations. 
-+ */ -+void noinstr amd_clear_divider(void) -+{ -+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) -+ :: "a" (0), "d" (0), "r" (1)); -+} -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index d3fdec706f1d..80b719ff60ed 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -+ -+ amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) --- -2.40.1 - diff --git a/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch b/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch deleted file mode 100644 index 0e1a58b6..00000000 --- a/packages/kernel-6.1/5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 20eb241125391039b9a7248b82e8e6c892522931 Mon Sep 17 00:00:00 2001 -From: "Borislav Petkov (AMD)" -Date: Fri, 11 Aug 2023 23:38:24 +0200 -Subject: [PATCH] x86/CPU/AMD: Fix the DIV(0) initial fix attempt - -commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. - -Initially, it was thought that doing an innocuous division in the #DE -handler would take care to prevent any leaking of old data from the -divider but by the time the fault is raised, the speculation has already -advanced too far and such data could already have been used by younger -operations. - -Therefore, do the innocuous division on every exit to userspace so that -userspace doesn't see any potentially old data from integer divisions in -kernel space. - -Do the same before VMRUN too, to protect host data from leaking into the -guest too. - -Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") -Signed-off-by: Borislav Petkov (AMD) -Cc: -Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de -Signed-off-by: Greg Kroah-Hartman ---- - arch/x86/include/asm/entry-common.h | 1 + - arch/x86/kernel/cpu/amd.c | 1 + - arch/x86/kernel/traps.c | 2 -- - arch/x86/kvm/svm/svm.c | 2 ++ - 4 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h -index 674ed46d3ced..11203a9fe0a8 100644 ---- a/arch/x86/include/asm/entry-common.h -+++ b/arch/x86/include/asm/entry-common.h -@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - static __always_inline void arch_exit_to_user_mode(void) - { - mds_user_clear_cpu_buffers(); -+ amd_clear_divider(); - } - #define arch_exit_to_user_mode arch_exit_to_user_mode - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 7a93bb12302d..b76e85f8cdb8 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -1275,3 +1275,4 @@ void noinstr amd_clear_divider(void) - asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) - :: "a" (0), "d" (0), "r" (1)); - } -+EXPORT_SYMBOL_GPL(amd_clear_divider); -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index 80b719ff60ed..d3fdec706f1d 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) - { - do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, - FPE_INTDIV, error_get_trap_addr(regs)); -- -- amd_clear_divider(); - } - - DEFINE_IDTENTRY(exc_overflow) -diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index fc1649b5931a..9d549826b23f 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ 
b/arch/x86/kvm/svm/svm.c -@@ -3940,6 +3940,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in - - guest_state_enter_irqoff(); - -+ amd_clear_divider(); -+ - if (sev_es_guest(vcpu->kvm)) - __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); - else --- -2.40.1 - diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 9f694803..e6ad1e28 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/789848dec5baccf864b022af695a2a3ac1ba52392a2b6aa83f19dc07d050df0a/kernel-6.1.41-63.114.amzn2023.src.rpm" -sha512 = "6a66562d23a21ac3fba56cb13680ef2cc0c3fe9b2b77e83c3e6da47ca36016413cd5ebac9266419e835d04f10fb509b00536fc3e38eb0e8d707db5f8fdd8f10e" +url = "https://cdn.amazonlinux.com/al2023/blobstore/b7fd4bb92caacd373bbd4cf41dca8c29736bf229c08ef80c59bb6063654d058b/kernel-6.1.49-69.116.amzn2023.src.rpm" +sha512 = "d9ccbf828b0466a226a6bf42e9d8a4482b4acea1bd27f6ba28a823d481d6357688a1594b457a6b8735b611d4d370b2aeb1382726ae694bb03f7aa1cf9ee7a9c2" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 42397792..94c6cb0b 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.41 +Version: 6.1.49 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/789848dec5baccf864b022af695a2a3ac1ba52392a2b6aa83f19dc07d050df0a/kernel-6.1.41-63.114.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/b7fd4bb92caacd373bbd4cf41dca8c29736bf229c08ef80c59bb6063654d058b/kernel-6.1.49-69.116.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal @@ -25,12 +25,6 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch -# Cherry-picked fixes for CVE-2023-20588 ("DIV0"). Can be dropped when moving -# upstream to 6.1.48 or later -Patch5001: 5001-x86-bugs-Increase-the-x86-bugs-vector-size-to-two-u3.patch -Patch5002: 5002-x86-CPU-AMD-Do-not-leak-quotient-data-after-a-divisi.patch -Patch5003: 5003-x86-CPU-AMD-Fix-the-DIV-0-initial-fix-attempt.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 208d5856cc2783ff3e26b5ca408f3fdd36e9337a Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Tue, 12 Sep 2023 01:28:01 +0000 Subject: [PATCH 1107/1356] rpm2img: create XFS partition on boot XFS reads information from the device on mkfs. This results in an optimized filesystem for the particular situation. Boot time is slightly better as well since the IO from mkfs is less than growfs. Having a prepared XFS partition with no data can result in similar problems as ac0cacc where data read in from an encrypted EBS volume returns random garbage which makes the filesystem appear corrupted. Having no filesystem sidesteps this problem. 
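The on-boot formatting itself is outside this diff; the change below only stops writing a filesystem at image build time. As a rough sketch of the intent (device path and detection logic are illustrative assumptions, not the actual Bottlerocket boot path), first boot can detect the blank partition and run mkfs against the real device:

    # illustrative sketch only -- not the real Bottlerocket implementation
    data_dev=/dev/disk/by-partlabel/BOTTLEROCKET-DATA   # assumed partition label
    if ! blkid "${data_dev}" >/dev/null 2>&1; then
        # no filesystem signature yet: mkfs.xfs can size allocation groups and
        # stripe units from the real device instead of a build-time image file
        mkfs.xfs -f "${data_dev}"
    fi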
Signed-off-by: Matthew Yeazel --- tools/rpm2img | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/tools/rpm2img b/tools/rpm2img index 245c15ad..93e4cb4d 100755 --- a/tools/rpm2img +++ b/tools/rpm2img @@ -637,20 +637,9 @@ mkfs_data() { offset="${3:?}" # Create an XFS filesystem if requested if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then - echo "writing XFS filesystem for DATA" + echo "writing blank partition for DATA" # Create a file to write the filesystem to first dd if=/dev/zero of="${BOTTLEROCKET_DATA}" bs=1M count=${size%?} - # block size of 4096, directory block size of 16384 - # enable inotbtcount, bigtime, and reflink - # use an internal log with starting size of 64m - # use the minimal 2 Allocation groups, this still overprovisions when expanded - # set strip units of 512k and sectsize to make EBS volumes align - mkfs.xfs \ - -b size=4096 -n size=16384 \ - -m inobtcount=1,bigtime=1,reflink=1 \ - -l internal,size=64m \ - -d agcount=2,su=512k,sw=1,sectsize=4096 \ - -f "${BOTTLEROCKET_DATA}" else # default to ext4 echo "writing ext4 filesystem for DATA" From 0e4117de41ec11b2e69473c62985de71baa1eeca Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 8 Aug 2023 22:38:18 +0000 Subject: [PATCH 1108/1356] testsys: Fix support for metal migration testing Previously all migration testing required a bottlerocket crd to be present, but since EKS Anywhere is used to provision machines, the bottlerocket crd is not created for metal. This means the migration agent needs to use the bottlerocket crd for instance ids if it's available and if not, rely on the cluster crd to get the instance ids. --- tools/testsys/src/migration.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tools/testsys/src/migration.rs b/tools/testsys/src/migration.rs index e90b0885..aeb3f7a1 100644 --- a/tools/testsys/src/migration.rs +++ b/tools/testsys/src/migration.rs @@ -17,10 +17,6 @@ pub(crate) fn migration_crd( .cluster_crd_name .as_ref() .expect("A cluster name is required for migrations"); - let bottlerocket_resource_name = migration_input - .bottlerocket_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); let labels = migration_input.crd_input.labels(btreemap! { "testsys/type".to_string() => "migration".to_string(), @@ -58,12 +54,22 @@ pub(crate) fn migration_crd( }; migration_config - .instance_ids_template(bottlerocket_resource_name, instance_id_field_name) + .instance_ids_template( + migration_input + .bottlerocket_crd_name + .as_ref() + .unwrap_or(cluster_resource_name), + instance_id_field_name, + ) .migrate_to_version(migration_version) .tuf_repo(migration_input.crd_input.tuf_repo_config()) .assume_role(migration_input.crd_input.config.agent_role.clone()) - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) + .set_resources(Some( + vec![cluster_resource_name.to_owned()] + .into_iter() + .chain(migration_input.bottlerocket_crd_name.iter().cloned()) + .collect(), + )) .set_depends_on(Some(migration_input.prev_tests)) .image( migration_input From 49c6413deb93da6264c6fa221cf58247a0c26845 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 8 Sep 2023 18:49:48 +0000 Subject: [PATCH 1109/1356] start-local-vm: update usage text to match current parameters Add new options and metasyntactic variables to the usage section. Perform some word smithing on the --product-name option while there. 
Signed-off-by: Markus Boehme --- tools/start-local-vm | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index 1045905e..b7dd0980 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -4,7 +4,7 @@ # # Common error handling # -# + exit_trap_cmds=() on_exit() { @@ -49,8 +49,11 @@ show_usage() { echo "\ usage: ${0##*/} [--arch BUILDSYS_ARCH] [--variant BUILDSYS_VARIANT] [--host-port-forwards HOST_PORT_FWDS] + [--product-name NAME] [--vm-memory VM_MEMORY] [--vm-cpus VM_CPUS] + [--force-extract] [--inject-file LOCAL_PATH[:IMAGE_PATH]]... + [--firmware-code PATH] [--firmware-vars PATH] Launch a local virtual machine from a Bottlerocket image. @@ -61,10 +64,9 @@ Options: BUILDSYS_ARCH environment variable is set) --variant Bottlerocket variant to run (may be omitted if the BUILDSYS_VARIANT environment variable is set) - --product-name - product name used for file and directory naming used when - building with the "-e BUILDSYS_NAME" option; may be omitted if the - BUILDSYS_NAME environment variable is set. Otherwise default is bottlerocket if not defined or empty + --product-name short product name used as prefix for file and directory + names (defaults to the BUILDSYS_NAME environment variable + or 'bottlerocket' when that is unset) --host-port-forwards list of host ports to forward to the VM; HOST_PORT_FWDS must be a valid QEMU port forwarding specifier (default @@ -196,7 +198,7 @@ prepare_raw_images() { prepare_firmware() { # Create local copies of the edk2 firmware variable storage, to help with - # faciliate Secure Boot testing where custom variables are needed for both + # facilitate Secure Boot testing where custom variables are needed for both # architectures, but can't safely be reused across QEMU invocations. Also # set reasonable defaults for both firmware files, if nothing more specific # was requested. @@ -329,4 +331,4 @@ prepare_raw_images prepare_firmware create_extra_files inject_files -launch_vm \ No newline at end of file +launch_vm From 6159766686b878802f9f62b6114e8b67483d82bf Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 12 Sep 2023 14:58:04 +0000 Subject: [PATCH 1110/1356] start-local-vm: match variable names to buildsys terminology As Ben Cressey rightfully pointed out, the `start-local-vm` script is using slightly different terminology than buildsys when referring to the final Bottlerocket images. Adopt the existing terminology to avoid confusion by renaming "boot images" to "OS images". 
Signed-off-by: Markus Boehme --- tools/start-local-vm | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/start-local-vm b/tools/start-local-vm index b7dd0980..7c69c781 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -35,7 +35,7 @@ vm_cpus=4 force_extract= declare -A extra_files=() -boot_image= +os_image= data_image= @@ -177,12 +177,12 @@ extract_image() { prepare_raw_images() { local -r image_dir=build/images/${arch}-${variant}/latest - local -r compressed_boot_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 + local -r compressed_os_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 local -r compressed_data_image=${image_dir}/${product_name}-${variant}-${arch}-data.img.lz4 - if [[ -e ${compressed_boot_image} ]]; then - readonly boot_image=${compressed_boot_image%*.lz4} - extract_image "${compressed_boot_image}" "${boot_image}" + if [[ -e ${compressed_os_image} ]]; then + readonly os_image=${compressed_os_image%*.lz4} + extract_image "${compressed_os_image}" "${os_image}" else bail 'Boot image not found. Did the last build fail?' fi @@ -261,10 +261,10 @@ inject_files() { # absence of actual hardware, assume a traditional sector size of 512 bytes. local private_first_sector private_last_sector read -r private_first_sector private_last_sector < <( - fdisk --list-details "${boot_image}" \ + fdisk --list-details "${os_image}" \ | awk '/BOTTLEROCKET-PRIVATE/ { print $2, $3 }') if [[ -z ${private_first_sector} ]] || [[ -z ${private_last_sector} ]]; then - bail "Failed to find the private partition in '${boot_image}'." + bail "Failed to find the private partition in '${os_image}'." fi local private_size_mib=$(( (private_last_sector - private_first_sector + 1) * 512 / 1024 / 1024 )) @@ -279,11 +279,11 @@ inject_files() { done if ! mkfs.ext4 -d "${private_mount}" "${private_image}" "${private_size_mib}M" \ - || ! dd if="${private_image}" of="${boot_image}" conv=notrunc bs=512 seek="${private_first_sector}" + || ! dd if="${private_image}" of="${os_image}" conv=notrunc bs=512 seek="${private_first_sector}" then rm -f "${private_image}" rm -rf "${private_mount}" - bail "Failed to inject files into '${boot_image}'." + bail "Failed to inject files into '${os_image}'." fi } @@ -296,7 +296,7 @@ launch_vm() { -m "${vm_mem}" -drive if=pflash,format=raw,unit=0,file="${firmware_code}",readonly=on -drive if=pflash,format=raw,unit=1,file="${firmware_vars}" - -drive index=0,if=virtio,format=raw,file="${boot_image}" + -drive index=0,if=virtio,format=raw,file="${os_image}" ) # Plug the virtual primary NIC in as BDF 00:10.0 so udev will give it a From a6bf4f9a989c12f7980835e4c6ffd4dc7213a10c Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 8 Sep 2023 19:06:53 +0000 Subject: [PATCH 1111/1356] start-local-vm: allow resizing the extracted images before boot Sometimes it is helpful to not go with the default image sizes for testing purposes--in practice, this would mean testing with bigger images. Introduce the `--os-image-size` and `--data-image-size` options to `start-local-vm` that allow for resizing the respective images after they have been extracted, but before they are booted. 
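For illustration, an invocation using the new flags might look like the following; the values are made up, but the options and example sizes match the usage text added in the diff below.

    # boot a variant with an enlarged OS disk and a 20 GiB data disk
    ./tools/start-local-vm --variant aws-dev --arch x86_64 \
        --os-image-size 4096M --data-image-size 20G

Note that truncate only grows the raw image file and leaves the partition table untouched; whether the extra space is used depends on what the booted variant does at startup (for example, growing the data filesystem to fill the disk).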
Signed-off-by: Markus Boehme --- tools/start-local-vm | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/start-local-vm b/tools/start-local-vm index 7c69c781..b6f0875a 100755 --- a/tools/start-local-vm +++ b/tools/start-local-vm @@ -54,6 +54,7 @@ usage: ${0##*/} [--arch BUILDSYS_ARCH] [--variant BUILDSYS_VARIANT] [--force-extract] [--inject-file LOCAL_PATH[:IMAGE_PATH]]... [--firmware-code PATH] [--firmware-vars PATH] + [--os-image-size SIZE] [--data-image-size SIZE] Launch a local virtual machine from a Bottlerocket image. @@ -82,6 +83,8 @@ Options: private partition will be lost --firmware-code override the default firmware executable file --firmware-vars override the initial firmware variable storage file + --os-image-size resize the OS disk image to the given size (e.g. 4096M) + --data-image-size resize the data disk image to the given size (e.g. 20G) --help shows this usage text By default, the virtual machine's port 22 (SSH) will be exposed via the local @@ -147,6 +150,12 @@ parse_args() { --firmware-vars) shift; firmware_vars=$1 ;; + --os-image-size) + shift; os_image_size=$1 + ;; + --data-image-size) + shift; data_image_size=$1 + ;; *) usage_error "unknown option '$1'" ;; esac @@ -194,6 +203,20 @@ prepare_raw_images() { # Missing data image is fine. This variant may not be a split build. readonly data_image= fi + + if [[ -n ${os_image_size} ]]; then + truncate --no-create --size "${os_image_size}" "${os_image}" \ + || bail "Failed to resize OS image '${os_image}'." + fi + + if [[ -n ${data_image_size} ]]; then + if [[ -e ${data_image} ]]; then + truncate --no-create --size "${data_image_size}" "${data_image}" \ + || bail "Failed to resize data image '${data_image}'." + else + >&2 echo "Ignoring option --data-image-size ${data_image_size} since no data image was found." 
+ fi + fi } prepare_firmware() { From 9136eadf4bd505de90a5483fd57b6366588734d6 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Wed, 13 Sep 2023 21:55:50 +0000 Subject: [PATCH 1112/1356] testsys: Update testsys to v0.0.9 --- tools/Cargo.lock | 16 ++++++++-------- tools/testsys-config/Cargo.toml | 8 ++++---- tools/testsys/Cargo.toml | 9 ++++++--- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/tools/Cargo.lock b/tools/Cargo.lock index 121d4d8b..7ac2c9d2 100644 --- a/tools/Cargo.lock +++ b/tools/Cargo.lock @@ -739,8 +739,8 @@ dependencies = [ [[package]] name = "bottlerocket-types" -version = "0.0.8" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" +version = "0.0.9" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" dependencies = [ "builder-derive", "configuration-derive", @@ -772,8 +772,8 @@ dependencies = [ [[package]] name = "builder-derive" -version = "0.0.8" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" +version = "0.0.9" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" dependencies = [ "proc-macro2", "quote", @@ -969,8 +969,8 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "configuration-derive" -version = "0.0.8" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" +version = "0.0.9" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" dependencies = [ "quote", "syn 1.0.109", @@ -3141,8 +3141,8 @@ dependencies = [ [[package]] name = "testsys-model" -version = "0.0.8" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.8#cea049d4e94d5824beb25ffc23893c358d187ef9" +version = "0.0.9" +source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" dependencies = [ "async-recursion", "async-trait", diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml index f6b69472..064097ff 100644 --- a/tools/testsys-config/Cargo.toml +++ b/tools/testsys-config/Cargo.toml @@ -7,13 +7,13 @@ edition = "2021" publish = false [dependencies] -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } handlebars = "4" log = "0.4" -maplit="1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} -serde = { version = "1", features = ["derive"] } +maplit = "1" +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } +serde = { version = "1", features = ["derive"] } serde_plain = "1" serde_yaml = "0.9" snafu = "0.7" diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml index 08a96dc6..ff7ccce2 100644 --- a/tools/testsys/Cargo.toml +++ b/tools/testsys/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "testsys" version = "0.1.0" -authors = ["Ethan Pullen 
", "Matt Briggs "] +authors = [ + "Ethan Pullen ", + "Matt Briggs ", +] license = "Apache-2.0 OR MIT" edition = "2021" publish = false @@ -11,7 +14,7 @@ async-trait = "0.1" aws-config = "0.55" aws-sdk-ec2 = "0.28" base64 = "0.20" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} +bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } clap = { version = "4", features = ["derive", "env"] } env_logger = "0.10" @@ -19,7 +22,7 @@ futures = "0.3" handlebars = "4" log = "0.4" maplit = "1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.8", tag = "v0.0.8"} +testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } fastrand = "1" serde = { version = "1", features = ["derive"] } From 627713de63d273e151fd9b97c7243c32ceed8673 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Fri, 8 Sep 2023 17:56:33 -0700 Subject: [PATCH 1113/1356] testsys: support for overriding EKS service endpoint This adds the plumbing necessary to override the EKS service endpoint for the EKS cluster agent when it queries cluster metadata for populating information necessary to launch nodes into the cluster. --- tools/testsys-config/src/lib.rs | 4 ++++ tools/testsys/src/aws_k8s.rs | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index b841a720..867355cd 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -360,6 +360,9 @@ pub struct DeveloperConfig { pub keep_tests_running: Option, /// Use an alternate account for image lookup pub image_account_id: Option, + /// Overrides the EKS service endpoint for TestSys agents gathering EKS cluster metadata + /// (only for pre-existing EKS clusters, does not apply to new EKS cluster creation) + pub eks_service_endpoint: Option, } impl DeveloperConfig { @@ -374,6 +377,7 @@ impl DeveloperConfig { .or(other.bottlerocket_destruction_policy), keep_tests_running: self.keep_tests_running.or(other.keep_tests_running), image_account_id: self.image_account_id.or(other.image_account_id), + eks_service_endpoint: self.eks_service_endpoint.or(other.eks_service_endpoint), } } } diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs index 971607d9..2fc063fa 100644 --- a/tools/testsys/src/aws_k8s.rs +++ b/tools/testsys/src/aws_k8s.rs @@ -106,6 +106,14 @@ impl CrdCreator for AwsK8sCreator { let eks_crd = EksClusterConfig::builder() .creation_policy(CreationPolicy::IfNotExists) + .eks_service_endpoint( + cluster_input + .crd_input + .config + .dev + .eks_service_endpoint + .clone(), + ) .assume_role(cluster_input.crd_input.config.agent_role.clone()) .config(config) .image( From 94bc65a3366b41b9589c33ebfe5623825438c5b3 Mon Sep 17 00:00:00 2001 From: Erikson Tung Date: Mon, 11 Sep 2023 14:43:13 -0700 Subject: [PATCH 1114/1356] testsys: launch nodes in public subnets by default It's more likely for a test cluster to at least have one or more public subnets than it is for it to have at least one or more private subnet. 
--- tools/testsys/src/aws_resources.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs index 5f6fc2f2..aa97a3fb 100644 --- a/tools/testsys/src/aws_resources.rs +++ b/tools/testsys/src/aws_resources.rs @@ -207,7 +207,7 @@ pub(crate) async fn ec2_crd<'a>( // Add in the EKS specific configuration. if cluster_type == ClusterType::Eks { ec2_builder - .subnet_ids_template(cluster_name, "privateSubnetIds") + .subnet_ids_template(cluster_name, "publicSubnetIds") .endpoint_template(cluster_name, "endpoint") .certificate_template(cluster_name, "certificate") .cluster_dns_ip_template(cluster_name, "clusterDnsIp") @@ -305,7 +305,7 @@ pub(crate) async fn ec2_karpenter_crd<'a>( ) .cluster_name_template(cluster_name, "clusterName") .region_template(cluster_name, "region") - .subnet_ids_template(cluster_name, "privateSubnetIds") + .subnet_ids_template(cluster_name, "publicSubnetIds") .endpoint_template(cluster_name, "endpoint") .cluster_sg_template(cluster_name, "clustersharedSg") .device_mappings(device_mappings) From bc16c8ebb5a1e55d24651959cc31fb47fa3786be Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 12 Sep 2023 22:37:37 +0000 Subject: [PATCH 1115/1356] testsys: Add support for eksa release manifest --- tools/testsys-config/src/lib.rs | 5 +++++ tools/testsys/src/metal_k8s.rs | 8 ++++++++ tools/testsys/src/vmware_k8s.rs | 8 ++++++++ 3 files changed, 21 insertions(+) diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs index 867355cd..b43c06a6 100644 --- a/tools/testsys-config/src/lib.rs +++ b/tools/testsys-config/src/lib.rs @@ -363,6 +363,8 @@ pub struct DeveloperConfig { /// Overrides the EKS service endpoint for TestSys agents gathering EKS cluster metadata /// (only for pre-existing EKS clusters, does not apply to new EKS cluster creation) pub eks_service_endpoint: Option, + /// A manifest containing the EKS Anywhere binary that should be used for cluster provisioning + pub eks_a_release_manifest_url: Option, } impl DeveloperConfig { @@ -378,6 +380,9 @@ impl DeveloperConfig { keep_tests_running: self.keep_tests_running.or(other.keep_tests_running), image_account_id: self.image_account_id.or(other.image_account_id), eks_service_endpoint: self.eks_service_endpoint.or(other.eks_service_endpoint), + eks_a_release_manifest_url: self + .eks_a_release_manifest_url + .or(other.eks_a_release_manifest_url), } } } diff --git a/tools/testsys/src/metal_k8s.rs b/tools/testsys/src/metal_k8s.rs index 4304a8cd..3cef00a1 100644 --- a/tools/testsys/src/metal_k8s.rs +++ b/tools/testsys/src/metal_k8s.rs @@ -115,6 +115,14 @@ impl CrdCreator for MetalK8sCreator { what: "A cluster config is required for Bare Metal testing", })?, )) + .eks_a_release_manifest_url( + cluster_input + .crd_input + .config + .dev + .eks_a_release_manifest_url + .clone(), + ) .set_conflicts_with(Some(existing_clusters)) .destruction_policy( cluster_input diff --git a/tools/testsys/src/vmware_k8s.rs b/tools/testsys/src/vmware_k8s.rs index 51d43b8f..43d26f77 100644 --- a/tools/testsys/src/vmware_k8s.rs +++ b/tools/testsys/src/vmware_k8s.rs @@ -120,6 +120,14 @@ impl CrdCreator for VmwareK8sCreator { .vcenter_resource_pool(&self.datacenter.resource_pool) .vcenter_workload_folder(&self.datacenter.folder) .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig) + .eks_a_release_manifest_url( + cluster_input + .crd_input + .config + .dev + .eks_a_release_manifest_url + .clone(), + ) 
.set_conflicts_with(Some(existing_clusters)) .destruction_policy( cluster_input From d0acfe91f2f3cc42338abd1268681ebb5db1cdc4 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Sat, 19 Aug 2023 16:01:31 -0700 Subject: [PATCH 1116/1356] update twoliter and remove tools --- tools/.gitignore | 1 + tools/Cargo.lock | 3987 ----------------- tools/Cargo.toml | 11 - tools/buildsys/.gitignore | 0 tools/buildsys/Cargo.toml | 26 - .../src/bin/bottlerocket-variant/main.rs | 70 - tools/buildsys/src/builder.rs | 650 --- tools/buildsys/src/builder/error.rs | 82 - tools/buildsys/src/cache.rs | 154 - tools/buildsys/src/cache/error.rs | 55 - tools/buildsys/src/gomod.rs | 207 - tools/buildsys/src/gomod/error.rs | 57 - tools/buildsys/src/lib.rs | 1 - tools/buildsys/src/main.rs | 296 -- tools/buildsys/src/manifest.rs | 582 --- tools/buildsys/src/manifest/error.rs | 22 - tools/buildsys/src/project.rs | 51 - tools/buildsys/src/project/error.rs | 10 - tools/buildsys/src/spec.rs | 74 - tools/buildsys/src/spec/error.rs | 12 - tools/deny.toml | 102 - tools/docker-go | 94 - tools/infrasys/Cargo.toml | 29 - .../kms_key_setup.yml | 30 - .../cloudformation-templates/s3_setup.yml | 25 - tools/infrasys/src/error.rs | 169 - tools/infrasys/src/keys.rs | 150 - tools/infrasys/src/main.rs | 361 -- tools/infrasys/src/root.rs | 206 - tools/infrasys/src/s3.rs | 369 -- tools/infrasys/src/shared.rs | 99 - .../test_tomls/toml_yaml_conversion.toml | 12 - .../test_tomls/toml_yaml_conversion.yml | 40 - tools/install-twoliter.sh | 168 + tools/partyplanner | 276 -- tools/pubsys-config/Cargo.toml | 19 - tools/pubsys-config/src/lib.rs | 279 -- tools/pubsys-config/src/vmware.rs | 221 - tools/pubsys-setup/Cargo.toml | 20 - tools/pubsys-setup/src/main.rs | 388 -- tools/pubsys/Cargo.toml | 52 - .../pubsys/src/aws/ami/launch_permissions.rs | 101 - tools/pubsys/src/aws/ami/mod.rs | 627 --- tools/pubsys/src/aws/ami/public.rs | 64 - tools/pubsys/src/aws/ami/register.rs | 331 -- tools/pubsys/src/aws/ami/snapshot.rs | 65 - tools/pubsys/src/aws/ami/wait.rs | 139 - tools/pubsys/src/aws/client.rs | 71 - tools/pubsys/src/aws/mod.rs | 42 - tools/pubsys/src/aws/promote_ssm/mod.rs | 550 --- tools/pubsys/src/aws/publish_ami/mod.rs | 731 --- tools/pubsys/src/aws/ssm/mod.rs | 540 --- tools/pubsys/src/aws/ssm/ssm.rs | 472 -- tools/pubsys/src/aws/ssm/template.rs | 415 -- tools/pubsys/src/aws/validate_ami/ami.rs | 223 - tools/pubsys/src/aws/validate_ami/mod.rs | 850 ---- tools/pubsys/src/aws/validate_ami/results.rs | 1034 ----- tools/pubsys/src/aws/validate_ssm/mod.rs | 797 ---- tools/pubsys/src/aws/validate_ssm/results.rs | 615 --- tools/pubsys/src/main.rs | 265 -- tools/pubsys/src/repo.rs | 808 ---- .../pubsys/src/repo/check_expirations/mod.rs | 184 - tools/pubsys/src/repo/refresh_repo/mod.rs | 214 - tools/pubsys/src/repo/validate_repo/mod.rs | 198 - tools/pubsys/src/vmware/govc.rs | 177 - tools/pubsys/src/vmware/mod.rs | 2 - tools/pubsys/src/vmware/upload_ova/mod.rs | 239 - tools/rpm2img | 792 ---- tools/rpm2kmodkit | 58 - tools/rpm2migrations | 56 - tools/testsys-config/Cargo.toml | 20 - tools/testsys-config/src/lib.rs | 554 --- tools/testsys/Cargo.toml | 37 - tools/testsys/Test.toml.example | 125 - tools/testsys/src/aws_ecs.rs | 281 -- tools/testsys/src/aws_k8s.rs | 238 - tools/testsys/src/aws_resources.rs | 348 -- tools/testsys/src/crds.rs | 787 ---- tools/testsys/src/delete.rs | 80 - tools/testsys/src/error.rs | 121 - tools/testsys/src/install.rs | 62 - tools/testsys/src/logs.rs | 47 - tools/testsys/src/main.rs | 112 - 
tools/testsys/src/metal_k8s.rs | 261 -- tools/testsys/src/migration.rs | 109 - tools/testsys/src/restart_test.rs | 18 - tools/testsys/src/run.rs | 619 --- tools/testsys/src/secret.rs | 118 - tools/testsys/src/sonobuoy.rs | 172 - tools/testsys/src/status.rs | 128 - tools/testsys/src/uninstall.rs | 21 - tools/testsys/src/vmware_k8s.rs | 299 -- 92 files changed, 169 insertions(+), 24505 deletions(-) delete mode 100644 tools/Cargo.lock delete mode 100644 tools/Cargo.toml delete mode 100644 tools/buildsys/.gitignore delete mode 100644 tools/buildsys/Cargo.toml delete mode 100644 tools/buildsys/src/bin/bottlerocket-variant/main.rs delete mode 100644 tools/buildsys/src/builder.rs delete mode 100644 tools/buildsys/src/builder/error.rs delete mode 100644 tools/buildsys/src/cache.rs delete mode 100644 tools/buildsys/src/cache/error.rs delete mode 100644 tools/buildsys/src/gomod.rs delete mode 100644 tools/buildsys/src/gomod/error.rs delete mode 100644 tools/buildsys/src/lib.rs delete mode 100644 tools/buildsys/src/main.rs delete mode 100644 tools/buildsys/src/manifest.rs delete mode 100644 tools/buildsys/src/manifest/error.rs delete mode 100644 tools/buildsys/src/project.rs delete mode 100644 tools/buildsys/src/project/error.rs delete mode 100644 tools/buildsys/src/spec.rs delete mode 100644 tools/buildsys/src/spec/error.rs delete mode 100644 tools/deny.toml delete mode 100755 tools/docker-go delete mode 100644 tools/infrasys/Cargo.toml delete mode 100644 tools/infrasys/cloudformation-templates/kms_key_setup.yml delete mode 100644 tools/infrasys/cloudformation-templates/s3_setup.yml delete mode 100644 tools/infrasys/src/error.rs delete mode 100644 tools/infrasys/src/keys.rs delete mode 100644 tools/infrasys/src/main.rs delete mode 100644 tools/infrasys/src/root.rs delete mode 100644 tools/infrasys/src/s3.rs delete mode 100644 tools/infrasys/src/shared.rs delete mode 100644 tools/infrasys/test_tomls/toml_yaml_conversion.toml delete mode 100644 tools/infrasys/test_tomls/toml_yaml_conversion.yml create mode 100755 tools/install-twoliter.sh delete mode 100755 tools/partyplanner delete mode 100644 tools/pubsys-config/Cargo.toml delete mode 100644 tools/pubsys-config/src/lib.rs delete mode 100644 tools/pubsys-config/src/vmware.rs delete mode 100644 tools/pubsys-setup/Cargo.toml delete mode 100644 tools/pubsys-setup/src/main.rs delete mode 100644 tools/pubsys/Cargo.toml delete mode 100644 tools/pubsys/src/aws/ami/launch_permissions.rs delete mode 100644 tools/pubsys/src/aws/ami/mod.rs delete mode 100644 tools/pubsys/src/aws/ami/public.rs delete mode 100644 tools/pubsys/src/aws/ami/register.rs delete mode 100644 tools/pubsys/src/aws/ami/snapshot.rs delete mode 100644 tools/pubsys/src/aws/ami/wait.rs delete mode 100644 tools/pubsys/src/aws/client.rs delete mode 100644 tools/pubsys/src/aws/mod.rs delete mode 100644 tools/pubsys/src/aws/promote_ssm/mod.rs delete mode 100644 tools/pubsys/src/aws/publish_ami/mod.rs delete mode 100644 tools/pubsys/src/aws/ssm/mod.rs delete mode 100644 tools/pubsys/src/aws/ssm/ssm.rs delete mode 100644 tools/pubsys/src/aws/ssm/template.rs delete mode 100644 tools/pubsys/src/aws/validate_ami/ami.rs delete mode 100644 tools/pubsys/src/aws/validate_ami/mod.rs delete mode 100644 tools/pubsys/src/aws/validate_ami/results.rs delete mode 100644 tools/pubsys/src/aws/validate_ssm/mod.rs delete mode 100644 tools/pubsys/src/aws/validate_ssm/results.rs delete mode 100644 tools/pubsys/src/main.rs delete mode 100644 tools/pubsys/src/repo.rs delete mode 100644 
tools/pubsys/src/repo/check_expirations/mod.rs delete mode 100644 tools/pubsys/src/repo/refresh_repo/mod.rs delete mode 100644 tools/pubsys/src/repo/validate_repo/mod.rs delete mode 100644 tools/pubsys/src/vmware/govc.rs delete mode 100644 tools/pubsys/src/vmware/mod.rs delete mode 100644 tools/pubsys/src/vmware/upload_ova/mod.rs delete mode 100755 tools/rpm2img delete mode 100755 tools/rpm2kmodkit delete mode 100755 tools/rpm2migrations delete mode 100644 tools/testsys-config/Cargo.toml delete mode 100644 tools/testsys-config/src/lib.rs delete mode 100644 tools/testsys/Cargo.toml delete mode 100644 tools/testsys/Test.toml.example delete mode 100644 tools/testsys/src/aws_ecs.rs delete mode 100644 tools/testsys/src/aws_k8s.rs delete mode 100644 tools/testsys/src/aws_resources.rs delete mode 100644 tools/testsys/src/crds.rs delete mode 100644 tools/testsys/src/delete.rs delete mode 100644 tools/testsys/src/error.rs delete mode 100644 tools/testsys/src/install.rs delete mode 100644 tools/testsys/src/logs.rs delete mode 100644 tools/testsys/src/main.rs delete mode 100644 tools/testsys/src/metal_k8s.rs delete mode 100644 tools/testsys/src/migration.rs delete mode 100644 tools/testsys/src/restart_test.rs delete mode 100644 tools/testsys/src/run.rs delete mode 100644 tools/testsys/src/secret.rs delete mode 100644 tools/testsys/src/sonobuoy.rs delete mode 100644 tools/testsys/src/status.rs delete mode 100644 tools/testsys/src/uninstall.rs delete mode 100644 tools/testsys/src/vmware_k8s.rs diff --git a/tools/.gitignore b/tools/.gitignore index d3ceb7fc..d745034e 100644 --- a/tools/.gitignore +++ b/tools/.gitignore @@ -1,3 +1,4 @@ /bin +/twoliter /.crates.toml /.crates2.json diff --git a/tools/Cargo.lock b/tools/Cargo.lock deleted file mode 100644 index 7ac2c9d2..00000000 --- a/tools/Cargo.lock +++ /dev/null @@ -1,3987 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aho-corasick" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anstream" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is-terminal", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" - -[[package]] -name = "anstyle-parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "anstyle-wincon" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" -dependencies = [ - "anstyle", - "windows-sys 0.48.0", -] - -[[package]] -name = "argh" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7af5ba06967ff7214ce4c7419c7d185be7ecd6cc4965a8f6e1d8ce0398aad219" -dependencies = [ - "argh_derive", - "argh_shared", -] - -[[package]] -name = "argh_derive" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56df0aeedf6b7a2fc67d06db35b09684c3e8da0c95f8f27685cb17e08413d87a" -dependencies = [ - "argh_shared", - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "argh_shared" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5693f39141bda5760ecc4111ab08da40565d1771038c4a0250f03457ec707531" -dependencies = [ - "serde", -] - -[[package]] -name = "assert-json-diff" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "async-recursion" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "async-trait" -version = "0.1.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "aws-config" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdcf0d683fe9c23d32cf5b53c9918ea0a500375a9fb20109802552658e576c9" -dependencies = [ - "aws-credential-types", - "aws-http", - "aws-sdk-sso", - "aws-sdk-sts", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-types", - "bytes", - "fastrand 1.9.0", - "hex", - "http", - "hyper", - "ring", - "time", - "tokio", - "tower", - "tracing", - "zeroize", -] - -[[package]] -name = "aws-credential-types" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcdb2f7acbc076ff5ad05e7864bdb191ca70a6fd07668dc3a1a8bcd051de5ae" -dependencies = [ - "aws-smithy-async", - "aws-smithy-types", - "fastrand 1.9.0", - "tokio", - "tracing", - "zeroize", -] - -[[package]] -name = "aws-endpoint" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cce1c41a6cfaa726adee9ebb9a56fcd2bbfd8be49fd8a04c5e20fd968330b04" -dependencies = [ - "aws-smithy-http", - "aws-smithy-types", - "aws-types", - "http", - "regex", - "tracing", -] - -[[package]] -name = "aws-http" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aadbc44e7a8f3e71c8b374e03ecd972869eb91dd2bc89ed018954a52ba84bc44" -dependencies = [ - "aws-credential-types", - "aws-smithy-http", - "aws-smithy-types", - "aws-types", - "bytes", - "http", - "http-body", - "lazy_static", - "percent-encoding", - "pin-project-lite", - "tracing", -] - -[[package]] -name = "aws-sdk-cloudformation" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f32bb66da99e2955ce49e346200cb14421784755a39c74fe2c043536b2d57ba" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-query", - "aws-smithy-types", - "aws-smithy-xml", - "aws-types", - "bytes", - "fastrand 1.9.0", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-ebs" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c44666651c93b43b78bc3d0bc280efffa64ab6c23ecb3370ed0760d6e69d417" 
-dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-types", - "bytes", - "fastrand 1.9.0", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-ec2" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab2493c5857725eeafe12ec66ba4ce6feb3355e3af6828d9ef28d6152972a27" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-query", - "aws-smithy-types", - "aws-smithy-xml", - "aws-types", - "bytes", - "fastrand 1.9.0", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-kms" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545335abd7c6ef7285d2972a67b9f8279ff5fec8bbb3ffc637fa436ba1e6e434" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-types", - "bytes", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-s3" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba197193cbb4bcb6aad8d99796b2291f36fa89562ded5d4501363055b0de89f" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-sigv4", - "aws-smithy-async", - "aws-smithy-checksums", - "aws-smithy-client", - "aws-smithy-eventstream", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-smithy-xml", - "aws-types", - "bytes", - "http", - "http-body", - "once_cell", - "percent-encoding", - "regex", - "tokio-stream", - "tower", - "tracing", - "url", -] - -[[package]] -name = "aws-sdk-ssm" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "014a095ed73c1f789699dfeb45a2b1debb03119910392bd7fcda4a07a72b3af4" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-types", - "bytes", - "fastrand 1.9.0", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-sso" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8b812340d86d4a766b2ca73f740dfd47a97c2dff0c06c8517a16d88241957e4" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-types", - "aws-types", - "bytes", - "http", - "regex", - "tokio-stream", - "tower", - "tracing", -] - -[[package]] -name = "aws-sdk-sts" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265fac131fbfc188e5c3d96652ea90ecc676a934e3174eaaee523c6cec040b3b" -dependencies = [ - "aws-credential-types", - "aws-endpoint", - "aws-http", - "aws-sig-auth", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", 
- "aws-smithy-http-tower", - "aws-smithy-json", - "aws-smithy-query", - "aws-smithy-types", - "aws-smithy-xml", - "aws-types", - "bytes", - "http", - "regex", - "tower", - "tracing", -] - -[[package]] -name = "aws-sig-auth" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b94acb10af0c879ecd5c7bdf51cda6679a0a4f4643ce630905a77673bfa3c61" -dependencies = [ - "aws-credential-types", - "aws-sigv4", - "aws-smithy-eventstream", - "aws-smithy-http", - "aws-types", - "http", - "tracing", -] - -[[package]] -name = "aws-sigv4" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2ce6f507be68e968a33485ced670111d1cbad161ddbbab1e313c03d37d8f4c" -dependencies = [ - "aws-smithy-eventstream", - "aws-smithy-http", - "bytes", - "form_urlencoded", - "hex", - "hmac", - "http", - "once_cell", - "percent-encoding", - "regex", - "sha2", - "time", - "tracing", -] - -[[package]] -name = "aws-smithy-async" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bda3996044c202d75b91afeb11a9afae9db9a721c6a7a427410018e286b880" -dependencies = [ - "futures-util", - "pin-project-lite", - "tokio", - "tokio-stream", -] - -[[package]] -name = "aws-smithy-checksums" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ed8b96d95402f3f6b8b57eb4e0e45ee365f78b1a924faf20ff6e97abf1eae6" -dependencies = [ - "aws-smithy-http", - "aws-smithy-types", - "bytes", - "crc32c", - "crc32fast", - "hex", - "http", - "http-body", - "md-5", - "pin-project-lite", - "sha1", - "sha2", - "tracing", -] - -[[package]] -name = "aws-smithy-client" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a86aa6e21e86c4252ad6a0e3e74da9617295d8d6e374d552be7d3059c41cedd" -dependencies = [ - "aws-smithy-async", - "aws-smithy-http", - "aws-smithy-http-tower", - "aws-smithy-types", - "bytes", - "fastrand 1.9.0", - "http", - "http-body", - "hyper", - "hyper-rustls 0.23.2", - "lazy_static", - "pin-project-lite", - "rustls 0.20.8", - "tokio", - "tower", - "tracing", -] - -[[package]] -name = "aws-smithy-eventstream" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460c8da5110835e3d9a717c61f5556b20d03c32a1dec57f8fc559b360f733bb8" -dependencies = [ - "aws-smithy-types", - "bytes", - "crc32fast", -] - -[[package]] -name = "aws-smithy-http" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b3b693869133551f135e1f2c77cb0b8277d9e3e17feaf2213f735857c4f0d28" -dependencies = [ - "aws-smithy-eventstream", - "aws-smithy-types", - "bytes", - "bytes-utils", - "futures-core", - "http", - "http-body", - "hyper", - "once_cell", - "percent-encoding", - "pin-project-lite", - "pin-utils", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "aws-smithy-http-tower" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae4f6c5798a247fac98a867698197d9ac22643596dc3777f0c76b91917616b9" -dependencies = [ - "aws-smithy-http", - "aws-smithy-types", - "bytes", - "http", - "http-body", - "pin-project-lite", - "tower", - "tracing", -] - -[[package]] -name = "aws-smithy-json" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23f9f42fbfa96d095194a632fbac19f60077748eba536eb0b9fecc28659807f8" -dependencies = [ - "aws-smithy-types", -] - -[[package]] -name = 
"aws-smithy-query" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98819eb0b04020a1c791903533b638534ae6c12e2aceda3e6e6fba015608d51d" -dependencies = [ - "aws-smithy-types", - "urlencoding", -] - -[[package]] -name = "aws-smithy-types" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16a3d0bf4f324f4ef9793b86a1701d9700fbcdbd12a846da45eed104c634c6e8" -dependencies = [ - "base64-simd", - "itoa", - "num-integer", - "ryu", - "time", -] - -[[package]] -name = "aws-smithy-xml" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1b9d12875731bd07e767be7baad95700c3137b56730ec9ddeedb52a5e5ca63b" -dependencies = [ - "xmlparser", -] - -[[package]] -name = "aws-types" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd209616cc8d7bfb82f87811a5c655dc97537f592689b18743bddf5dc5c4829" -dependencies = [ - "aws-credential-types", - "aws-smithy-async", - "aws-smithy-client", - "aws-smithy-http", - "aws-smithy-types", - "http", - "rustc_version", - "tracing", -] - -[[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" - -[[package]] -name = "base64" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" - -[[package]] -name = "base64-simd" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" -dependencies = [ - "outref", - "vsimd", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "bottlerocket-types" -version = "0.0.9" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" -dependencies = [ - "builder-derive", - "configuration-derive", - "serde", - "serde_json", - "serde_plain", - "serde_yaml 0.8.26", - "testsys-model", -] - -[[package]] -name = "bottlerocket-variant" -version = "0.1.0" -dependencies = [ - "generate-readme", - "serde", - "snafu", -] - -[[package]] -name = "bstr" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "builder-derive" -version = "0.0.9" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "syn 1.0.109", -] - -[[package]] -name = "buildsys" -version = "0.1.0" -dependencies = [ - "bottlerocket-variant", - "duct", - "hex", - "lazy_static", - "nonzero_ext", - "rand", - "regex", - "reqwest", - "serde", - "serde_plain", - "sha2", - "snafu", - "toml", - "url", - "walkdir", -] - -[[package]] -name = "bumpalo" -version = "3.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" - -[[package]] -name = "bytecount" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "bytes-utils" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" -dependencies = [ - "bytes", - "either", -] - -[[package]] -name = "cargo-readme" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66dbfc9307f5b2429656e07533613cd3f26803fd2857fc33be22aa2711181d58" -dependencies = [ - "clap 2.34.0", - "lazy_static", - "percent-encoding", - "regex", - "serde", - "serde_derive", - "toml", -] - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "num-traits", - "serde", - "winapi", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "clap" -version = "4.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" -dependencies = [ - "clap_builder", - "clap_derive", - "once_cell", -] - -[[package]] -name = "clap_builder" -version = "4.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" -dependencies = [ - "anstream", - 
"anstyle", - "clap_lex", - "strsim 0.10.0", -] - -[[package]] -name = "clap_derive" -version = "4.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "clap_lex" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" - -[[package]] -name = "coldsnap" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa54b44a1a199e3f37ba30ffb7391ed2fe1e4deb15cc55232786b2ca228cb33" -dependencies = [ - "argh", - "async-trait", - "aws-config", - "aws-sdk-ebs", - "aws-sdk-ec2", - "aws-smithy-http", - "aws-types", - "base64 0.13.1", - "bytes", - "env_logger", - "futures", - "indicatif", - "log", - "nix", - "sha2", - "snafu", - "tempfile", - "tokio", -] - -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - -[[package]] -name = "configuration-derive" -version = "0.0.9" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "console" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys 0.45.0", -] - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" - -[[package]] -name = "cpufeatures" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32c" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8f48d60e5b4d2c53d5c2b1d8a58c849a70ae5e5509b08a48d047e3b65714a74" -dependencies = [ - "rustc_version", -] - -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "dashmap" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" -dependencies = [ - "cfg-if", - "hashbrown", - "lock_api", - "once_cell", - "parking_lot_core", -] - -[[package]] -name = "deranged" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", - "subtle", -] - -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "duct" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ae3fc31835f74c2a7ceda3aeede378b0ae2e74c8f1c36559fcc9ae2a4e7d3e" -dependencies = [ - "libc", - "once_cell", - "os_pipe", - "shared_child", -] - -[[package]] -name = "dyn-clone" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" - -[[package]] -name = "either" 
-version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -name = "encoding_rs" -version = "0.8.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "env_logger" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" -dependencies = [ - "humantime", - "is-terminal", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "errno" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "form_urlencoded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" - -[[package]] -name = "futures-executor" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" - -[[package]] -name = "futures-macro" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "futures-sink" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" - -[[package]] -name = "futures-task" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - -[[package]] -name = "futures-util" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generate-readme" -version = "0.1.0" -dependencies = [ - "cargo-readme", - "snafu", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" - -[[package]] -name = "globset" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" -dependencies = [ - "aho-corasick", - "bstr", - "fnv", - "log", - "regex", -] - -[[package]] -name = "governor" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c390a940a5d157878dd057c78680a33ce3415bcd05b4799509ea44210914b4d5" -dependencies = [ - "cfg-if", - "dashmap", - "futures", - "futures-timer", - "no-std-compat", - "nonzero_ext", - "parking_lot", - "quanta", - "rand", - "smallvec", -] - -[[package]] -name = "h2" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "handlebars" -version = "4.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest", -] - -[[package]] -name = "home" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "http" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" -dependencies = [ - "http", - "hyper", - "log", - "rustls 0.20.8", - "rustls-native-certs", - "tokio", - "tokio-rustls 0.23.4", -] - -[[package]] -name = "hyper-rustls" 
-version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" -dependencies = [ - "futures-util", - "http", - "hyper", - "log", - "rustls 0.21.6", - "rustls-native-certs", - "tokio", - "tokio-rustls 0.24.1", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "indicatif" -version = "0.17.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" -dependencies = [ - "console", - "instant", - "number_prefix", - "portable-atomic", - "unicode-width", -] - -[[package]] -name = "infrasys" -version = "0.1.0" -dependencies = [ - "assert-json-diff", - "async-trait", - "aws-config", - "aws-sdk-cloudformation", - "aws-sdk-s3", - "aws-types", - "clap 4.3.23", - "hex", - "log", - "pubsys-config", - "serde_json", - "serde_yaml 0.9.21", - "sha2", - "shell-words", - "simplelog", - "snafu", - "tokio", - "url", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "ipnet" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" - -[[package]] -name = "is-terminal" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi 0.3.2", - "rustix", - "windows-sys 0.48.0", -] - -[[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "js-sys" -version = "0.3.64" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "json-patch" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f54898088ccb91df1b492cc80029a6fdf1c48ca0db7c6822a8babad69c94658" -dependencies = [ - "serde", - "serde_json", - "thiserror", - "treediff", -] - -[[package]] -name = "jsonpath_lib" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" -dependencies = [ - "log", - "serde", - "serde_json", -] - -[[package]] -name = "k8s-openapi" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" -dependencies = [ - "base64 0.21.2", - "bytes", - "chrono", - "serde", - "serde-value", - "serde_json", -] - -[[package]] -name = "kube" -version = "0.82.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc7d3d52dd5c871991679102e80dfb192faaaa09fecdbccdd8c55af264ce7a8f" -dependencies = [ - "k8s-openapi", - "kube-client", - "kube-core", - "kube-derive", -] - -[[package]] -name = "kube-client" -version = "0.82.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "544339f1665488243f79080441cacb09c997746fd763342303e66eebb9d3ba13" -dependencies = [ - "base64 0.20.0", - "bytes", - "chrono", - "dirs-next", - "either", - "futures", - "http", - "http-body", - "hyper", - "hyper-rustls 0.24.1", - "hyper-timeout", - "jsonpath_lib", - "k8s-openapi", - "kube-core", - "pem", - "pin-project", - "rand", - "rustls 0.21.6", - "rustls-pemfile", - "secrecy", - "serde", - "serde_json", - "serde_yaml 0.9.21", - "thiserror", - "tokio", - "tokio-tungstenite", - "tokio-util", - "tower", - "tower-http", - "tracing", -] - -[[package]] -name = "kube-core" -version = "0.82.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25983d07f414dfffba08c5951fe110f649113416b1d8e22f7c89c750eb2555a7" -dependencies = [ - "chrono", - "form_urlencoded", - "http", - "json-patch", - "k8s-openapi", - "once_cell", - "schemars", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "kube-derive" -version = "0.82.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af652b642aca19ef5194de3506aa39f89d788d5326a570da68b13a02d6c5ba2" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "serde_json", - "syn 1.0.109", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" - -[[package]] -name = "lock_api" -version = "0.4.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "md-5" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" -dependencies = [ - "digest", -] - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" -dependencies = [ - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", -] - -[[package]] -name = "nix" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "static_assertions", -] - -[[package]] -name = "no-std-compat" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" - -[[package]] -name = "nonzero_ext" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" 
-dependencies = [ - "hermit-abi 0.3.2", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - -[[package]] -name = "object" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" -dependencies = [ - "memchr", -] - -[[package]] -name = "olpc-cjson" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d637c9c15b639ccff597da8f4fa968300651ad2f1e968aefc3b4927a6fb2027a" -dependencies = [ - "serde", - "serde_json", - "unicode-normalization", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "ordered-float" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" -dependencies = [ - "num-traits", -] - -[[package]] -name = "os_pipe" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "outref" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" - -[[package]] -name = "papergrid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1526bb6aa9f10ec339fb10360f22c57edf81d5678d0278e93bc12a47ffbe4b01" -dependencies = [ - "bytecount", - "fnv", - "unicode-width", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall 0.3.5", - "smallvec", - "windows-targets 0.48.5", -] - -[[package]] -name = "parse-datetime" -version = "0.1.0" -dependencies = [ - "chrono", - "generate-readme", - "snafu", -] - -[[package]] -name = "path-absolutize" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43eb3595c63a214e1b37b44f44b0a84900ef7ae0b4c5efce59e123d246d7a0de" -dependencies = [ - "path-dedot", -] - -[[package]] -name = "path-dedot" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d55e486337acb9973cdea3ec5638c1b3bcb22e573b2b7b41969e0c744d5a15e" -dependencies = [ - "once_cell", -] - -[[package]] -name = "pem" 
-version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "percent-encoding" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" - -[[package]] -name = "pest" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" -dependencies = [ - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "pest_meta" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" -dependencies = [ - "once_cell", - "pest", - "sha2", -] - -[[package]] -name = "pin-project" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "portable-atomic" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro2" -version = "1.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "pubsys" -version = "0.1.0" -dependencies = [ - "aws-config", - "aws-credential-types", - "aws-sdk-ebs", - "aws-sdk-ec2", - "aws-sdk-kms", - "aws-sdk-ssm", - "aws-sdk-sts", - "aws-smithy-types", - "aws-types", - "buildsys", - "chrono", - "clap 4.3.23", - "coldsnap", - "duct", - "futures", - "governor", - "indicatif", - "lazy_static", - "log", - "nonzero_ext", - "num_cpus", - "parse-datetime", - "pubsys-config", - "rayon", - "reqwest", - "semver", - "serde", - "serde_json", - "serde_plain", - "simplelog", - "snafu", - "tabled", - "tempfile", - "tinytemplate", - "tokio", - "tokio-stream", - "toml", - "tough", - "tough-kms", - "tough-ssm", - "update_metadata", - "url", -] - -[[package]] -name = "pubsys-config" -version = "0.1.0" -dependencies = [ - "chrono", - "home", - "lazy_static", - "log", - "parse-datetime", - "serde", - "serde_yaml 0.9.21", - "snafu", - "toml", - "url", -] - -[[package]] -name = "pubsys-setup" -version = "0.1.0" -dependencies = [ - "clap 4.3.23", - "hex", - "log", - "pubsys-config", - "reqwest", - "sha2", - "shell-words", - "simplelog", - "snafu", - "tempfile", - "url", -] - -[[package]] -name = "quanta" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" -dependencies = [ - "crossbeam-utils", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - -[[package]] -name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "rayon" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-utils", - "num_cpus", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom", - "redox_syscall 0.2.16", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" - -[[package]] -name = "reqwest" -version = "0.11.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" -dependencies = [ - "base64 0.21.2", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls 0.24.1", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.6", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots", - "winreg", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.38.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" -dependencies = [ - "bitflags 2.4.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustls" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" -dependencies = [ - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.21.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" -dependencies = [ - "base64 0.21.2", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "schannel" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "schemars" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847b767a3d62d95cbf3d8a9f0e421cf57a0d8aa4f411d4b16525afb0284d4ed" -dependencies = [ - "dyn-clone", - "schemars_derive", - "serde", - "serde_json", -] - -[[package]] -name = "schemars_derive" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4d7e1b012cb3d9129567661a63755ea4b8a7386d339dc945ae187e403c6743" -dependencies = [ - "proc-macro2", - "quote", - "serde_derive_internals", - "syn 1.0.109", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secrecy" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" -dependencies = [ - "serde", - "zeroize", -] - -[[package]] -name = "security-framework" -version = "2.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" -dependencies = [ - "serde", -] - -[[package]] -name = "serde" -version = "1.0.185" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-value" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" -dependencies = [ - "ordered-float", - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.185" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "serde_derive_internals" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "serde_json" -version = "1.0.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_plain" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", -] - -[[package]] -name = "serde_yaml" -version = "0.9.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "sha2" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "shared_child" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d94659ad3c2137fef23ae75b03d5241d633f8acded53d672decfa0e6e0caef" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - -[[package]] -name = "signal-hook-registry" -version = 
"1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "simplelog" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee08041c5de3d5048c8b3f6f13fafb3026b24ba43c6a695a0c76179b844369" -dependencies = [ - "log", - "termcolor", - "time", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" - -[[package]] -name = "snafu" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" -dependencies = [ - "backtrace", - "doc-comment", - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "subtle" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tabled" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c3ee73732ffceaea7b8f6b719ce3bb17f253fa27461ffeaf568ebd0cdb4b85" -dependencies = [ - "papergrid", - "tabled_derive", - "unicode-width", -] - -[[package]] -name = "tabled_derive" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "tempfile" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" -dependencies = [ - "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.48.0", -] - -[[package]] -name = "term_size" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e4129646ca0ed8f45d09b929036bafad5377103edd06e50bf574b353d2b08d9" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "testsys" -version = "0.1.0" -dependencies = [ - "async-trait", - "aws-config", - "aws-sdk-ec2", - "base64 0.20.0", - "bottlerocket-types", - "bottlerocket-variant", - "clap 4.3.23", - "env_logger", - "fastrand 1.9.0", - "futures", - "handlebars", - "log", - "maplit", - "pubsys-config", - "serde", - "serde_json", - "serde_plain", - "serde_yaml 0.9.21", - "snafu", - "term_size", - "testsys-config", - "testsys-model", - "tokio", - "unescape", - "url", -] - -[[package]] -name = "testsys-config" -version = "0.1.0" -dependencies = [ - "bottlerocket-types", - "bottlerocket-variant", - "handlebars", - "log", - "maplit", - "serde", - "serde_plain", - "serde_yaml 0.9.21", - "snafu", - "testsys-model", - "toml", -] - -[[package]] -name = "testsys-model" -version = "0.0.9" -source = "git+https://github.com/bottlerocket-os/bottlerocket-test-system?tag=v0.0.9#2491a2a122cf75bd7df23accc3574141669568ea" -dependencies = [ - "async-recursion", - "async-trait", - "base64 0.20.0", - "bytes", - "chrono", - "futures", - "http", - "json-patch", - "k8s-openapi", - "kube", - "lazy_static", - "log", - "maplit", - "regex", - "schemars", - "serde", - "serde_json", - "serde_plain", - "serde_yaml 0.8.26", - "snafu", - "tabled", - "tokio", - "tokio-util", - "topological-sort", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "time" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" -dependencies = [ - "deranged", - "itoa", - "libc", - "num_threads", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" - -[[package]] -name = "time-macros" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" -dependencies = [ - "time-core", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" -dependencies = [ - "autocfg", - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.8", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.6", - "tokio", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - 
-[[package]] -name = "topological-sort" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" - -[[package]] -name = "tough" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda3efa9005cf9c1966984c3b9a44c3f37b7ed2c95ba338d6ad51bba70e989a0" -dependencies = [ - "chrono", - "dyn-clone", - "globset", - "hex", - "log", - "olpc-cjson", - "path-absolutize", - "pem", - "percent-encoding", - "reqwest", - "ring", - "serde", - "serde_json", - "serde_plain", - "snafu", - "tempfile", - "untrusted", - "url", - "walkdir", -] - -[[package]] -name = "tough-kms" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc49c1a5300e54484604162ec78417fc39306f0c9e2c98166df3ebfa203d6800" -dependencies = [ - "aws-config", - "aws-sdk-kms", - "pem", - "ring", - "snafu", - "tokio", - "tough", -] - -[[package]] -name = "tough-ssm" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf4932265842607b42840e65f3fde9dde2834eaa97209b994d6c1a7ff9f3fd7" -dependencies = [ - "aws-config", - "aws-sdk-ssm", - "snafu", - "tokio", - "tough", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-http" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" -dependencies = [ - "base64 0.21.2", - "bitflags 2.4.0", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "mime", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", -] - -[[package]] -name = "treediff" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52984d277bdf2a751072b5df30ec0377febdb02f7696d64c2d7d54630bac4303" -dependencies = [ - "serde_json", -] - -[[package]] -name = 
"try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - -[[package]] -name = "unescape" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unsafe-libyaml" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "update_metadata" -version = "0.1.0" -dependencies = [ - "chrono", - "parse-datetime", - "regex", - "semver", - "serde", - "serde_json", - "serde_plain", - "snafu", - "toml", -] - -[[package]] -name = "url" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", - "serde", -] - -[[package]] -name = "urlencoding" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "vsimd" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" - -[[package]] -name = "walkdir" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.29", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "web-sys" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.25.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - -[[package]] -name = "xmlparser" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "zeroize" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" diff --git a/tools/Cargo.toml b/tools/Cargo.toml deleted file mode 100644 index e6efc568..00000000 --- a/tools/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[workspace] -resolver = "1" -members = [ - "infrasys", - "buildsys", - "pubsys", - "pubsys-config", - "pubsys-setup", - "testsys", - 
"testsys-config", -] diff --git a/tools/buildsys/.gitignore b/tools/buildsys/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/tools/buildsys/Cargo.toml b/tools/buildsys/Cargo.toml deleted file mode 100644 index 7f882cad..00000000 --- a/tools/buildsys/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "buildsys" -version = "0.1.0" -authors = ["Ben Cressey "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false -# Don't rebuild crate just because of changes to README. -exclude = ["README.md"] - -[dependencies] -bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } -duct = "0.13" -hex = "0.4" -lazy_static = "1" -rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } -regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -serde = { version = "1", features = ["derive"] } -serde_plain = "1" -sha2 = "0.10" -snafu = "0.7" -toml = "0.5" -url = { version = "2", features = ["serde"] } -walkdir = "2" -nonzero_ext = "0.3" diff --git a/tools/buildsys/src/bin/bottlerocket-variant/main.rs b/tools/buildsys/src/bin/bottlerocket-variant/main.rs deleted file mode 100644 index d994a983..00000000 --- a/tools/buildsys/src/bin/bottlerocket-variant/main.rs +++ /dev/null @@ -1,70 +0,0 @@ -use bottlerocket_variant::Variant; -use buildsys::manifest::ManifestInfo; -use snafu::ResultExt; -use std::path::PathBuf; -use std::{env, process}; - -// Returning a Result from main makes it print a Debug representation of the error, but with Snafu -// we have nice Display representations of the error, so we wrap "main" (run) and print any error. -// https://github.com/shepmaster/snafu/issues/110 -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -/// Read `BUILDSYS_VARIANT` from the environment, parse into its components, and emit related -/// environment variables to set (or export). Do the same for features defined in the variant -/// manifest. -fn run() -> Result<()> { - let env = getenv("BUILDSYS_VARIANT")?; - let variant = Variant::new(&env).context(error::VariantParseSnafu)?; - println!("BUILDSYS_VARIANT_PLATFORM={}", variant.platform()); - println!("BUILDSYS_VARIANT_RUNTIME={}", variant.runtime()); - println!("BUILDSYS_VARIANT_FAMILY={}", variant.family()); - println!( - "BUILDSYS_VARIANT_FLAVOR={}", - variant.variant_flavor().unwrap_or("''") - ); - let manifest = PathBuf::from(getenv("BUILDSYS_ROOT_DIR")?) - .join("variants") - .join(&env) - .join("Cargo.toml"); - let variant_manifest = ManifestInfo::new(manifest).context(error::ManifestParseSnafu)?; - if let Some(image_features) = variant_manifest.image_features() { - for image_feature in image_features { - println!("export BUILDSYS_VARIANT_IMAGE_FEATURE_{}=1", image_feature); - } - } - Ok(()) -} - -/// Retrieve a variable that we expect to be set in the environment. 
-fn getenv(var: &str) -> Result { - env::var(var).context(error::EnvironmentSnafu { var }) -} - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - VariantParse { - source: bottlerocket_variant::error::Error, - }, - - ManifestParse { - source: buildsys::manifest::Error, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - } -} - -type Result = std::result::Result; diff --git a/tools/buildsys/src/builder.rs b/tools/buildsys/src/builder.rs deleted file mode 100644 index 0949d354..00000000 --- a/tools/buildsys/src/builder.rs +++ /dev/null @@ -1,650 +0,0 @@ -/*! -This module handles the calls to Docker needed to execute package and variant -builds. The actual build steps and the expected parameters are defined in -the repository's top-level Dockerfile. - -*/ -pub(crate) mod error; -use error::Result; - -use duct::cmd; -use lazy_static::lazy_static; -use nonzero_ext::nonzero; -use rand::Rng; -use regex::Regex; -use sha2::{Digest, Sha512}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashSet; -use std::env; -use std::fs::{self, read_dir, File}; -use std::num::NonZeroU16; -use std::path::{Path, PathBuf}; -use std::process::Output; -use walkdir::{DirEntry, WalkDir}; - -use buildsys::manifest::{ImageFeature, ImageFormat, ImageLayout, PartitionPlan, SupportedArch}; - -/* -There's a bug in BuildKit that can lead to a build failure during parallel -`docker build` executions: - https://github.com/moby/buildkit/issues/1090 - -Unfortunately we can't do much to control the concurrency here, and even when -the bug is fixed there will be many older versions of Docker in the wild. - -The failure has an exit code of 1, which is too generic to be helpful. All we -can do is check the output for the error's signature, and retry if we find it. -*/ -lazy_static! { - static ref DOCKER_BUILD_FRONTEND_ERROR: Regex = Regex::new(concat!( - r#"failed to solve with frontend dockerfile.v0: "#, - r#"failed to solve with frontend gateway.v0: "#, - r#"frontend grpc server closed unexpectedly"# - )) - .unwrap(); -} - -/* -There's a similar bug that's fixed in new releases of BuildKit but still in the wild in popular -versions of Docker/BuildKit: - https://github.com/moby/buildkit/issues/1468 -*/ -lazy_static! { - static ref DOCKER_BUILD_DEAD_RECORD_ERROR: Regex = Regex::new(concat!( - r#"failed to solve with frontend dockerfile.v0: "#, - r#"failed to solve with frontend gateway.v0: "#, - r#"rpc error: code = Unknown desc = failed to build LLB: "#, - r#"failed to get dead record"#, - )) - .unwrap(); -} - -/* -We also see sporadic CI failures with only this error message. -We use (?m) for multi-line mode so we can match the message on a line of its own without splitting -the output ourselves; we match the regexes against the whole of stdout. -*/ -lazy_static! { - static ref UNEXPECTED_EOF_ERROR: Regex = Regex::new("(?m)unexpected EOF$").unwrap(); -} - -/* -Sometimes new RPMs are not fully written to the host directory before another build starts, which -exposes `createrepo_c` to partially-written RPMs that cannot be added to the repo metadata. Retry -these errors by restarting the build since the alternatives are to ignore the `createrepo_c` exit -code (masking other problems) or aggressively `sync()` the host directory (hurting performance). -*/ -lazy_static! 
{ - static ref CREATEREPO_C_READ_HEADER_ERROR: Regex = Regex::new(®ex::escape( - r#"C_CREATEREPOLIB: Warning: read_header: rpmReadPackageFile() error"# - )) - .unwrap(); -} - -static DOCKER_BUILD_MAX_ATTEMPTS: NonZeroU16 = nonzero!(10u16); - -pub(crate) struct PackageBuilder; - -impl PackageBuilder { - /// Build RPMs for the specified package. - pub(crate) fn build( - package: &str, - image_features: Option>, - ) -> Result { - let output_dir: PathBuf = getenv("BUILDSYS_PACKAGES_DIR")?.into(); - let arch = getenv("BUILDSYS_ARCH")?; - let goarch = serde_plain::from_str::(&arch) - .context(error::UnsupportedArchSnafu { arch: &arch })? - .goarch(); - - let mut args = Vec::new(); - args.push("--network".into()); - args.push("none".into()); - args.build_arg("PACKAGE", package); - args.build_arg("ARCH", &arch); - args.build_arg("GOARCH", goarch); - - // Pass certain environment variables into the build environment. These variables aren't - // automatically used to trigger rebuilds when they change, because most packages aren't - // affected. Packages that care should "echo cargo:rerun-if-env-changed=VAR" in their - // build.rs build script. - for (src_env_var, target_env_var) in [ - ("BUILDSYS_VARIANT", "VARIANT"), - ("BUILDSYS_VARIANT_PLATFORM", "VARIANT_PLATFORM"), - ("BUILDSYS_VARIANT_RUNTIME", "VARIANT_RUNTIME"), - ("BUILDSYS_VARIANT_FAMILY", "VARIANT_FAMILY"), - ("BUILDSYS_VARIANT_FLAVOR", "VARIANT_FLAVOR"), - ("PUBLISH_REPO", "REPO"), - ] { - let src_env_val = - env::var(src_env_var).context(error::EnvironmentSnafu { var: src_env_var })?; - args.build_arg(target_env_var, src_env_val); - } - - let tag = format!( - "buildsys-pkg-{package}-{arch}", - package = package, - arch = arch, - ); - - if let Some(image_features) = image_features { - for image_feature in image_features.iter() { - args.build_arg(format!("{}", image_feature), "1"); - } - } - - build(BuildType::Package, package, &arch, args, &tag, &output_dir)?; - - Ok(Self) - } -} - -pub(crate) struct VariantBuilder; - -impl VariantBuilder { - /// Build a variant with the specified packages installed. - pub(crate) fn build( - packages: &[String], - image_format: Option<&ImageFormat>, - image_layout: Option<&ImageLayout>, - kernel_parameters: Option<&Vec>, - image_features: Option>, - ) -> Result { - let output_dir: PathBuf = getenv("BUILDSYS_OUTPUT_DIR")?.into(); - - let variant = getenv("BUILDSYS_VARIANT")?; - let arch = getenv("BUILDSYS_ARCH")?; - let goarch = serde_plain::from_str::(&arch) - .context(error::UnsupportedArchSnafu { arch: &arch })? - .goarch(); - - let image_layout = image_layout.cloned().unwrap_or_default(); - let ImageLayout { - os_image_size_gib, - data_image_size_gib, - partition_plan, - .. 
- } = image_layout; - - let (os_image_publish_size_gib, data_image_publish_size_gib) = - image_layout.publish_image_sizes_gib(); - - let mut args = Vec::new(); - args.push("--network".into()); - args.push("host".into()); - args.build_arg("PACKAGES", packages.join(" ")); - args.build_arg("ARCH", &arch); - args.build_arg("GOARCH", goarch); - args.build_arg("VARIANT", &variant); - args.build_arg("VERSION_ID", getenv("BUILDSYS_VERSION_IMAGE")?); - args.build_arg("BUILD_ID", getenv("BUILDSYS_VERSION_BUILD")?); - args.build_arg("PRETTY_NAME", getenv("BUILDSYS_PRETTY_NAME")?); - args.build_arg("IMAGE_NAME", getenv("BUILDSYS_NAME")?); - args.build_arg( - "IMAGE_FORMAT", - match image_format { - Some(ImageFormat::Raw) | None => "raw", - Some(ImageFormat::Qcow2) => "qcow2", - Some(ImageFormat::Vmdk) => "vmdk", - }, - ); - args.build_arg("OS_IMAGE_SIZE_GIB", format!("{}", os_image_size_gib)); - args.build_arg("DATA_IMAGE_SIZE_GIB", format!("{}", data_image_size_gib)); - args.build_arg( - "OS_IMAGE_PUBLISH_SIZE_GIB", - format!("{}", os_image_publish_size_gib), - ); - args.build_arg( - "DATA_IMAGE_PUBLISH_SIZE_GIB", - format!("{}", data_image_publish_size_gib), - ); - args.build_arg( - "PARTITION_PLAN", - match partition_plan { - PartitionPlan::Split => "split", - PartitionPlan::Unified => "unified", - }, - ); - args.build_arg( - "KERNEL_PARAMETERS", - kernel_parameters.map(|v| v.join(" ")).unwrap_or_default(), - ); - - if let Some(image_features) = image_features { - for image_feature in image_features.iter() { - args.build_arg(format!("{}", image_feature), "1"); - } - } - - // Add known secrets to the build argments. - add_secrets(&mut args)?; - - // Always rebuild variants since they are located in a different workspace, - // and don't directly track changes in the underlying packages. - getenv("BUILDSYS_TIMESTAMP")?; - - let tag = format!( - "buildsys-var-{variant}-{arch}", - variant = variant, - arch = arch - ); - - build(BuildType::Variant, &variant, &arch, args, &tag, &output_dir)?; - - Ok(Self) - } -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -enum BuildType { - Package, - Variant, -} - -/// Invoke a series of `docker` commands to drive a package or variant build. -fn build( - kind: BuildType, - what: &str, - arch: &str, - build_args: Vec, - tag: &str, - output_dir: &PathBuf, -) -> Result<()> { - // Our Dockerfile is in the top-level directory. - let root = getenv("BUILDSYS_ROOT_DIR")?; - env::set_current_dir(&root).context(error::DirectoryChangeSnafu { path: &root })?; - - // Compute a per-checkout prefix for the tag to avoid collisions. - let mut d = Sha512::new(); - d.update(&root); - let digest = hex::encode(d.finalize()); - let token = &digest[..12]; - let tag = format!("{}-{}", tag, token); - - // Our SDK and toolchain are picked by the external `cargo make` invocation. - let sdk = getenv("BUILDSYS_SDK_IMAGE")?; - let toolchain = getenv("BUILDSYS_TOOLCHAIN")?; - - // Avoid using a cached layer from a previous build. - let nocache = rand::thread_rng().gen::(); - - // Create a directory for tracking outputs before we move them into position. - let build_dir = create_build_dir(&kind, what, arch)?; - - // Clean up any previous outputs we have tracked. - clean_build_files(&build_dir, output_dir)?; - - let target = match kind { - BuildType::Package => "package", - BuildType::Variant => "variant", - }; - - let mut build = format!( - "build . 
\ - --target {target} \ - --tag {tag}", - target = target, - tag = tag, - ) - .split_string(); - - build.extend(build_args); - build.build_arg("SDK", sdk); - build.build_arg("TOOLCHAIN", toolchain); - build.build_arg("NOCACHE", nocache.to_string()); - // Avoid using a cached layer from a concurrent build in another checkout. - build.build_arg("TOKEN", token); - - let create = format!("create --name {} {} true", tag, tag).split_string(); - let cp = format!("cp {}:/output/. {}", tag, build_dir.display()).split_string(); - let rm = format!("rm --force {}", tag).split_string(); - let rmi = format!("rmi --force {}", tag).split_string(); - - // Clean up the stopped container if it exists. - let _ = docker(&rm, Retry::No); - - // Clean up the previous image if it exists. - let _ = docker(&rmi, Retry::No); - - // Build the image, which builds the artifacts we want. - // Work around transient, known failure cases with Docker. - docker( - &build, - Retry::Yes { - attempts: DOCKER_BUILD_MAX_ATTEMPTS, - messages: &[ - &*DOCKER_BUILD_FRONTEND_ERROR, - &*DOCKER_BUILD_DEAD_RECORD_ERROR, - &*UNEXPECTED_EOF_ERROR, - &*CREATEREPO_C_READ_HEADER_ERROR, - ], - }, - )?; - - // Create a stopped container so we can copy artifacts out. - docker(&create, Retry::No)?; - - // Copy artifacts into our output directory. - docker(&cp, Retry::No)?; - - // Clean up our stopped container after copying artifacts out. - docker(&rm, Retry::No)?; - - // Clean up our image now that we're done. - docker(&rmi, Retry::No)?; - - // Copy artifacts to the expected directory and write markers to track them. - copy_build_files(&build_dir, output_dir)?; - - Ok(()) -} - -/// Run `docker` with the specified arguments. -fn docker(args: &[String], retry: Retry) -> Result { - let mut max_attempts: u16 = 1; - let mut retry_messages: &[&Regex] = &[]; - if let Retry::Yes { attempts, messages } = retry { - max_attempts = attempts.into(); - retry_messages = messages; - } - - let mut attempt = 1; - loop { - let output = cmd("docker", args) - .stderr_to_stdout() - .stdout_capture() - .unchecked() - .run() - .context(error::CommandStartSnafu)?; - - let stdout = String::from_utf8_lossy(&output.stdout); - println!("{}", &stdout); - if output.status.success() { - return Ok(output); - } - - ensure!( - retry_messages.iter().any(|m| m.is_match(&stdout)) && attempt < max_attempts, - error::DockerExecutionSnafu { - args: &args.join(" ") - } - ); - - attempt += 1; - } -} - -/// Allow the caller to configure retry behavior, since the command may fail -/// for spurious reasons that should not be treated as an error. -enum Retry<'a> { - No, - Yes { - attempts: NonZeroU16, - messages: &'a [&'static Regex], - }, -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -/// Add secrets that might be needed for builds. Since most builds won't use -/// them, they are not automatically tracked for changes. If necessary, builds -/// can emit the relevant cargo directives for tracking in their build script. 
-fn add_secrets(args: &mut Vec) -> Result<()> { - let sbkeys_var = "BUILDSYS_SBKEYS_PROFILE_DIR"; - let sbkeys_dir = env::var(sbkeys_var).context(error::EnvironmentSnafu { var: sbkeys_var })?; - - let sbkeys = read_dir(&sbkeys_dir).context(error::DirectoryReadSnafu { path: &sbkeys_dir })?; - for s in sbkeys { - let s = s.context(error::DirectoryReadSnafu { path: &sbkeys_dir })?; - args.build_secret( - "file", - &s.file_name().to_string_lossy(), - &s.path().to_string_lossy(), - ); - } - - for var in &[ - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "AWS_SESSION_TOKEN", - ] { - let id = format!("{}.env", var.to_lowercase().replace('_', "-")); - args.build_secret("env", &id, var); - } - - Ok(()) -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -/// Create a directory for build artifacts. -fn create_build_dir(kind: &BuildType, name: &str, arch: &str) -> Result { - let prefix = match kind { - BuildType::Package => "packages", - BuildType::Variant => "variants", - }; - - let path = [&getenv("BUILDSYS_STATE_DIR")?, arch, prefix, name] - .iter() - .collect(); - - fs::create_dir_all(&path).context(error::DirectoryCreateSnafu { path: &path })?; - - Ok(path) -} - -const MARKER_EXTENSION: &str = ".buildsys_marker"; - -/// Copy build artifacts to the output directory. -/// Before we copy each file, we create a corresponding marker file to record its existence. -fn copy_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> -where - P: AsRef, -{ - fn has_artifacts(entry: &DirEntry) -> bool { - let is_dir = entry.path().is_dir(); - let is_file = entry.file_type().is_file(); - let is_not_marker = is_file - && entry - .file_name() - .to_str() - .map(|s| !s.ends_with(MARKER_EXTENSION)) - .unwrap_or(false); - let is_symlink = entry.file_type().is_symlink(); - is_dir || is_not_marker || is_symlink - } - - for artifact_file in find_files(&build_dir, has_artifacts) { - let mut marker_file = artifact_file.clone().into_os_string(); - marker_file.push(MARKER_EXTENSION); - File::create(&marker_file).context(error::FileCreateSnafu { path: &marker_file })?; - - let mut output_file: PathBuf = output_dir.as_ref().into(); - output_file.push(artifact_file.strip_prefix(&build_dir).context( - error::StripPathPrefixSnafu { - path: &marker_file, - prefix: build_dir.as_ref(), - }, - )?); - - let parent_dir = output_file - .parent() - .context(error::BadDirectorySnafu { path: &output_file })?; - fs::create_dir_all(parent_dir) - .context(error::DirectoryCreateSnafu { path: &parent_dir })?; - - fs::rename(&artifact_file, &output_file).context(error::FileRenameSnafu { - old_path: &artifact_file, - new_path: &output_file, - })?; - } - - Ok(()) -} - -/// Remove build artifacts from the output directory. -/// Any marker file we find could have a corresponding file that should be cleaned up. -/// We also clean up the marker files so they do not accumulate across builds. -/// For the same reason, if a directory is empty after build artifacts, marker files, and other -/// empty directories have been removed, then that directory will also be removed. -fn clean_build_files
<P>
(build_dir: P, output_dir: P) -> Result<()> -where - P: AsRef, -{ - let build_dir = build_dir.as_ref(); - let output_dir = output_dir.as_ref(); - - fn has_markers(entry: &DirEntry) -> bool { - let is_dir = entry.path().is_dir(); - let is_file = entry.file_type().is_file(); - let is_marker = is_file - && entry - .file_name() - .to_str() - .map(|s| s.ends_with(MARKER_EXTENSION)) - .unwrap_or(false); - is_dir || is_marker - } - - fn cleanup(path: &Path, top: &Path, dirs: &mut HashSet) -> Result<()> { - if !path.exists() && !path.is_symlink() { - return Ok(()); - } - std::fs::remove_file(path).context(error::FileRemoveSnafu { path })?; - let mut parent = path.parent(); - while let Some(p) = parent { - if p == top || dirs.contains(p) { - break; - } - dirs.insert(p.into()); - parent = p.parent() - } - Ok(()) - } - - fn is_empty_dir(path: &Path) -> Result { - Ok(path.is_dir() - && path - .read_dir() - .context(error::DirectoryReadSnafu { path })? - .next() - .is_none()) - } - - let mut clean_dirs: HashSet = HashSet::new(); - - for marker_file in find_files(&build_dir, has_markers) { - let mut output_file: PathBuf = output_dir.into(); - output_file.push(marker_file.strip_prefix(build_dir).context( - error::StripPathPrefixSnafu { - path: &marker_file, - prefix: build_dir, - }, - )?); - output_file.set_extension(""); - cleanup(&output_file, output_dir, &mut clean_dirs)?; - cleanup(&marker_file, build_dir, &mut clean_dirs)?; - } - - // Clean up directories in reverse order, so that empty child directories don't stop an - // otherwise empty parent directory from being removed. - let mut clean_dirs = clean_dirs.into_iter().collect::>(); - clean_dirs.sort_by(|a, b| b.cmp(a)); - - for clean_dir in clean_dirs { - if is_empty_dir(&clean_dir)? { - std::fs::remove_dir(&clean_dir) - .context(error::DirectoryRemoveSnafu { path: &clean_dir })?; - } - } - - Ok(()) -} - -/// Create an iterator over files matching the supplied filter. -fn find_files
<P>
( - dir: P, - filter: for<'r> fn(&'r walkdir::DirEntry) -> bool, -) -> impl Iterator -where - P: AsRef, -{ - WalkDir::new(&dir) - .follow_links(false) - .same_file_system(true) - .min_depth(1) - .into_iter() - .filter_entry(filter) - .flat_map(|e| e.context(error::DirectoryWalkSnafu)) - .map(|e| e.into_path()) - .filter(|e| e.is_file() || e.is_symlink()) -} - -/// Retrieve a BUILDSYS_* variable that we expect to be set in the environment, -/// and ensure that we track it for changes, since it will directly affect the -/// output. -fn getenv(var: &str) -> Result { - println!("cargo:rerun-if-env-changed={}", var); - env::var(var).context(error::EnvironmentSnafu { var }) -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -/// Helper trait for constructing buildkit --build-arg arguments. -trait BuildArg { - fn build_arg(&mut self, key: S1, value: S2) - where - S1: AsRef, - S2: AsRef; -} - -impl BuildArg for Vec { - fn build_arg(&mut self, key: S1, value: S2) - where - S1: AsRef, - S2: AsRef, - { - self.push("--build-arg".to_string()); - self.push(format!("{}={}", key.as_ref(), value.as_ref())); - } -} - -/// Helper trait for constructing buildkit --secret arguments. -trait BuildSecret { - fn build_secret(&mut self, typ: S, id: S, src: S) - where - S: AsRef; -} - -impl BuildSecret for Vec { - fn build_secret(&mut self, typ: S, id: S, src: S) - where - S: AsRef, - { - self.push("--secret".to_string()); - self.push(format!( - "type={},id={},src={}", - typ.as_ref(), - id.as_ref(), - src.as_ref() - )); - } -} - -/// Helper trait for splitting a string on spaces into owned Strings. -/// -/// If you need an element with internal spaces, you should handle that separately, for example -/// with BuildArg. -trait SplitString { - fn split_string(&self) -> Vec; -} - -impl SplitString for S -where - S: AsRef, -{ - fn split_string(&self) -> Vec { - self.as_ref().split(' ').map(String::from).collect() - } -} diff --git a/tools/buildsys/src/builder/error.rs b/tools/buildsys/src/builder/error.rs deleted file mode 100644 index fe527590..00000000 --- a/tools/buildsys/src/builder/error.rs +++ /dev/null @@ -1,82 +0,0 @@ -use snafu::Snafu; -use std::path::PathBuf; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub(crate) enum Error { - #[snafu(display("Failed to start command: {}", source))] - CommandStart { source: std::io::Error }, - - #[snafu(display("Failed to execute command: 'docker {}'", args))] - DockerExecution { args: String }, - - #[snafu(display("Failed to change directory to '{}': {}", path.display(), source))] - DirectoryChange { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to get parent directory for '{}'", path.display()))] - BadDirectory { path: PathBuf }, - - #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] - DirectoryCreate { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] - DirectoryRemove { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to read directory '{}': {}", path.display(), source))] - DirectoryRead { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to walk directory to find marker files: {}", source))] - DirectoryWalk { source: walkdir::Error }, - - #[snafu(display("Failed to create file '{}': {}", path.display(), source))] - FileCreate { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to remove file '{}': 
{}", path.display(), source))] - FileRemove { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to rename file '{}' to '{}': {}", old_path.display(), new_path.display(), source))] - FileRename { - old_path: PathBuf, - new_path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("Failed to strip prefix '{}' from path '{}': {}", prefix.display(), path.display(), source))] - StripPathPrefix { - path: PathBuf, - prefix: PathBuf, - source: std::path::StripPrefixError, - }, - - #[snafu(display("Unsupported architecture '{}'", arch))] - UnsupportedArch { - arch: String, - source: serde_plain::Error, - }, -} - -pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/cache.rs b/tools/buildsys/src/cache.rs deleted file mode 100644 index 6bc4e8ac..00000000 --- a/tools/buildsys/src/cache.rs +++ /dev/null @@ -1,154 +0,0 @@ -/*! -Many of the inputs to package builds are not source files tracked within the git -repository, but large binary artifacts such as tar archives that are independently -distributed by an upstream project. - -This module provides the ability to retrieve and validate these external files, -given the (name, url, hash) data that uniquely identifies each file. - -It implements a two-tier approach to retrieval: files are first pulled from the -"lookaside" cache and only fetched from the upstream site if that access fails. - -*/ -pub(crate) mod error; -use error::Result; - -use buildsys::manifest; -use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT}; -use sha2::{Digest, Sha512}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::env; -use std::fs::{self, File}; -use std::io::{self, BufWriter}; -use std::path::{Path, PathBuf}; - -static LOOKASIDE_CACHE: &str = "https://cache.bottlerocket.aws"; - -pub(crate) struct LookasideCache; - -impl LookasideCache { - /// Fetch files stored out-of-tree and ensure they match the stored hash. - pub(crate) fn fetch(files: &[manifest::ExternalFile]) -> Result { - for f in files { - let url_file_name = Self::extract_file_name(&f.url)?; - let path = &f.path.as_ref().unwrap_or(&url_file_name); - ensure!( - path.components().count() == 1, - error::ExternalFileNameSnafu { path } - ); - - let hash = &f.sha512; - if path.is_file() { - match Self::verify_file(path, hash) { - Ok(_) => continue, - Err(e) => { - eprintln!("{}", e); - fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?; - } - } - } - - let name = path.display(); - let tmp = PathBuf::from(format!(".{}", name)); - - // first check the lookaside cache - let url = format!("{}/{}/{}/{}", LOOKASIDE_CACHE, name, hash, name); - match Self::fetch_file(&url, &tmp, hash) { - Ok(_) => { - fs::rename(&tmp, path) - .context(error::ExternalFileRenameSnafu { path: &tmp })?; - continue; - } - Err(e) => { - eprintln!("{}", e); - } - } - - // next check with upstream, if permitted - if f.force_upstream.unwrap_or(false) - || std::env::var("BUILDSYS_UPSTREAM_SOURCE_FALLBACK") == Ok("true".to_string()) - { - println!("Fetching {:?} from upstream source", url_file_name); - Self::fetch_file(&f.url, &tmp, hash)?; - fs::rename(&tmp, path).context(error::ExternalFileRenameSnafu { path: &tmp })?; - } - } - - Ok(Self) - } - - /// Retrieves a file from the specified URL and write it to the given path, - /// then verifies the contents against the SHA-512 hash provided. 
- fn fetch_file>(url: &str, path: P, hash: &str) -> Result<()> { - let path = path.as_ref(); - - let version = Self::getenv("BUILDSYS_VERSION_FULL")?; - - let mut headers = HeaderMap::new(); - headers.insert( - USER_AGENT, - HeaderValue::from_str(&format!( - "Bottlerocket buildsys {version} (https://github.com/bottlerocket-os/bottlerocket)" - )) - .unwrap_or(HeaderValue::from_static( - "Bottlerocket buildsys (https://github.com/bottlerocket-os/bottlerocket)", - )), - ); - - let client = reqwest::blocking::Client::new(); - let mut resp = client - .get(url) - .headers(headers) - .send() - .context(error::ExternalFileRequestSnafu { url })?; - let status = resp.status(); - ensure!( - status.is_success(), - error::ExternalFileFetchSnafu { url, status } - ); - - let f = File::create(path).context(error::ExternalFileOpenSnafu { path })?; - let mut f = BufWriter::new(f); - resp.copy_to(&mut f) - .context(error::ExternalFileSaveSnafu { path })?; - drop(f); - - match Self::verify_file(path, hash) { - Ok(_) => Ok(()), - Err(e) => { - fs::remove_file(path).context(error::ExternalFileDeleteSnafu { path })?; - Err(e) - } - } - } - - fn getenv(var: &str) -> Result { - env::var(var).context(error::EnvironmentSnafu { var: (var) }) - } - - fn extract_file_name(url: &str) -> Result { - let parsed = reqwest::Url::parse(url).context(error::ExternalFileUrlSnafu { url })?; - let name = parsed - .path_segments() - .context(error::ExternalFileNameSnafu { path: url })? - .last() - .context(error::ExternalFileNameSnafu { path: url })?; - Ok(name.into()) - } - - /// Reads a file from disk and compares it to the expected SHA-512 hash. - fn verify_file>(path: P, hash: &str) -> Result<()> { - let path = path.as_ref(); - let mut f = File::open(path).context(error::ExternalFileOpenSnafu { path })?; - let mut d = Sha512::new(); - - io::copy(&mut f, &mut d).context(error::ExternalFileLoadSnafu { path })?; - let digest = hex::encode(d.finalize()); - - ensure!( - digest == hash, - error::ExternalFileVerifySnafu { path, hash } - ); - Ok(()) - } -} diff --git a/tools/buildsys/src/cache/error.rs b/tools/buildsys/src/cache/error.rs deleted file mode 100644 index 7665ba68..00000000 --- a/tools/buildsys/src/cache/error.rs +++ /dev/null @@ -1,55 +0,0 @@ -use snafu::Snafu; -use std::io; -use std::path::PathBuf; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -#[allow(clippy::enum_variant_names)] -pub(crate) enum Error { - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("Bad file name '{}'", path.display()))] - ExternalFileName { path: PathBuf }, - - #[snafu(display("Bad file url '{}': {}", url, source))] - ExternalFileUrl { - url: String, - source: url::ParseError, - }, - - #[snafu(display("Failed to request '{}': {}", url, source))] - ExternalFileRequest { url: String, source: reqwest::Error }, - - #[snafu(display("Failed to fetch '{}': {}", url, status))] - ExternalFileFetch { - url: String, - status: reqwest::StatusCode, - }, - - #[snafu(display("Failed to open file '{}': {}", path.display(), source))] - ExternalFileOpen { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to write file '{}': {}", path.display(), source))] - ExternalFileSave { - path: PathBuf, - source: reqwest::Error, - }, - - #[snafu(display("Failed to load file '{}': {}", path.display(), source))] - ExternalFileLoad { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to verify file '{}' with hash '{}'", path.display(), 
hash))] - ExternalFileVerify { path: PathBuf, hash: String }, - - #[snafu(display("Failed to rename file '{}': {}", path.display(), source))] - ExternalFileRename { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to delete file '{}': {}", path.display(), source))] - ExternalFileDelete { path: PathBuf, source: io::Error }, -} - -pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/gomod.rs b/tools/buildsys/src/gomod.rs deleted file mode 100644 index 6a1dabc0..00000000 --- a/tools/buildsys/src/gomod.rs +++ /dev/null @@ -1,207 +0,0 @@ -/*! -Packages using the Go programming language may have upstream tar archives that -include only the source code of the project, but not the source code of any -dependencies. The Go programming language promotes the use of "modules" for -dependencies. Projects adopting modules will provide `go.mod` and `go.sum` files. - -This Rust module extends the functionality of `packages.metadata.build-package.external-files` -and provides the ability to retrieve and validate dependencies -declared using Go modules given a tar archive containing a `go.mod` and `go.sum`. - -The location where dependencies are retrieved from are controlled by the -standard environment variables employed by the Go tool: `GOPROXY`, `GOSUMDB`, and -`GOPRIVATE`. These variables are automatically retrieved from the host environment -when the docker-go script is invoked. - - */ - -pub(crate) mod error; -use error::Result; - -use buildsys::manifest; -use duct::cmd; -use snafu::{ensure, OptionExt, ResultExt}; -use std::io::Write; -use std::os::unix::fs::PermissionsExt; -use std::path::{Path, PathBuf}; -use std::{env, fs}; - -pub(crate) struct GoMod; - -const GO_MOD_DOCKER_SCRIPT_NAME: &str = "docker-go-script.sh"; - -// The following bash template script is intended to be run within a container -// using the docker-go tool found in this codebase under `tools/docker-go`. -// -// This script inspects the top level directory found in the package upstream -// archive and uses that as the default Go module path if no explicit module -// path was provided. It will then untar the archive, vendor the Go -// dependencies, create a new archive using the {module-path}/vendor directory -// and name it the output path provided. If no output path was given, it -// defaults to "bundled-{package-file-name}". Finally, it cleans up by removing -// the untar'd source code. The upstream archive remains intact and both tar -// files can then be used during packaging. -// -// This script exists as an in memory template string literal and is populated -// into a temporary file in the package directory itself to enable buildsys to -// be as portable as possible and have no dependency on runtime paths. Since -// buildsys is executed from the context of many different package directories, -// managing a temporary file via this Rust module prevents having to acquire the -// path of some static script file on the host system. -const GO_MOD_SCRIPT_TMPL: &str = r#".#!/bin/bash - -set -e - -toplevel=$(tar tf __LOCAL_FILE_NAME__ | head -1) -if [ -z __MOD_DIR__ ] ; then - targetdir="${toplevel}" -else - targetdir="__MOD_DIR__" -fi - -tar xf __LOCAL_FILE_NAME__ - -pushd "${targetdir}" - go list -mod=readonly ./... 
>/dev/null && go mod vendor -popd - -tar czf __OUTPUT__ "${targetdir}"/vendor -rm -rf "${targetdir}" -touch -r __LOCAL_FILE_NAME__ __OUTPUT__ -"#; - -impl GoMod { - pub(crate) fn vendor( - root_dir: &Path, - package_dir: &Path, - external_file: &manifest::ExternalFile, - ) -> Result<()> { - let url_file_name = extract_file_name(&external_file.url)?; - let local_file_name = &external_file.path.as_ref().unwrap_or(&url_file_name); - ensure!( - local_file_name.components().count() == 1, - error::InputFileSnafu - ); - - let full_path = package_dir.join(local_file_name); - ensure!( - full_path.is_file(), - error::InputFileBadSnafu { path: full_path } - ); - - // If a module directory was not provided, set as an empty path. - // By default, without a provided module directory, tar will be passed - // the first directory found in the archives as the top level Go module - let default_empty_path = PathBuf::from(""); - let mod_dir = external_file - .bundle_root_path - .as_ref() - .unwrap_or(&default_empty_path); - - // Use a default "bundle-{name-of-file}" if no output path was provided - let default_output_path = - PathBuf::from(format!("bundled-{}", local_file_name.to_string_lossy())); - let output_path_arg = external_file - .bundle_output_path - .as_ref() - .unwrap_or(&default_output_path); - println!( - "cargo:rerun-if-changed={}", - output_path_arg.to_string_lossy() - ); - - // Our SDK and toolchain are picked by the external `cargo make` invocation. - let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu { - var: "BUILDSYS_SDK_IMAGE", - })?; - - let args = DockerGoArgs { - module_path: package_dir, - sdk_image: sdk, - go_mod_cache: &root_dir.join(".gomodcache"), - command: format!("./{}", GO_MOD_DOCKER_SCRIPT_NAME), - }; - - // Create and/or write the temporary script file to the package directory - // using the script template string and placeholder variables - let script_contents = GO_MOD_SCRIPT_TMPL - .replace("__LOCAL_FILE_NAME__", &local_file_name.to_string_lossy()) - .replace("__MOD_DIR__", &mod_dir.to_string_lossy()) - .replace("__OUTPUT__", &output_path_arg.to_string_lossy()); - let script_path = format!( - "{}/{}", - package_dir.to_string_lossy(), - GO_MOD_DOCKER_SCRIPT_NAME - ); - - // Drop the reference after writing the file to avoid a "text busy" error - // when attempting to execute it. - { - let mut script_file = fs::File::create(&script_path) - .context(error::CreateFileSnafu { path: &script_path })?; - fs::set_permissions(&script_path, fs::Permissions::from_mode(0o777)) - .context(error::SetFilePermissionsSnafu { path: &script_path })?; - script_file - .write_all(script_contents.as_bytes()) - .context(error::WriteFileSnafu { path: &script_path })?; - } - - let res = docker_go(root_dir, &args); - fs::remove_file(&script_path).context(error::RemoveFileSnafu { path: &script_path })?; - res - } -} - -fn extract_file_name(url: &str) -> Result { - let parsed = reqwest::Url::parse(url).context(error::InputUrlSnafu { url })?; - let name = parsed - .path_segments() - .context(error::InputFileBadSnafu { path: url })? - .last() - .context(error::InputFileBadSnafu { path: url })?; - Ok(name.into()) -} - -struct DockerGoArgs<'a> { - module_path: &'a Path, - sdk_image: String, - go_mod_cache: &'a Path, - command: String, -} - -/// Run `docker-go` with the specified arguments. 
-fn docker_go(root_dir: &Path, dg_args: &DockerGoArgs) -> Result<()> { - let args = vec![ - "--module-path", - dg_args - .module_path - .to_str() - .context(error::InputFileSnafu)?, - "--sdk-image", - &dg_args.sdk_image, - "--go-mod-cache", - dg_args - .go_mod_cache - .to_str() - .context(error::InputFileSnafu)?, - "--command", - &dg_args.command, - ]; - let arg_string = args.join(" "); - let program = root_dir.join("tools/docker-go"); - println!("program: {}", program.to_string_lossy()); - let output = cmd(program, args) - .stderr_to_stdout() - .stdout_capture() - .unchecked() - .run() - .context(error::CommandStartSnafu)?; - - let stdout = String::from_utf8_lossy(&output.stdout); - println!("{}", &stdout); - ensure!( - output.status.success(), - error::DockerExecutionSnafu { args: arg_string } - ); - Ok(()) -} diff --git a/tools/buildsys/src/gomod/error.rs b/tools/buildsys/src/gomod/error.rs deleted file mode 100644 index 64d736d3..00000000 --- a/tools/buildsys/src/gomod/error.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::path::PathBuf; - -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub(crate) enum Error { - #[snafu(display("Failed to start command: {}", source))] - CommandStart { source: std::io::Error }, - - #[snafu(display("Failed to execute docker-go script. 'args: {}'", args))] - DockerExecution { args: String }, - - #[snafu(display("Input url is required"))] - InputFile, - - #[snafu(display("Input file {} must be a file", path.display()))] - InputFileBad { path: PathBuf }, - - #[snafu(display("Bad file url '{}': {}", url, source))] - InputUrl { - url: String, - source: url::ParseError, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("Failed to create '{}': {}", path.display(), source))] - CreateFile { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to set permissions on '{}': {}", path.display(), source))] - SetFilePermissions { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to write contents to '{}': {}", path.display(), source))] - WriteFile { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to remove '{}': {}", path.display(), source))] - RemoveFile { - path: PathBuf, - source: std::io::Error, - }, -} - -pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/lib.rs b/tools/buildsys/src/lib.rs deleted file mode 100644 index 640fc648..00000000 --- a/tools/buildsys/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod manifest; diff --git a/tools/buildsys/src/main.rs b/tools/buildsys/src/main.rs deleted file mode 100644 index 5ea01121..00000000 --- a/tools/buildsys/src/main.rs +++ /dev/null @@ -1,296 +0,0 @@ -/*! -This tool carries out a package or variant build using Docker. - -It is meant to be called by a Cargo build script. To keep those scripts simple, -all of the configuration is taken from the environment, with the build type -specified as a command line argument. - -The implementation is closely tied to the top-level Dockerfile. 
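As a rough illustration of the calling convention described above (a sketch only; it assumes `buildsys` is on the PATH and that the package uses the `build-package` subcommand, which is not shown in this patch), a package's Cargo build script might look like:
```ignore
// Hypothetical build.rs: forward the build to buildsys and propagate failure.
use std::process::{exit, Command};

fn main() {
    // All configuration comes from the environment, so only the build type
    // is passed as an argument.
    let status = Command::new("buildsys")
        .arg("build-package")
        .status()
        .expect("failed to run buildsys");
    if !status.success() {
        exit(1);
    }
}
```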
- -*/ -mod builder; -mod cache; -mod gomod; -mod project; -mod spec; - -use builder::{PackageBuilder, VariantBuilder}; -use buildsys::manifest::{BundleModule, ManifestInfo, SupportedArch}; -use cache::LookasideCache; -use gomod::GoMod; -use project::ProjectInfo; -use serde::Deserialize; -use snafu::{ensure, ResultExt}; -use spec::SpecInfo; -use std::env; -use std::path::PathBuf; -use std::process; - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - ManifestParse { - source: buildsys::manifest::Error, - }, - - SpecParse { - source: super::spec::error::Error, - }, - - ExternalFileFetch { - source: super::cache::error::Error, - }, - - GoMod { - source: super::gomod::error::Error, - }, - - ProjectCrawl { - source: super::project::error::Error, - }, - - BuildAttempt { - source: super::builder::error::Error, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("Unknown architecture: '{}'", arch))] - UnknownArch { - arch: String, - source: serde_plain::Error, - }, - - #[snafu(display( - "Unsupported architecture {}, this variant supports {}", - arch, - supported_arches.join(", ") - ))] - UnsupportedArch { - arch: String, - supported_arches: Vec, - }, - } -} - -type Result = std::result::Result; - -#[derive(Debug, Deserialize)] -#[serde(rename_all = "kebab-case")] -enum Command { - BuildPackage, - BuildVariant, -} - -fn usage() -> ! { - eprintln!( - "\ -USAGE: - buildsys - -SUBCOMMANDS: - build-package Build RPMs from a spec file and sources. - build-variant Build filesystem and disk images from RPMs." - ); - process::exit(1) -} - -// Returning a Result from main makes it print a Debug representation of the error, but with Snafu -// we have nice Display representations of the error, so we wrap "main" (run) and print any error. -// https://github.com/shepmaster/snafu/issues/110 -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -fn run() -> Result<()> { - // Not actually redundant for a diverging function. - #[allow(clippy::redundant_closure)] - let command_str = std::env::args().nth(1).unwrap_or_else(|| usage()); - let command = serde_plain::from_str::(&command_str).unwrap_or_else(|_| usage()); - match command { - Command::BuildPackage => build_package()?, - Command::BuildVariant => build_variant()?, - } - Ok(()) -} - -fn build_package() -> Result<()> { - let manifest_file = "Cargo.toml"; - println!("cargo:rerun-if-changed={}", manifest_file); - - let root_dir: PathBuf = getenv("BUILDSYS_ROOT_DIR")?.into(); - let variant = getenv("BUILDSYS_VARIANT")?; - let variant_manifest_path = root_dir.join("variants").join(variant).join(manifest_file); - let variant_manifest = - ManifestInfo::new(variant_manifest_path).context(error::ManifestParseSnafu)?; - supported_arch(&variant_manifest)?; - let mut image_features = variant_manifest.image_features(); - - let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into(); - let manifest = - ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; - let package_features = manifest.package_features(); - - // For any package feature specified in the package manifest, track the corresponding - // environment variable for changes to the ambient set of image features for the current - // variant. 
- if let Some(package_features) = &package_features { - for package_feature in package_features { - println!( - "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_IMAGE_FEATURE_{}", - package_feature - ); - } - } - - // Keep only the image features that the package has indicated that it tracks, if any. - if let Some(image_features) = &mut image_features { - match package_features { - Some(package_features) => image_features.retain(|k| package_features.contains(k)), - None => image_features.clear(), - } - } - - // If manifest has package.metadata.build-package.variant-sensitive set, then track the - // appropriate environment variable for changes. - if let Some(sensitivity) = manifest.variant_sensitive() { - use buildsys::manifest::{SensitivityType::*, VariantSensitivity::*}; - fn emit_variant_env(suffix: Option<&str>) { - if let Some(suffix) = suffix { - println!( - "cargo:rerun-if-env-changed=BUILDSYS_VARIANT_{}", - suffix.to_uppercase() - ); - } else { - println!("cargo:rerun-if-env-changed=BUILDSYS_VARIANT"); - } - } - match sensitivity { - Any(false) => (), - Any(true) => emit_variant_env(None), - Specific(Platform) => emit_variant_env(Some("platform")), - Specific(Runtime) => emit_variant_env(Some("runtime")), - Specific(Family) => emit_variant_env(Some("family")), - Specific(Flavor) => emit_variant_env(Some("flavor")), - } - } - - if let Some(files) = manifest.external_files() { - LookasideCache::fetch(files).context(error::ExternalFileFetchSnafu)?; - for f in files { - if f.bundle_modules.is_none() { - continue; - } - - for b in f.bundle_modules.as_ref().unwrap() { - match b { - BundleModule::Go => { - GoMod::vendor(&root_dir, &manifest_dir, f).context(error::GoModSnafu)? - } - } - } - } - } - - if let Some(groups) = manifest.source_groups() { - let var = "BUILDSYS_SOURCES_DIR"; - let root: PathBuf = getenv(var)?.into(); - println!("cargo:rerun-if-env-changed={}", var); - - let dirs = groups.iter().map(|d| root.join(d)).collect::>(); - let info = ProjectInfo::crawl(&dirs).context(error::ProjectCrawlSnafu)?; - for f in info.files { - println!("cargo:rerun-if-changed={}", f.display()); - } - } - - // Package developer can override name of package if desired, e.g. to name package with - // characters invalid in Cargo crate names - let package = if let Some(name_override) = manifest.package_name() { - name_override.clone() - } else { - getenv("CARGO_PKG_NAME")? 
- }; - let spec = format!("{}.spec", package); - println!("cargo:rerun-if-changed={}", spec); - - let info = SpecInfo::new(PathBuf::from(&spec)).context(error::SpecParseSnafu)?; - - for f in info.sources { - println!("cargo:rerun-if-changed={}", f.display()); - } - - for f in info.patches { - println!("cargo:rerun-if-changed={}", f.display()); - } - - PackageBuilder::build(&package, image_features).context(error::BuildAttemptSnafu)?; - - Ok(()) -} - -fn build_variant() -> Result<()> { - let manifest_dir: PathBuf = getenv("CARGO_MANIFEST_DIR")?.into(); - let manifest_file = "Cargo.toml"; - println!("cargo:rerun-if-changed={}", manifest_file); - - let manifest = - ManifestInfo::new(manifest_dir.join(manifest_file)).context(error::ManifestParseSnafu)?; - - supported_arch(&manifest)?; - - if let Some(packages) = manifest.included_packages() { - let image_format = manifest.image_format(); - let image_layout = manifest.image_layout(); - let kernel_parameters = manifest.kernel_parameters(); - let image_features = manifest.image_features(); - VariantBuilder::build( - packages, - image_format, - image_layout, - kernel_parameters, - image_features, - ) - .context(error::BuildAttemptSnafu)?; - } else { - println!("cargo:warning=No included packages in manifest. Skipping variant build."); - } - - Ok(()) -} - -/// Ensure that the current arch is supported by the current variant -fn supported_arch(manifest: &ManifestInfo) -> Result<()> { - if let Some(supported_arches) = manifest.supported_arches() { - let arch = getenv("BUILDSYS_ARCH")?; - let current_arch: SupportedArch = - serde_plain::from_str(&arch).context(error::UnknownArchSnafu { arch: &arch })?; - - ensure!( - supported_arches.contains(¤t_arch), - error::UnsupportedArchSnafu { - arch: &arch, - supported_arches: supported_arches - .iter() - .map(|a| a.to_string()) - .collect::>() - } - ) - } - Ok(()) -} - -/// Retrieve a variable that we expect to be set in the environment. -fn getenv(var: &str) -> Result { - env::var(var).context(error::EnvironmentSnafu { var }) -} diff --git a/tools/buildsys/src/manifest.rs b/tools/buildsys/src/manifest.rs deleted file mode 100644 index 1df31abe..00000000 --- a/tools/buildsys/src/manifest.rs +++ /dev/null @@ -1,582 +0,0 @@ -/*! -# Build system metadata - -This module provides deserialization and convenience methods for build system -metadata located in `Cargo.toml`. - -Cargo ignores the `package.metadata` table in its manifest, so it can be used -to store configuration for other tools. We recognize the following keys. - -## Metadata for packages - -`source-groups` is a list of directories in the top-level `sources` directory, -each of which contains a set of related Rust projects. Changes to files in -these groups should trigger a rebuild. -```ignore -[package.metadata.build-package] -source-groups = ["api"] -``` - -`external-files` is a list of out-of-tree files that should be retrieved -as additional dependencies for the build. If the path for the external -file name is not provided, it will be taken from the last path component -of the URL. -```ignore -[[package.metadata.build-package.external-files]] -path = "foo" -url = "https://foo" -sha512 = "abcdef" - -[[package.metadata.build-package.external-files]] -path = "bar" -url = "https://bar" -sha512 = "123456" -``` - -The `bundle-*` keys on `external-files` are a group of optional modifiers -and are used to untar an upstream external file archive, vendor any dependent -code, and produce an additional archive with those dependencies. 
-Only `bundle-modules` is required when bundling an archive's dependencies.
-
-`bundle-modules` is a list of module "paradigms" the external-file should
-be vendored through. For example, if a project contains a `go.mod` and `go.sum`
-file, adding "go" to the list will vendor the dependencies through go modules.
-Currently, only "go" is supported.
-
-`bundle-root-path` is an optional argument that provides the filepath
-within the archive that contains the module. By default, the first top level
-directory in the archive is used. So, for example, given a Go project that has
-the necessary `go.mod` and `go.sum` files in the archive located at the
-filepath `a/b/c`, this `bundle-root-path` value should be "a/b/c". Or, given an
-archive with a single directory that contains a Go project that has `go.mod`
-and `go.sum` files located in that top level directory, this option may be
-omitted since the single top-level directory will automatically be used.
-
-`bundle-output-path` is an optional argument that provides the desired path of
-the output archive. By default, this will use the name of the existing archive,
-but prepended with "bundled-". For example, if "my-unique-archive-name.tar.gz"
-is entered as the value for `bundle-output-path`, then the output archive
-will be named `my-unique-archive-name.tar.gz`. Or, by default, if the name
-of some upstream archive is "my-package.tar.gz", the output archive would be
-named `bundled-my-package.tar.gz`. This output path may then be referenced
-within an RPM spec or when creating a package in order to access the vendored
-upstream dependencies during build time.
-```ignore
-[[package.metadata.build-package.external-files]]
-path = "foo"
-url = "https://foo"
-sha512 = "abcdef"
-bundle-modules = [ "go" ]
-bundle-root-path = "path/to/module"
-bundle-output-path = "path/to/output.tar.gz"
-```
-
-`package-name` lets you override the package name in Cargo.toml; this is useful
-if you have a package with "." in its name, for example, which Cargo doesn't
-allow. This means the directory name and spec file name can use your preferred
-naming.
-```ignore
-[package.metadata.build-package]
-package-name = "better.name"
-```
-
-`variant-sensitive` lets you specify whether the package should be rebuilt when
-building a new variant, and defaults to false; set it to true if a package is
-using the variant to affect its build process.
-
-```ignore
-[package.metadata.build-package]
-variant-sensitive = true
-```
-
-Some packages might only be sensitive to certain components of the variant
-tuple, such as the platform, runtime, or family. The `variant-sensitive` field
-can also take a string to indicate the source of the sensitivity.
-
-```ignore
-[package.metadata.build-package]
-# sensitive to platform, like "metal" or "aws"
-variant-sensitive = "platform"
-
-# sensitive to runtime, like "k8s" or "ecs"
-variant-sensitive = "runtime"
-
-# sensitive to family, like "metal-k8s" or "aws-ecs"
-variant-sensitive = "family"
-```
-
-`package-features` is a list of image features that the package tracks. This is
-useful when the way the package is built changes based on whether a particular
-image feature is enabled for the current variant, rather than when the variant
-tuple changes.
-
-```ignore
-[package.metadata.build-package]
-package-features = [
-    "grub-set-private-var",
-]
-```
-
-`releases-url` is ignored by buildsys, but can be used by package maintainers
-to indicate a good URL for checking whether the software has had a new release.
-```ignore
-[package.metadata.build-package]
-releases-url = "https://www.example.com/releases"
-```
-
-## Metadata for variants
-
-`included-packages` is a list of packages that should be included in a variant.
-```ignore
-[package.metadata.build-variant]
-included-packages = ["release"]
-```
-
-`image-format` is the desired format for the built images.
-This can be `raw` (the default), `vmdk`, or `qcow2`.
-```ignore
-[package.metadata.build-variant]
-image-format = "vmdk"
-```
-
-`image-layout` is the desired layout for the built images.
-
-`os-image-size-gib` is the desired size of the "os" disk image in GiB.
-The specified size will be automatically divided into two banks, where each
-bank contains the set of partitions needed for in-place upgrades. Roughly 40%
-will be available for each root filesystem partition, with the rest allocated
-to other essential system partitions.
-
-`data-image-size-gib` is the desired size of the "data" disk image in GiB.
-The full size will be used for the single data partition, except for the 2 MiB
-overhead for the GPT labels and partition alignment. The data partition will be
-automatically resized to fill the disk on boot, so it is usually not necessary
-to increase this value.
-
-`publish-image-size-hint-gib` is the desired size of the published image in GiB.
-When the `split` layout is used, the "os" image volume will remain at the built
-size, and any additional space will be allocated to the "data" image volume.
-When the `unified` layout is used, this value will be used directly for the
-single "os" image volume. The hint will be ignored if the combined size of the
-"os" and "data" images exceeds the specified value.
-
-`partition-plan` is the desired strategy for image partitioning.
-This can be `split` (the default) for "os" and "data" images backed by separate
-volumes, or `unified` to have "os" and "data" share the same volume.
-```ignore
-[package.metadata.build-variant.image-layout]
-os-image-size-gib = 2
-data-image-size-gib = 1
-publish-image-size-hint-gib = 22
-partition-plan = "split"
-```
-
-`supported-arches` is the list of architectures the variant is able to run on.
-The values can be `x86_64` and `aarch64`.
-If not specified, the variant can run on any of those architectures.
-```ignore
-[package.metadata.build-variant]
-supported-arches = ["x86_64"]
-```
-
-`kernel-parameters` is a list of extra parameters to be added to the kernel command line.
-The given parameters are inserted at the start of the command line.
-```ignore
-[package.metadata.build-variant]
-kernel-parameters = [
-    "console=ttyS42",
-]
-```
-
-`image-features` is a map of image feature flags, which can be enabled or disabled. This allows us
-to conditionally use or exclude certain firmware-level features in variants.
-
-`grub-set-private-var` means that the grub image for the current variant includes the command to
-find the BOTTLEROCKET_PRIVATE partition and set the appropriate `$private` variable for the grub
-config file to consume. This feature flag is a prerequisite for Boot Config support.
-```ignore
-[package.metadata.build-variant.image-features]
-grub-set-private-var = true
-```
-
-`systemd-networkd` uses the `systemd-networkd` network backend in place of `wicked`. This feature
-flag is meant primarily for development, and will be removed when development has completed.
-```ignore
-[package.metadata.build-variant.image-features]
-systemd-networkd = true
-```
-
-`unified-cgroup-hierarchy` makes systemd set up a unified cgroup hierarchy on
-boot, i.e.
the host will use cgroup v2 by default. This feature flag allows -old variants to continue booting with cgroup v1 and new variants to move to -cgroup v2, while users will still be able to override the default via command -line arguments set in the boot configuration. -```ignore -[package.metadata.build-variant.image-features] -unified-cgroup-hierarchy = true -``` - -`xfs-data-partition` changes the filesystem for the data partition from ext4 to xfs. The -default will remain ext4 and xfs is opt-in. - -```ignore -[package.metadata.build-variant.image-features] -xfs-data-partition = true -``` - -`uefi-secure-boot` means that the bootloader and kernel are signed. The grub image for the current -variant will have a public GPG baked in, and will expect the grub config file to have a valid -detached signature. Published artifacts such as AMIs and OVAs will enforce the signature checks -when the platform supports it. - -```ignore -[package.metadata.build-variant.image-features] -uefi-secure-boot = true -``` - -*/ - -mod error; - -use serde::Deserialize; -use snafu::{ResultExt, Snafu}; -use std::cmp::max; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::fmt::{self, Display}; -use std::fs; -use std::path::{Path, PathBuf}; - -#[derive(Debug, Snafu)] -pub struct Error(error::Error); -type Result = std::result::Result; - -/// The nested structures here are somewhat complex, but they make it trivial -/// to deserialize the structure we expect to find in the manifest. -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub struct ManifestInfo { - package: Package, -} - -impl ManifestInfo { - /// Extract the settings we understand from `Cargo.toml`. - pub fn new>(path: P) -> Result { - let path = path.as_ref(); - let manifest_data = - fs::read_to_string(path).context(error::ManifestFileReadSnafu { path })?; - let manifest = - toml::from_str(&manifest_data).context(error::ManifestFileLoadSnafu { path })?; - Ok(manifest) - } - - /// Convenience method to return the list of source groups. - pub fn source_groups(&self) -> Option<&Vec> { - self.build_package().and_then(|b| b.source_groups.as_ref()) - } - - /// Convenience method to return the list of external files. - pub fn external_files(&self) -> Option<&Vec> { - self.build_package().and_then(|b| b.external_files.as_ref()) - } - - /// Convenience method to return the package name override, if any. - pub fn package_name(&self) -> Option<&String> { - self.build_package().and_then(|b| b.package_name.as_ref()) - } - - /// Convenience method to find whether the package is sensitive to variant changes. - pub fn variant_sensitive(&self) -> Option<&VariantSensitivity> { - self.build_package() - .and_then(|b| b.variant_sensitive.as_ref()) - } - - /// Convenience method to return the image features tracked by this package. - pub fn package_features(&self) -> Option> { - self.build_package() - .and_then(|b| b.package_features.as_ref().map(|m| m.iter().collect())) - } - - /// Convenience method to return the list of included packages. - pub fn included_packages(&self) -> Option<&Vec> { - self.build_variant() - .and_then(|b| b.included_packages.as_ref()) - } - - /// Convenience method to return the image format override, if any. - pub fn image_format(&self) -> Option<&ImageFormat> { - self.build_variant().and_then(|b| b.image_format.as_ref()) - } - - /// Convenience method to return the image layout, if specified. 
- pub fn image_layout(&self) -> Option<&ImageLayout> { - self.build_variant().map(|b| &b.image_layout) - } - - /// Convenience method to return the supported architectures for this variant. - pub fn supported_arches(&self) -> Option<&HashSet> { - self.build_variant() - .and_then(|b| b.supported_arches.as_ref()) - } - - /// Convenience method to return the kernel parameters for this variant. - pub fn kernel_parameters(&self) -> Option<&Vec> { - self.build_variant() - .and_then(|b| b.kernel_parameters.as_ref()) - } - - /// Convenience method to return the enabled image features for this variant. - pub fn image_features(&self) -> Option> { - self.build_variant().and_then(|b| { - b.image_features - .as_ref() - .map(|m| m.iter().filter(|(_k, v)| **v).map(|(k, _v)| k).collect()) - }) - } - - /// Helper methods to navigate the series of optional struct fields. - fn build_package(&self) -> Option<&BuildPackage> { - self.package - .metadata - .as_ref() - .and_then(|m| m.build_package.as_ref()) - } - - fn build_variant(&self) -> Option<&BuildVariant> { - self.package - .metadata - .as_ref() - .and_then(|m| m.build_variant.as_ref()) - } -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -struct Package { - metadata: Option, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -struct Metadata { - build_package: Option, - build_variant: Option, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -#[allow(dead_code)] -pub struct BuildPackage { - pub external_files: Option>, - pub package_name: Option, - pub releases_url: Option, - pub source_groups: Option>, - pub variant_sensitive: Option, - pub package_features: Option>, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -#[serde(untagged)] -pub enum VariantSensitivity { - Any(bool), - Specific(SensitivityType), -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub enum SensitivityType { - Platform, - Runtime, - Family, - Flavor, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub struct BuildVariant { - pub included_packages: Option>, - pub image_format: Option, - #[serde(default)] - pub image_layout: ImageLayout, - pub supported_arches: Option>, - pub kernel_parameters: Option>, - pub image_features: Option>, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "lowercase")] -pub enum ImageFormat { - Qcow2, - Raw, - Vmdk, -} - -#[derive(Deserialize, Debug, Copy, Clone)] -/// Constrain specified image sizes to a plausible range, from 0 - 65535 GiB. -pub struct ImageSize(u16); - -impl Display for ImageSize { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -#[derive(Deserialize, Debug, Copy, Clone)] -#[serde(rename_all = "kebab-case")] -pub struct ImageLayout { - #[serde(default = "ImageLayout::default_os_image_size_gib")] - pub os_image_size_gib: ImageSize, - #[serde(default = "ImageLayout::default_data_image_size_gib")] - pub data_image_size_gib: ImageSize, - #[serde(default = "ImageLayout::default_publish_image_size_hint_gib")] - publish_image_size_hint_gib: ImageSize, - #[serde(default = "ImageLayout::default_partition_plan")] - pub partition_plan: PartitionPlan, -} - -/// These are the historical defaults for all variants, before we added support -/// for customizing these properties. 
-static DEFAULT_OS_IMAGE_SIZE_GIB: ImageSize = ImageSize(2); -static DEFAULT_DATA_IMAGE_SIZE_GIB: ImageSize = ImageSize(1); -static DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB: ImageSize = ImageSize(22); -static DEFAULT_PARTITION_PLAN: PartitionPlan = PartitionPlan::Split; - -impl ImageLayout { - fn default_os_image_size_gib() -> ImageSize { - DEFAULT_OS_IMAGE_SIZE_GIB - } - - fn default_data_image_size_gib() -> ImageSize { - DEFAULT_DATA_IMAGE_SIZE_GIB - } - - fn default_publish_image_size_hint_gib() -> ImageSize { - DEFAULT_PUBLISH_IMAGE_SIZE_HINT_GIB - } - - fn default_partition_plan() -> PartitionPlan { - DEFAULT_PARTITION_PLAN - } - - // At publish time we will need specific sizes for the OS image and the (optional) data image. - // The sizes returned by this function depend on the image layout, and whether the publish - // image hint is larger than the required minimum size. - pub fn publish_image_sizes_gib(&self) -> (i32, i32) { - let os_image_base_size_gib = self.os_image_size_gib.0; - let data_image_base_size_gib = self.data_image_size_gib.0; - let publish_image_size_hint_gib = self.publish_image_size_hint_gib.0; - - let min_publish_image_size_gib = os_image_base_size_gib + data_image_base_size_gib; - let publish_image_size_gib = max(publish_image_size_hint_gib, min_publish_image_size_gib); - - match self.partition_plan { - PartitionPlan::Split => { - let os_image_publish_size_gib = os_image_base_size_gib; - let data_image_publish_size_gib = publish_image_size_gib - os_image_base_size_gib; - ( - os_image_publish_size_gib.into(), - data_image_publish_size_gib.into(), - ) - } - PartitionPlan::Unified => (publish_image_size_gib.into(), -1), - } - } -} - -impl Default for ImageLayout { - fn default() -> Self { - Self { - os_image_size_gib: Self::default_os_image_size_gib(), - data_image_size_gib: Self::default_data_image_size_gib(), - publish_image_size_hint_gib: Self::default_publish_image_size_hint_gib(), - partition_plan: Self::default_partition_plan(), - } - } -} - -#[derive(Deserialize, Debug, Copy, Clone)] -#[serde(rename_all = "lowercase")] -pub enum PartitionPlan { - Split, - Unified, -} - -#[derive(Deserialize, Debug, PartialEq, Eq, Hash)] -#[serde(rename_all = "lowercase")] -pub enum SupportedArch { - X86_64, - Aarch64, -} - -/// Map a Linux architecture into the corresponding Docker architecture. 
-impl SupportedArch { - pub fn goarch(&self) -> &'static str { - match self { - SupportedArch::X86_64 => "amd64", - SupportedArch::Aarch64 => "arm64", - } - } -} - -#[derive(Deserialize, Debug, PartialEq, Eq, Hash)] -#[serde(try_from = "String")] -pub enum ImageFeature { - GrubSetPrivateVar, - SystemdNetworkd, - UnifiedCgroupHierarchy, - XfsDataPartition, - UefiSecureBoot, -} - -impl TryFrom for ImageFeature { - type Error = Error; - fn try_from(s: String) -> Result { - match s.as_str() { - "grub-set-private-var" => Ok(ImageFeature::GrubSetPrivateVar), - "systemd-networkd" => Ok(ImageFeature::SystemdNetworkd), - "unified-cgroup-hierarchy" => Ok(ImageFeature::UnifiedCgroupHierarchy), - "xfs-data-partition" => Ok(ImageFeature::XfsDataPartition), - "uefi-secure-boot" => Ok(ImageFeature::UefiSecureBoot), - _ => error::ParseImageFeatureSnafu { what: s }.fail()?, - } - } -} - -impl fmt::Display for ImageFeature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ImageFeature::GrubSetPrivateVar => write!(f, "GRUB_SET_PRIVATE_VAR"), - ImageFeature::SystemdNetworkd => write!(f, "SYSTEMD_NETWORKD"), - ImageFeature::UnifiedCgroupHierarchy => write!(f, "UNIFIED_CGROUP_HIERARCHY"), - ImageFeature::XfsDataPartition => write!(f, "XFS_DATA_PARTITION"), - ImageFeature::UefiSecureBoot => write!(f, "UEFI_SECURE_BOOT"), - } - } -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "lowercase")] -pub enum BundleModule { - Go, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub struct ExternalFile { - pub path: Option, - pub sha512: String, - pub url: String, - pub force_upstream: Option, - pub bundle_modules: Option>, - pub bundle_root_path: Option, - pub bundle_output_path: Option, -} - -impl fmt::Display for SupportedArch { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SupportedArch::X86_64 => write!(f, "x86_64"), - SupportedArch::Aarch64 => write!(f, "aarch64"), - } - } -} diff --git a/tools/buildsys/src/manifest/error.rs b/tools/buildsys/src/manifest/error.rs deleted file mode 100644 index 788cbb1a..00000000 --- a/tools/buildsys/src/manifest/error.rs +++ /dev/null @@ -1,22 +0,0 @@ -use snafu::Snafu; -use std::io; -use std::path::PathBuf; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub(super) enum Error { - #[snafu(display("Failed to read manifest file '{}': {}", path.display(), source))] - ManifestFileRead { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to load manifest file '{}': {}", path.display(), source))] - ManifestFileLoad { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Failed to parse image feature '{}'", what))] - ParseImageFeature { what: String }, - - #[snafu(display("Invalid image size {}; must be between 1 and 1024", value))] - InvalidImageSize { value: i32 }, -} diff --git a/tools/buildsys/src/project.rs b/tools/buildsys/src/project.rs deleted file mode 100644 index 08b5d4ff..00000000 --- a/tools/buildsys/src/project.rs +++ /dev/null @@ -1,51 +0,0 @@ -/*! -This module handles iterating through project directories to discover source -files that should be passed to Cargo to watch for changes. - -For now, it's a thin wrapper around `walkdir` with a filter applied to ignore -files that shouldn't trigger rebuilds. 
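A rough usage sketch (mirroring the build-package flow elsewhere in this patch; the `sources/api` directory is only an example, not a fixed path): the crawl results are fed back to Cargo as `rerun-if-changed` directives.
```ignore
// Hypothetical caller: emit one rerun-if-changed line per discovered file.
use std::path::PathBuf;

fn emit_rerun_lines() -> Result<()> {
    let dirs: Vec<PathBuf> = vec![PathBuf::from("sources/api")];
    let info = ProjectInfo::crawl(&dirs)?;
    for file in &info.files {
        // Cargo re-runs the build script whenever any of these files change.
        println!("cargo:rerun-if-changed={}", file.display());
    }
    Ok(())
}
```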
- -*/ -pub(crate) mod error; -use error::Result; - -use snafu::ResultExt; -use std::path::{Path, PathBuf}; -use walkdir::{DirEntry, WalkDir}; - -pub(crate) struct ProjectInfo { - pub(crate) files: Vec, -} - -impl ProjectInfo { - /// Traverse the list of directories and produce a list of files to track. - pub(crate) fn crawl>(dirs: &[P]) -> Result { - let mut files = Vec::new(); - - for dir in dirs { - let walker = WalkDir::new(dir) - .follow_links(false) - .same_file_system(true) - .into_iter(); - - files.extend( - walker - .filter_entry(|e| !Self::ignored(e)) - .flat_map(|e| e.context(error::DirectoryWalkSnafu)) - .map(|e| e.into_path()) - .filter(|e| e.is_file()), - ); - } - - Ok(ProjectInfo { files }) - } - - /// Exclude hidden files and build artifacts from the list. - fn ignored(entry: &DirEntry) -> bool { - entry - .file_name() - .to_str() - .map(|s| s.starts_with('.') || s == "target" || s == "vendor" || s == "README.md") - .unwrap_or(false) - } -} diff --git a/tools/buildsys/src/project/error.rs b/tools/buildsys/src/project/error.rs deleted file mode 100644 index 03502682..00000000 --- a/tools/buildsys/src/project/error.rs +++ /dev/null @@ -1,10 +0,0 @@ -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub(crate) enum Error { - #[snafu(display("Failed to walk directory to find project files: {}", source))] - DirectoryWalk { source: walkdir::Error }, -} - -pub(super) type Result = std::result::Result; diff --git a/tools/buildsys/src/spec.rs b/tools/buildsys/src/spec.rs deleted file mode 100644 index 946b97f7..00000000 --- a/tools/buildsys/src/spec.rs +++ /dev/null @@ -1,74 +0,0 @@ -/*! -This module provides a very simple parser for RPM spec files. - -It does not attempt to expand macros or perform any meaningful validation. Its -only purpose is to extract Source and Patch declarations so they can be passed -to Cargo as files to watch for changes. - -*/ -pub(crate) mod error; -use error::Result; - -use snafu::ResultExt; -use std::collections::VecDeque; -use std::fs::File; -use std::io::{BufRead, BufReader}; -use std::path::{Path, PathBuf}; - -pub(crate) struct SpecInfo { - pub(crate) sources: Vec, - pub(crate) patches: Vec, -} - -impl SpecInfo { - /// Returns a list of 'Source' and 'Patch' lines found in a spec file. - pub(crate) fn new>(path: P) -> Result { - let (sources, patches) = Self::parse(path)?; - let sources = Self::filter(&sources); - let patches = Self::filter(&patches); - Ok(Self { sources, patches }) - } - - /// "Parse" a spec file, extracting values of potential interest. - fn parse>(path: P) -> Result<(Vec, Vec)> { - let path = path.as_ref(); - let f = File::open(path).context(error::SpecFileReadSnafu { path })?; - let f = BufReader::new(f); - - let mut sources = Vec::new(); - let mut patches = Vec::new(); - - for line in f.lines() { - let line = line.context(error::SpecFileReadSnafu { path })?; - - let mut tokens = line.split_whitespace().collect::>(); - if let Some(t) = tokens.pop_front() { - if t.starts_with("Source") { - if let Some(s) = tokens.pop_front() { - sources.push(s.into()); - } - } else if t.starts_with("Patch") { - if let Some(p) = tokens.pop_front() { - patches.push(p.into()); - } - } - } - } - - Ok((sources, patches)) - } - - /// Emitting a non-existent file for `rerun-if-changed` will cause Cargo - /// to always repeat the build. Therefore we exclude "files" that do not - /// exist or that point outside the package directory. We also exclude - /// anything that appears to be an unexpanded macro. 
- fn filter(input: &[String]) -> Vec { - input - .iter() - .filter(|s| !s.contains("%{")) - .map(PathBuf::from) - .filter(|p| p.components().count() == 1) - .filter(|p| p.file_name().is_some()) - .collect() - } -} diff --git a/tools/buildsys/src/spec/error.rs b/tools/buildsys/src/spec/error.rs deleted file mode 100644 index 969ccf32..00000000 --- a/tools/buildsys/src/spec/error.rs +++ /dev/null @@ -1,12 +0,0 @@ -use snafu::Snafu; -use std::io; -use std::path::PathBuf; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub(crate) enum Error { - #[snafu(display("Failed to read spec file '{}': {}", path.display(), source))] - SpecFileRead { path: PathBuf, source: io::Error }, -} - -pub(super) type Result = std::result::Result; diff --git a/tools/deny.toml b/tools/deny.toml deleted file mode 100644 index 584d80be..00000000 --- a/tools/deny.toml +++ /dev/null @@ -1,102 +0,0 @@ -[licenses] -unlicensed = "deny" - -# Deny licenses unless they are specifically listed here -copyleft = "deny" -allow-osi-fsf-free = "neither" -default = "deny" - -# We want really high confidence when inferring licenses from text -confidence-threshold = 0.93 - -# Commented license types are allowed but not currently used -allow = [ - "Apache-2.0", - "BSD-2-Clause", - "BSD-3-Clause", - "BSL-1.0", - # "CC0-1.0", - "ISC", - "MIT", - "OpenSSL", - "Unlicense", - "Zlib", -] - -exceptions = [ - { name = "webpki-roots", allow = ["MPL-2.0"], version = "*" }, - { name = "unicode-ident", version = "1.0.4", allow = ["MIT", "Apache-2.0", "Unicode-DFS-2016"] }, -] - -# https://github.com/hsivonen/encoding_rs The non-test code that isn't generated from the WHATWG data in this crate is -# under Apache-2.0 OR MIT. Test code is under CC0. -[[licenses.clarify]] -name = "encoding_rs" -version = "0.8.30" -expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause" -license-files = [ - { path = "COPYRIGHT", hash = 0x39f8ad31 } -] - -[[licenses.clarify]] -name = "ring" -expression = "MIT AND ISC AND OpenSSL" -license-files = [ - { path = "LICENSE", hash = 0xbd0eed23 }, -] - -[[licenses.clarify]] -name = "webpki" -expression = "ISC" -license-files = [ - { path = "LICENSE", hash = 0x001c7e6c }, -] - -[[licenses.clarify]] -name = "rustls-webpki" -expression = "ISC" -license-files = [ - { path = "LICENSE", hash = 0x001c7e6c }, -] - -[bans] -# Deny multiple versions or wildcard dependencies. -multiple-versions = "deny" -wildcards = "deny" - -skip = [ - # several dependencies are using multiple versions of base64 - { name = "base64" }, - # several dependencies are using an old version of bitflags - { name = "bitflags", version = "=1.3" }, - # several dependencies are using an old version of serde_yaml - { name = "serde_yaml", version = "=0.8" }, - # governor uses an old version of wasi - { name = "wasi", version = "=0.10.2" }, - # aws-sdk-rust is using an old version of fastrand - { name = "fastrand", version = "=1.9" }, - # aws-sdk-rust is using an old version of rustls, hyper-rustls, and tokio-rustls - { name = "rustls", version = "=0.20" }, - { name = "hyper-rustls", version = "=0.23" }, - { name = "tokio-rustls", version = "=0.23" }, - # kube-client uses an old version of redox_syscall - { name = "redox_syscall", version = "=0.2" }, -] - -skip-tree = [ - # windows-sys is not a direct dependency. mio and schannel - # are using different versions of windows-sys. we skip the - # dependency tree because windows-sys has many sub-crates - # that differ in major version. 
- { name = "windows-sys" }, - # generate-readme uses an old version of clap and other dependencies - { name = "generate-readme", version = "0.1.0" } -] - -[sources] -allow-git = [ - "https://github.com/bottlerocket-os/bottlerocket-test-system", -] -# Deny crates from unknown registries or git repositories. -unknown-registry = "deny" -unknown-git = "deny" diff --git a/tools/docker-go b/tools/docker-go deleted file mode 100755 index 50915d98..00000000 --- a/tools/docker-go +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bash - -# Helper script for running commands in a golang build/runtime environment for testing/vendoring/building a go module - -set -e -o pipefail - -usage() { - cat >&2 < - --go-version - --go-mod-cache - --command "" -Runs - -Required: - --module-path The path of the Go module to mount into the container - --sdk-image Name of the SDK image to use - --go-mod-cache The Go module cache path to mount into the container - --command The command to run in the SDK container -EOF -} - -required_arg() { - local arg="${1:?}" - local value="${2}" - if [ -z "${value}" ]; then - echo "ERROR: ${arg} is required" >&2 - exit 2 - fi -} - -# shellcheck disable=SC2124 # TODO: improve command interface (#2534) -parse_args() { - while [ ${#} -gt 0 ] ; do - case "${1}" in - --help ) usage; exit 0 ;; - --module-path ) shift; GO_MODULE_PATH="${1}" ;; - --sdk-image ) shift; SDK_IMAGE="${1}" ;; - --go-mod-cache ) shift; GO_MOD_CACHE="${1}" ;; - --command ) shift; COMMAND="${@:1}" ;; - *) ;; - esac - shift - done - - # Required arguments - required_arg "--module-path" "${GO_MODULE_PATH}" - required_arg "--sdk-image" "${SDK_IMAGE}" - required_arg "--go-mod-cache" "${GO_MOD_CACHE}" - required_arg "--command" "${COMMAND}" -} - -# We need to mount the ../.. parent of GO_MOD_CACHE -GOPATH=$(cd "${GO_MOD_CACHE}/../.." && pwd) - -DOCKER_RUN_ARGS="--network=host" - -parse_args "${@}" - -# Pass through relevant Go variables, from the config or environment. -go_env=( ) -for i in GOPROXY GONOPROXY GOPRIVATE GOSUMDB ; do - if command -v go >/dev/null 2>&1 ; then - govar="$(go env ${i})" - if [ -n "${govar}" ] ; then - go_env[${#go_env[@]}]="--env=${i}=${govar}" - fi - elif [ -n "${!i}" ] ; then - go_env[${#go_env[@]}]="--env=${i}=${!i}" - fi -done - -# Go accepts both lower and uppercase proxy variables, pass both through. 
-proxy_env=( ) -for i in http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY ; do - if [ -n "${!i}" ]; then - proxy_env[${#proxy_env[@]}]="--env=$i=${!i}" - fi -done - -docker run --rm \ - -e GOCACHE='/tmp/.cache' \ - -e GOPATH="${GOPATH}" \ - "${go_env[@]}" \ - "${proxy_env[@]}" \ - --user "$(id -u):$(id -g)" \ - --security-opt="label=disable" \ - ${DOCKER_RUN_ARGS} \ - -v "${GOPATH}":"${GOPATH}" \ - -v "${GO_MODULE_PATH}":"${GO_MODULE_PATH}" \ - -w "${GO_MODULE_PATH}" \ - "${SDK_IMAGE}" \ - bash -c "${COMMAND}" diff --git a/tools/infrasys/Cargo.toml b/tools/infrasys/Cargo.toml deleted file mode 100644 index 8579f62e..00000000 --- a/tools/infrasys/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "infrasys" -version = "0.1.0" -license = "Apache-2.0 OR MIT" -authors = ["Aashna Sheth "] -edition = "2021" -publish = false - -[dependencies] -async-trait = "0.1" -clap = { version = "4", features = ["derive"] } -hex = "0.4" -log = "0.4" -pubsys-config = { path = "../pubsys-config/", version = "0.1" } -aws-config = "0.55" -aws-types = "0.55" -aws-sdk-cloudformation = "0.28" -aws-sdk-s3 = "0.28" -serde_json = "1" -serde_yaml = "0.9" -sha2 = "0.10" -shell-words = "1" -simplelog = "0.12" -snafu = "0.7" -tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } -url = "2" - -[dev-dependencies] -assert-json-diff = "2" diff --git a/tools/infrasys/cloudformation-templates/kms_key_setup.yml b/tools/infrasys/cloudformation-templates/kms_key_setup.yml deleted file mode 100644 index 38517452..00000000 --- a/tools/infrasys/cloudformation-templates/kms_key_setup.yml +++ /dev/null @@ -1,30 +0,0 @@ -Parameters: - Alias: - Description: "Required. Alias for KMS key to be created" - Type: String - -Resources: - KMSKey: - Type: AWS::KMS::Key - Properties: - KeySpec: RSA_3072 - KeyUsage: SIGN_VERIFY - KeyPolicy: - Statement: - - Effect: Allow - Principal: - AWS: !Sub "arn:aws:iam::${AWS::AccountId}:root" - Action: "kms:*" - Resource: "*" - - KMSKeyAlias: - Type: AWS::KMS::Alias - DependsOn: - - KMSKey - Properties: - AliasName: !Sub "alias/${Alias}" - TargetKeyId: !Ref KMSKey - -Outputs: - KeyId: - Value: !GetAtt KMSKey.Arn diff --git a/tools/infrasys/cloudformation-templates/s3_setup.yml b/tools/infrasys/cloudformation-templates/s3_setup.yml deleted file mode 100644 index 31b4e9fe..00000000 --- a/tools/infrasys/cloudformation-templates/s3_setup.yml +++ /dev/null @@ -1,25 +0,0 @@ -Resources: - TUFRepoBucket: - Type: AWS::S3::Bucket - DeletionPolicy: Retain - Properties: - VersioningConfiguration: - Status: Enabled - AccessControl: LogDeliveryWrite - MetricsConfigurations: - - Id: BucketMetrics - BucketEncryption: - ServerSideEncryptionConfiguration: - - ServerSideEncryptionByDefault: - SSEAlgorithm: AES256 - PublicAccessBlockConfiguration: - BlockPublicAcls: True - BlockPublicPolicy: True - IgnorePublicAcls: True - RestrictPublicBuckets: True - -Outputs: - BucketName: - Value: !Ref TUFRepoBucket - RDN: - Value: !GetAtt TUFRepoBucket.RegionalDomainName diff --git a/tools/infrasys/src/error.rs b/tools/infrasys/src/error.rs deleted file mode 100644 index 1a3b668b..00000000 --- a/tools/infrasys/src/error.rs +++ /dev/null @@ -1,169 +0,0 @@ -use aws_sdk_s3::error::SdkError; -use snafu::Snafu; -use std::io; -use std::path::PathBuf; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub enum Error { - #[snafu(display( - "Failed to create CFN stack '{}' in '{}': {}", - stack_name, - region, - source - ))] - CreateStack { - stack_name: String, - region: 
String, - source: SdkError, - }, - - #[snafu(display( - "Received CREATE_FAILED status for CFN stack '{}' in '{}'", - stack_name, - region - ))] - CreateStackFailure { stack_name: String, region: String }, - - #[snafu(display("Error splitting shell command '{}': {}", command, source))] - CommandSplit { - command: String, - source: shell_words::ParseError, - }, - - #[snafu(display("Error reading Infra.toml: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display( - "Stuck in indefinite CREATE_IN_PROGRESS loop for CFN stack '{}' in '{}'", - stack_name, - region - ))] - CreateStackTimeout { stack_name: String, region: String }, - - #[snafu(display("No stack data returned for CFN stack '{}' in {}", stack_name, region))] - MissingStack { stack_name: String, region: String }, - - #[snafu(display( - "Failed to fetch stack details for CFN stack '{}' in '{}': {}", - stack_name, - region, - source - ))] - DescribeStack { - stack_name: String, - region: String, - source: SdkError, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("File already exists at '{}'", path.display()))] - FileExists { path: PathBuf }, - - #[snafu(display("Failed to open file at '{}': {}", path.display(), source))] - FileOpen { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to read file at '{}': {}", path.display(), source))] - FileRead { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to write file at '{}': {}", path.display(), source))] - FileWrite { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to get bucket policy statement for bucket '{}'", bucket_name))] - GetPolicyStatement { bucket_name: String }, - - #[snafu(display("Failed to convert '{}' to yaml: {}", what, source))] - InvalidJson { - what: String, - source: serde_json::Error, - }, - - #[snafu(display("Invalid path '{}' for '{}'", path.display(), thing))] - InvalidPath { path: PathBuf, thing: String }, - - #[snafu(display("Publication/Root key threshold must be <= {}, currently {}", num_keys.to_string(), threshold))] - InvalidThreshold { threshold: String, num_keys: usize }, - - #[snafu(display("Failed to convert updated Infra.toml information to yaml: {}", source))] - InvalidYaml { source: serde_yaml::Error }, - - #[snafu(display( - "Failed to create keys due to invalid key config. Missing '{}'.", - missing - ))] - KeyConfig { missing: String }, - - #[snafu(display( - "Failed to create new keys or access pre-existing keys in available_keys list." 
- ))] - KeyCreation, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - - #[snafu(display("Infra.toml is missing '{}'", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] - Mkdir { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to get parent of path '{}'", path.display()))] - Parent { path: PathBuf }, - - #[snafu(display("Failed to parse '{}' to int: {}", what, source))] - ParseInt { - what: String, - source: std::num::ParseIntError, - }, - - #[snafu(display("Failed to find default region"))] - DefaultRegion, - - #[snafu(display("Unable to parse stack status"))] - ParseStatus, - - #[snafu(display( - "Failed to find field '{}' after attempting to create resource '{}'", - what, - resource_name - ))] - ParseResponse { what: String, resource_name: String }, - - #[snafu(display("Failed to convert '{}' to URL: {}", input, source))] - ParseUrl { - input: String, - source: url::ParseError, - }, - - #[snafu(display("Failed to push object to bucket '{}': {}", bucket_name, source))] - PutObject { - bucket_name: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to update bucket policy for bucket '{}': {}", - bucket_name, - source - ))] - PutPolicy { - bucket_name: String, - source: SdkError, - }, - - #[snafu(display("Failed to create async runtime: {}", source))] - Runtime { source: std::io::Error }, - - #[snafu(display("'tuftool {}' returned {}", command, code))] - TuftoolResult { command: String, code: String }, - - #[snafu(display("Failed to start tuftool: {}", source))] - TuftoolSpawn { source: io::Error }, -} - -pub type Result = std::result::Result; diff --git a/tools/infrasys/src/keys.rs b/tools/infrasys/src/keys.rs deleted file mode 100644 index a00283c2..00000000 --- a/tools/infrasys/src/keys.rs +++ /dev/null @@ -1,150 +0,0 @@ -use async_trait::async_trait; -use aws_sdk_cloudformation::Client as CloudFormationClient; -use aws_types::region::Region; -use pubsys_config::{KMSKeyConfig, SigningKeyConfig}; -use snafu::{OptionExt, ResultExt}; -use std::fs; - -use super::{error, shared, Result}; - -/// Creates keys using data stored in SigningKeyConfig enum -/// Output: Edits KMSConfig fields in place after creating new keys -pub async fn create_keys(signing_key_config: &mut SigningKeyConfig) -> Result<()> { - // An extra check even through these parameters are checked earlier in main.rs - check_signing_key_config(signing_key_config)?; - match signing_key_config { - SigningKeyConfig::file { .. } => (), - SigningKeyConfig::kms { config, .. } => { - config - .as_mut() - .context(error::MissingConfigSnafu { - missing: "config field for a kms key", - })? - .create_kms_keys() - .await?; - } - SigningKeyConfig::ssm { .. } => (), - } - Ok(()) -} - -pub fn check_signing_key_config(signing_key_config: &SigningKeyConfig) -> Result<()> { - match signing_key_config { - SigningKeyConfig::file { .. } => (), - SigningKeyConfig::kms { config, .. 
} => { - let config = config.as_ref().context(error::MissingConfigSnafu { - missing: "config field for kms keys", - })?; - - match ( - config.available_keys.is_empty(), - config.regions.is_empty(), - config.key_alias.as_ref(), - ) { - // everything is unspecified (no way to allocate a key_id) - (true, true, None) => error::KeyConfigSnafu { - missing: "an available_key or region/key_alias", - } - .fail()?, - // regions is populated, but no key alias - // (it doesn't matter if available keys are listed or not) - (_, false, None) => error::KeyConfigSnafu { - missing: "key_alias", - } - .fail()?, - // key alias is populated, but no key regions to create keys in - // (it doesn't matter if available keys are listed or not) - (_, true, Some(..)) => error::KeyConfigSnafu { missing: "region" }.fail()?, - _ => (), - }; - } - SigningKeyConfig::ssm { .. } => (), - } - Ok(()) -} - -/// Must create a trait because can't directly implement a method for an struct in an -/// external crate like KMSKeyConfig (which lives in pubsys-config/lib.rs) -#[async_trait] -trait KMSKeyConfigExt { - async fn create_kms_keys(&mut self) -> Result<()>; -} - -/// Creates new KMS keys using cloudformation in regions specified -/// Input Conditions: Alias+Region or AvailableKeys must be specified -/// Output: Populates KMSKeyConfig with information about resources created -/// 'available-keys' starts as a map of pre-existing keyids:regions and will end as a -/// map of pre-existing and generated keyids:regions, -/// 'key-stack-arns' starts empty and will end as a -/// map of keyids:stackarn if new keys are created -#[async_trait] -impl KMSKeyConfigExt for KMSKeyConfig { - async fn create_kms_keys(&mut self) -> Result<()> { - // Generating new keys (if regions is non-empty) - for region in self.regions.iter() { - let stack_name = format!( - "TUF-KMS-{}", - self.key_alias.as_ref().context(error::KeyConfigSnafu { - missing: "key_alias", - })? - ); - - let config = aws_config::from_env() - .region(Region::new(region.to_owned())) - .load() - .await; - let cfn_client = CloudFormationClient::new(&config); - - let cfn_filepath = format!( - "{}/infrasys/cloudformation-templates/kms_key_setup.yml", - shared::getenv("BUILDSYS_TOOLS_DIR")? - ); - let cfn_template = fs::read_to_string(&cfn_filepath) - .context(error::FileReadSnafu { path: cfn_filepath })?; - - let stack_result = cfn_client - .create_stack() - .parameters(shared::create_parameter( - "Alias".to_string(), - self.key_alias - .as_ref() - .context(error::KeyConfigSnafu { - missing: "key_alias", - })? 
- .to_string(), - )) - .stack_name(stack_name.clone()) - .template_body(cfn_template.clone()) - .send() - .await - .context(error::CreateStackSnafu { - stack_name: &stack_name, - region, - })?; - - let stack_arn = stack_result - .clone() - .stack_id - .context(error::ParseResponseSnafu { - what: "stack_id", - resource_name: &stack_name, - })?; - - let output_array = shared::get_stack_outputs(&cfn_client, &stack_name, region).await?; - let key_id = - output_array[0] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[0].output_value (key id)", - resource_name: stack_name, - })?; - self.available_keys - .insert(key_id.to_string(), region.to_string()); - self.key_stack_arns - .insert(key_id.to_string(), stack_arn.to_string()); - } - - Ok(()) - } -} diff --git a/tools/infrasys/src/main.rs b/tools/infrasys/src/main.rs deleted file mode 100644 index 7fa8ce81..00000000 --- a/tools/infrasys/src/main.rs +++ /dev/null @@ -1,361 +0,0 @@ -mod error; -mod keys; -mod root; -mod s3; -mod shared; - -use aws_sdk_cloudformation::config::Region; -use clap::Parser; -use error::Result; -use log::{error, info}; -use pubsys_config::{InfraConfig, RepoConfig, S3Config, SigningKeyConfig}; -use sha2::{Digest, Sha512}; -use shared::KeyRole; -use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashMap; -use std::num::NonZeroUsize; -use std::path::{Path, PathBuf}; -use std::{fs, process}; -use tokio::runtime::Runtime; -use url::Url; - -// =^..^= =^..^= =^..^= SUB-COMMAND STRUCTS =^..^= =^..^= =^..^= - -#[derive(Debug, Parser)] -struct Args { - #[arg(global = true, long, default_value = "INFO")] - log_level: LevelFilter, - - // Path to Infra.toml (NOTE: must be specified before subcommand) - #[arg(long)] - infra_config_path: PathBuf, - - #[command(subcommand)] - subcommand: SubCommand, -} - -#[derive(Debug, Parser)] -struct CreateInfraArgs { - /// Path to the root.json file. - #[arg(long)] - root_role_path: PathBuf, -} - -#[derive(Debug, Parser)] -enum SubCommand { - /// Creates infrastructure specified in the Infra.toml file. - CreateInfra(CreateInfraArgs), -} - -// =^..^= =^..^= =^..^= MAIN METHODS =^..^= =^..^= =^..^= - -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - match args.log_level { - // Set log level for AWS SDK to error to reduce verbosity. - LevelFilter::Info => { - CombinedLogger::init(vec![ - SimpleLogger::new( - LevelFilter::Info, - ConfigBuilder::new() - .add_filter_ignore_str("aws_config") - .add_filter_ignore_str("aws_smithy") - .add_filter_ignore_str("tracing::span") - .build(), - ), - SimpleLogger::new( - LevelFilter::Warn, - ConfigBuilder::new() - .add_filter_allow_str("aws_config") - .add_filter_allow_str("aws_smithy") - .add_filter_allow_str("tracing::span") - .build(), - ), - ]) - .context(error::LoggerSnafu)?; - } - - // Set the supplied log level across the whole crate. - _ => { - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? 
- } - } - - match args.subcommand { - SubCommand::CreateInfra(ref run_task_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - create_infra(&args.infra_config_path, &run_task_args.root_role_path).await - }) - } - } -} - -fn check_infra_lock(toml_path: &Path) -> Result<()> { - let lock_path = InfraConfig::compute_lock_path(toml_path).context(error::ConfigSnafu)?; - - ensure!(!lock_path.is_file(), { - error!( - "It looks like you've already created some resources for your custom TUF repository because a lock file exists at '{}'. - \nPlease clean up your TUF resources in AWS, delete Infra.lock, and run again.", - lock_path.display() - ); - error::FileExistsSnafu { path: lock_path } - }); - Ok(()) -} - -/// Automates setting up infrastructure for a custom TUF repo -async fn create_infra(toml_path: &Path, root_role_path: &Path) -> Result<()> { - check_infra_lock(toml_path)?; - info!("Parsing Infra.toml..."); - let mut infra_config = InfraConfig::from_path(toml_path).context(error::ConfigSnafu)?; - let repos = infra_config - .repo - .as_mut() - .context(error::MissingConfigSnafu { missing: "repo" })?; - let s3_info_map = infra_config - .aws - .as_mut() - .context(error::MissingConfigSnafu { missing: "aws" })? - .s3 - .as_mut() - .context(error::MissingConfigSnafu { missing: "aws.s3" })?; - - for (repo_name, repo_config) in repos.iter_mut() { - // Validate repo_config and unwrap required optional data - let mut repo_info = ValidRepoInfo::new(repo_config, repo_name, s3_info_map)?; - - // Validate the key configurations and root file - keys::check_signing_key_config(repo_info.signing_keys)?; - keys::check_signing_key_config(repo_info.root_keys)?; - root::check_root(root_role_path)?; - - // Create the repo - let (s3_stack_arn, bucket_name, bucket_rdn) = - create_repo_infrastructure(&mut repo_info).await?; - *repo_info.stack_arn = Some(s3_stack_arn); - *repo_info.bucket_name = Some(bucket_name.clone()); - update_root_and_sign_root(&mut repo_info, root_role_path).await?; - - // Upload root.json. - info!("Uploading root.json to S3 bucket..."); - s3::upload_file( - &repo_info.s3_region, - &bucket_name, - &repo_info.prefix, - root_role_path, - ) - .await?; - - // Update infra_config with output parameters if not already set - if repo_info.metadata_base_url.is_none() { - *repo_info.metadata_base_url = Some( - Url::parse(format!("https://{}{}/", &bucket_rdn, &repo_info.prefix).as_str()) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - if repo_info.targets_url.is_none() { - *repo_info.targets_url = Some( - Url::parse( - format!("https://{}{}/targets/", &bucket_rdn, &repo_info.prefix).as_str(), - ) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - if repo_info.root_role_url.is_none() { - *repo_info.root_role_url = Some( - Url::parse( - format!("https://{}{}/root.json", &bucket_rdn, &repo_info.prefix).as_str(), - ) - .context(error::ParseUrlSnafu { input: &bucket_rdn })?, - ); - } - let root_role_data = fs::read_to_string(root_role_path).context(error::FileReadSnafu { - path: root_role_path, - })?; - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - repo_config.root_role_sha512 = Some(digest); - } - - // Generate Infra.lock - info!("Writing Infra.lock..."); - let yaml_string = serde_yaml::to_string(&infra_config).context(error::InvalidYamlSnafu)?; - fs::write( - toml_path - .parent() - .context(error::ParentSnafu { path: toml_path })? 
- .join("Infra.lock"), - yaml_string, - ) - .context(error::FileWriteSnafu { path: toml_path })?; - - info!("Complete!"); - Ok(()) -} - -struct ValidRepoInfo<'a> { - bucket_name: &'a mut Option, - metadata_base_url: &'a mut Option, - prefix: String, - pub_key_threshold: &'a NonZeroUsize, - root_key_threshold: &'a NonZeroUsize, - root_keys: &'a mut SigningKeyConfig, - root_role_url: &'a mut Option, - s3_region: Region, - s3_stack_name: String, - signing_keys: &'a mut SigningKeyConfig, - stack_arn: &'a mut Option, - targets_url: &'a mut Option, - vpce_id: &'a String, -} - -impl<'a> ValidRepoInfo<'a> { - fn new( - repo_config: &'a mut RepoConfig, - repo_name: &str, - s3_info_map: &'a mut HashMap, - ) -> Result { - let s3_stack_name = - repo_config - .file_hosting_config_name - .to_owned() - .context(error::MissingConfigSnafu { - missing: "file_hosting_config_name", - })?; - let s3_info = s3_info_map - .get_mut(&s3_stack_name) - .context(error::MissingConfigSnafu { - missing: format!("aws.s3 config with name {}", s3_stack_name), - })?; - Ok(ValidRepoInfo { - s3_stack_name: s3_stack_name.to_string(), - s3_region: Region::new(s3_info.region.as_ref().cloned().context( - error::MissingConfigSnafu { - missing: format!("region for '{}' s3 config", s3_stack_name), - }, - )?), - bucket_name: &mut s3_info.bucket_name, - stack_arn: &mut s3_info.stack_arn, - vpce_id: s3_info - .vpc_endpoint_id - .as_ref() - .context(error::MissingConfigSnafu { - missing: format!("vpc_endpoint_id for '{}' s3 config", s3_stack_name), - })?, - prefix: s3::format_prefix(&s3_info.s3_prefix), - signing_keys: repo_config - .signing_keys - .as_mut() - .context(error::MissingConfigSnafu { - missing: format!("signing_keys for '{}' repo config", repo_name), - })?, - root_keys: repo_config - .root_keys - .as_mut() - .context(error::MissingConfigSnafu { - missing: format!("root_keys for '{}' repo config", repo_name), - })?, - root_key_threshold: repo_config.root_key_threshold.as_mut().context( - error::MissingConfigSnafu { - missing: format!("root_key_threshold for '{}' repo config", repo_name), - }, - )?, - pub_key_threshold: repo_config.pub_key_threshold.as_ref().context( - error::MissingConfigSnafu { - missing: format!("pub_key_threshold for '{}' repo config", repo_name), - }, - )?, - root_role_url: &mut repo_config.root_role_url, - targets_url: &mut repo_config.targets_url, - metadata_base_url: &mut repo_config.metadata_base_url, - }) - } -} - -async fn create_repo_infrastructure( - repo_info: &'_ mut ValidRepoInfo<'_>, -) -> Result<(String, String, String)> { - // Create S3 bucket - info!("Creating S3 bucket..."); - let (s3_stack_arn, bucket_name, bucket_rdn) = - s3::create_s3_bucket(&repo_info.s3_region, &repo_info.s3_stack_name).await?; - - // Add Bucket Policy to newly created bucket - s3::add_bucket_policy( - &repo_info.s3_region, - &bucket_name, - &repo_info.prefix, - repo_info.vpce_id, - ) - .await?; - - // Create root + publication keys - info!("Creating KMS Keys..."); - keys::create_keys(repo_info.signing_keys).await?; - keys::create_keys(repo_info.root_keys).await?; - Ok((s3_stack_arn, bucket_name, bucket_rdn)) -} - -async fn update_root_and_sign_root( - repo_info: &'_ mut ValidRepoInfo<'_>, - root_role_path: &Path, -) -> Result<()> { - // Create and populate (add/sign) root.json - info!("Creating and signing root.json..."); - root::create_root(root_role_path)?; - // Add keys (for both roles) - root::add_keys( - repo_info.signing_keys, - &KeyRole::Publication, - repo_info.pub_key_threshold, - 
&root_role_path.display().to_string(), - )?; - root::add_keys( - repo_info.root_keys, - &KeyRole::Root, - repo_info.root_key_threshold, - &root_role_path.display().to_string(), - )?; - // Sign root with all root keys - root::sign_root(repo_info.root_keys, &root_role_path.display().to_string())?; - Ok(()) -} - -// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^= - -#[cfg(test)] -mod tests { - use super::{fs, shared, InfraConfig}; - - #[test] - fn toml_yaml_conversion() { - let test_toml_path = format!( - "{}/test_tomls/toml_yaml_conversion.toml", - shared::getenv("CARGO_MANIFEST_DIR").unwrap() - ); - let toml_struct = InfraConfig::from_path(&test_toml_path).unwrap(); - let yaml_string = serde_yaml::to_string(&toml_struct).expect("Could not write to file!"); - - let test_yaml_path = format!( - "{}/test_tomls/toml_yaml_conversion.yml", - shared::getenv("CARGO_MANIFEST_DIR").unwrap() - ); - fs::write(&test_yaml_path, &yaml_string).expect("Could not write to file!"); - let decoded_yaml = InfraConfig::from_lock_path(&test_yaml_path).unwrap(); - - assert_eq!(toml_struct, decoded_yaml); - } -} diff --git a/tools/infrasys/src/root.rs b/tools/infrasys/src/root.rs deleted file mode 100644 index bd0c6108..00000000 --- a/tools/infrasys/src/root.rs +++ /dev/null @@ -1,206 +0,0 @@ -use super::{error, KeyRole, Result}; -use aws_config::meta::region::RegionProviderChain; -use log::{trace, warn}; -use pubsys_config::SigningKeyConfig; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs; -use std::num::NonZeroUsize; -use std::path::Path; -use std::process::Command; - -/// The tuftool macro wraps Command to simplify calls to tuftool, adding region functionality. -macro_rules! tuftool { - ($region:expr, $format_str:expr, $($format_arg:expr),*) => { - let arg_str = format!($format_str, $($format_arg),*); - trace!("tuftool arg string: {}", arg_str); - let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?; - trace!("tuftool split args: {:#?}", args); - - let status = Command::new("tuftool") - .args(args) - .env("AWS_REGION", $region) - .status() - .context(error::TuftoolSpawnSnafu)?; - - ensure!(status.success(), error::TuftoolResultSnafu { - command: arg_str, - code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) - }); - } -} - -pub fn check_root(root_role_path: &Path) -> Result<()> { - ensure!(!root_role_path.is_file(), { - warn!("Cowardly refusing to overwrite the existing root.json at {}. Please manually delete it and run again.", root_role_path.display()); - error::FileExistsSnafu { - path: root_role_path, - } - }); - Ok(()) -} -pub fn get_region() -> Result { - let rt = tokio::runtime::Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { async_get_region().await }) -} - -async fn async_get_region() -> Result { - let default_region_fallback = "us-east-1"; - let default_region = RegionProviderChain::default_provider() - .or_else(default_region_fallback) - .region() - .await - .context(error::DefaultRegionSnafu)? - .to_string(); - Ok(default_region) -} - -/// Creates the directory where root.json will live and creates root.json itself according to details specified in root-role-path -pub fn create_root(root_role_path: &Path) -> Result<()> { - // Make /roles and /keys directories, if they don't exist, so we can write generated files. 
- let role_dir = root_role_path.parent().context(error::InvalidPathSnafu { - path: root_role_path, - thing: "root role", - })?; - fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; - let default_region = get_region()?; - - // Initialize root - tuftool!(&default_region, "root init '{}'", root_role_path.display()); - tuftool!( - &default_region, - // TODO: expose expiration date as a configurable parameter - "root expire '{}' 'in 52 weeks'", - root_role_path.display() - ); - Ok(()) -} - -/// Adds keys to root.json according to key type -pub fn add_keys( - signing_key_config: &mut SigningKeyConfig, - role: &KeyRole, - threshold: &NonZeroUsize, - filepath: &str, -) -> Result<()> { - match signing_key_config { - SigningKeyConfig::file { .. } => (), - SigningKeyConfig::kms { key_id, config, .. } => add_keys_kms( - &config - .as_ref() - .context(error::MissingConfigSnafu { - missing: "config field for a kms key", - })? - .available_keys, - role, - threshold, - filepath, - key_id, - )?, - SigningKeyConfig::ssm { .. } => (), - } - Ok(()) -} - -/// Adds KMSKeys to root.json given root or publication type -/// Input: available-keys (keys to sign with), role (root or publication), threshold for role, filepath for root.JSON, -/// mutable key_id -/// Output: in-place edit of root.json and key_id with a valid publication key -/// (If key-id is populated, it will not change. Otherwise, it will be populated with a key-id of an available key) -fn add_keys_kms( - available_keys: &HashMap, - role: &KeyRole, - threshold: &NonZeroUsize, - filepath: &str, - key_id: &mut Option, -) -> Result<()> { - ensure!( - (*available_keys).len() >= (*threshold).get(), - error::InvalidThresholdSnafu { - threshold: threshold.to_string(), - num_keys: (*available_keys).len(), - } - ); - let default_region = get_region()?; - match role { - KeyRole::Root => { - tuftool!( - &default_region, - "root set-threshold '{}' root '{}' ", - filepath, - threshold.to_string() - ); - for (keyid, region) in available_keys.iter() { - tuftool!( - region, - "root add-key '{}' aws-kms:///'{}' --role root", - filepath, - keyid - ); - } - } - KeyRole::Publication => { - tuftool!( - &default_region, - "root set-threshold '{}' snapshot '{}' ", - filepath, - threshold.to_string() - ); - tuftool!( - &default_region, - "root set-threshold '{}' targets '{}' ", - filepath, - threshold.to_string() - ); - tuftool!( - &default_region, - "root set-threshold '{}' timestamp '{}' ", - filepath, - threshold.to_string() - ); - for (keyid, region) in available_keys.iter() { - tuftool!( - region, - "root add-key '{}' aws-kms:///'{}' --role snapshot --role targets --role timestamp", - filepath, - keyid - ); - } - - // Set key_id using a publication key (if one is not already provided) - if key_id.is_none() { - *key_id = Some( - available_keys - .iter() - .next() - .context(error::KeyCreationSnafu)? - .0 - .to_string(), - ); - } - } - } - - Ok(()) -} - -/// Signs root with available_keys under root_keys (will have a different tuftool command depending on key type) -pub fn sign_root(signing_key_config: &SigningKeyConfig, filepath: &str) -> Result<()> { - match signing_key_config { - SigningKeyConfig::file { .. } => (), - SigningKeyConfig::kms { config, .. } => { - for (keyid, region) in config - .as_ref() - .context(error::MissingConfigSnafu { - missing: "KMS key details", - })? - .available_keys - .iter() - { - tuftool!(region, "root sign '{}' -k aws-kms:///'{}'", filepath, keyid); - } - } - SigningKeyConfig::ssm { .. 
} => (), - } - Ok(()) -} diff --git a/tools/infrasys/src/s3.rs b/tools/infrasys/src/s3.rs deleted file mode 100644 index 6fc9c804..00000000 --- a/tools/infrasys/src/s3.rs +++ /dev/null @@ -1,369 +0,0 @@ -use aws_sdk_cloudformation::{config::Region, Client as CloudFormationClient}; -use aws_sdk_s3::Client as S3Client; -use snafu::{OptionExt, ResultExt}; -use std::fs; -use std::fs::File; -use std::io::prelude::*; -use std::path::{Path, PathBuf}; - -use super::{error, shared, Result}; - -pub fn format_prefix(prefix: &str) -> String { - if prefix.is_empty() { - return prefix.to_string(); - } - let formatted = { - if prefix.starts_with('/') { - prefix.to_string() - } else { - format!("/{}", prefix) - } - }; - if formatted.ends_with('/') { - formatted[..formatted.len() - 1].to_string() - } else if formatted.ends_with("/*") { - formatted[..formatted.len() - 2].to_string() - } else { - formatted - } -} - -/// Creates a *private* S3 Bucket using a CloudFormation template -/// Input: The region in which the bucket will be created and the name of the bucket -/// Output: The stack_arn of the stack w/ the S3 bucket, the CFN allocated bucket name, -/// and the bucket url (for the url fields in Infra.lock) -pub async fn create_s3_bucket( - region: &Region, - stack_name: &str, -) -> Result<(String, String, String)> { - // TODO: Add support for accommodating pre-existing buckets (skip this creation process) - let config = aws_config::from_env() - .region(region.to_owned()) - .load() - .await; - let cfn_client = CloudFormationClient::new(&config); - - let cfn_filepath: PathBuf = format!( - "{}/infrasys/cloudformation-templates/s3_setup.yml", - shared::getenv("BUILDSYS_TOOLS_DIR")? - ) - .into(); - let cfn_template = - fs::read_to_string(&cfn_filepath).context(error::FileReadSnafu { path: cfn_filepath })?; - - let stack_result = cfn_client - .create_stack() - .stack_name(stack_name.to_string()) - .template_body(cfn_template.clone()) - .send() - .await - .context(error::CreateStackSnafu { - stack_name, - region: region.as_ref(), - })?; - // We don't have to wait for successful stack creation to grab the stack ARN - let stack_arn = stack_result - .clone() - .stack_id - .context(error::ParseResponseSnafu { - what: "stack_id", - resource_name: stack_name, - })?; - - // Grab the StackOutputs to get the Bucketname and BucketURL - let output_array = shared::get_stack_outputs(&cfn_client, stack_name, region.as_ref()).await?; - let bucket_name = output_array[0] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[0].output_value (bucket name)", - resource_name: stack_name, - })? - .to_string(); - let bucket_rdn = output_array[1] - .output_value - .as_ref() - .context(error::ParseResponseSnafu { - what: "outputs[1].output_value (bucket url)", - resource_name: stack_name, - })? 
- .to_string(); - - Ok((stack_arn, bucket_name, bucket_rdn)) -} - -/// Adds a BucketPolicy allowing GetObject access to a specified VPC -/// Input: Region, Name of bucket, which prefix root.json should be put under, and vpcid -/// Note that the prefix parameter must have the format "//*" and the bucket name "" -/// Output: Doesn't need to save any metadata from this action -pub async fn add_bucket_policy( - region: &Region, - bucket_name: &str, - prefix: &str, - vpcid: &str, -) -> Result<()> { - // Get old policy - let config = aws_config::from_env() - .region(region.to_owned()) - .load() - .await; - let s3_client = S3Client::new(&config); - let mut policy: serde_json::Value = match s3_client - .get_bucket_policy() - .bucket(bucket_name.to_string()) - .send() - .await - { - Ok(output) => serde_json::from_str(&output.policy.context(error::ParseResponseSnafu { - what: "policy", - resource_name: bucket_name, - })?) - .context(error::InvalidJsonSnafu { - what: format!("retrieved bucket policy for {}", &bucket_name), - })?, - - Err(..) => serde_json::from_str( - r#"{"Version": "2008-10-17", - "Statement": []}"#, - ) - .context(error::InvalidJsonSnafu { - what: format!("new bucket policy for {}", &bucket_name), - })?, - }; - - // Create a new policy - let new_bucket_policy = serde_json::from_str(&format!( - r#"{{ - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::{}{}/*", - "Condition": {{ - "StringEquals": {{ - "aws:sourceVpce": "{}" - }} - }} - }}"#, - bucket_name, prefix, vpcid - )) - .context(error::InvalidJsonSnafu { - what: format!("new bucket policy for {}", &bucket_name), - })?; - - // Append new policy onto old one - policy - .get_mut("Statement") - .context(error::GetPolicyStatementSnafu { bucket_name })? - .as_array_mut() - .context(error::GetPolicyStatementSnafu { bucket_name })? 
- .push(new_bucket_policy); - - // Push the new policy as a string - s3_client - .put_bucket_policy() - .bucket(bucket_name.to_string()) - .policy( - serde_json::to_string(&policy).context(error::InvalidJsonSnafu { - what: format!("new bucket policy for {}", &bucket_name), - })?, - ) - .send() - .await - .context(error::PutPolicySnafu { bucket_name })?; - - Ok(()) -} - -/// Uploads root.json to S3 Bucket (automatically creates the folder that the bucket policy was scoped to or will simply add to it) -/// Input: Region, Name of bucket, which prefix root.json should be put under, and path to the S3 bucket CFN template -/// Note that the prefix parameter must have the format "/" and the bucket name "" -/// Output: Doesn't need to save any metadata from this action -pub async fn upload_file( - region: &Region, - bucket_name: &str, - prefix: &str, - file_path: &Path, -) -> Result<()> { - let config = aws_config::from_env() - .region(region.to_owned()) - .load() - .await; - let s3_client = S3Client::new(&config); - - // File --> Bytes - let mut file = File::open(file_path).context(error::FileOpenSnafu { path: file_path })?; - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer) - .context(error::FileReadSnafu { path: file_path })?; - - s3_client - .put_object() - .bucket(format!("{}{}", bucket_name, prefix)) - .key("root.json".to_string()) - .body(aws_sdk_s3::primitives::ByteStream::from(buffer)) - .send() - .await - .context(error::PutObjectSnafu { bucket_name })?; - - Ok(()) -} - -// =^..^= =^..^= =^..^= TESTS =^..^= =^..^= =^..^= - -#[cfg(test)] -mod tests { - use super::format_prefix; - use assert_json_diff::assert_json_include; - - #[test] - fn format_prefix_test() { - let valid = "/prefix"; - let missing_slash = "prefix"; - let excess_ending_1 = "/prefix/"; - let excess_ending_2 = "/prefix/*"; - let slash_and_excess_ending = "prefix/*"; - let empty = ""; - let single_slash = "/"; - - assert_eq!("/prefix", format_prefix(valid)); - assert_eq!("/prefix", format_prefix(missing_slash)); - assert_eq!("/prefix", format_prefix(excess_ending_1)); - assert_eq!("/prefix", format_prefix(excess_ending_2)); - assert_eq!("/prefix", format_prefix(slash_and_excess_ending)); - assert_eq!("", format_prefix(empty)); - assert_eq!("", format_prefix(single_slash)); - } - - #[test] - fn empty_bucket_policy() { - let mut policy: serde_json::Value = serde_json::from_str( - r#"{"Version": "2008-10-17", - "Statement": []}"#, - ) - .unwrap(); - - let new_bucket_policy = serde_json::from_str(&format!( - r#"{{ - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::{}{}/*", - "Condition": {{ - "StringEquals": {{ - "aws:sourceVpce": "{}" - }} - }} - }}"#, - "test-bucket-name", "/test-prefix", "testvpc123" - )) - .unwrap(); - - policy - .get_mut("Statement") - .unwrap() - .as_array_mut() - .unwrap() - .push(new_bucket_policy); - - let expected_policy: serde_json::Value = serde_json::from_str( - r#"{ - "Version": "2008-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - } - ] - }"#, - ) - .unwrap(); - - assert_json_include!(expected: expected_policy, actual: &policy); - } - - #[test] - fn populated_bucket_policy() { - let mut policy: serde_json::Value = serde_json::from_str( - r#"{ - "Version": "2008-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": 
"s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - } - ] - }"#, - ) - .unwrap(); - - let new_bucket_policy = serde_json::from_str(&format!( - r#"{{ - "Effect": "Deny", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::{}{}/*", - "Condition": {{ - "StringEquals": {{ - "aws:sourceVpce": "{}" - }} - }} - }}"#, - "test-bucket-name", "/test-prefix", "testvpc123" - )) - .unwrap(); - - policy - .get_mut("Statement") - .unwrap() - .as_array_mut() - .unwrap() - .push(new_bucket_policy); - - let expected_policy: serde_json::Value = serde_json::from_str( - r#"{ - "Version": "2008-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - }, - { - "Effect": "Deny", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket-name/test-prefix/*", - "Condition": { - "StringEquals": { - "aws:sourceVpce": "testvpc123" - } - } - } - ] - }"#, - ) - .unwrap(); - - assert_json_include!(expected: expected_policy, actual: &policy); - } -} diff --git a/tools/infrasys/src/shared.rs b/tools/infrasys/src/shared.rs deleted file mode 100644 index a12a6770..00000000 --- a/tools/infrasys/src/shared.rs +++ /dev/null @@ -1,99 +0,0 @@ -use aws_sdk_cloudformation::types::{Output, Parameter}; -use aws_sdk_cloudformation::Client as CloudFormationClient; -use clap::Parser; -use log::info; -use snafu::{ensure, OptionExt, ResultExt}; -use std::{env, thread, time}; - -use super::{error, Result}; - -#[derive(Debug, Parser)] -pub enum KeyRole { - Root, - Publication, -} - -/// Retrieve a BUILDSYS_* variable that we expect to be set in the environment -pub fn getenv(var: &str) -> Result { - env::var(var).context(error::EnvironmentSnafu { var }) -} - -/// Generates a parameter type object used to specify parameters in CloudFormation templates -pub fn create_parameter(key: String, val: String) -> Parameter { - Parameter::builder() - .parameter_key(key) - .parameter_value(val) - .build() -} - -/// Polls cfn_client for stack_name in region until it's ready -/// Once stack is created, we can grab the outputs (before this point, outputs are empty) -pub async fn get_stack_outputs( - cfn_client: &CloudFormationClient, - stack_name: &str, - region: &str, -) -> Result> { - let mut stack_outputs = cfn_client - .describe_stacks() - .stack_name(stack_name) - .send() - .await - .context(error::DescribeStackSnafu { stack_name, region })? - .stacks - .context(error::ParseResponseSnafu { - what: "stacks", - resource_name: stack_name, - })? - .first() - .context(error::MissingStackSnafu { stack_name, region })? - .clone(); - - // Checking that keys have been created so we can return updated outputs - let mut status = stack_outputs - .stack_status() - .context(error::ParseStatusSnafu)? 
- .as_str(); - // Max wait is 30 mins (90 attempts * 20s = 1800s = 30mins) - let mut max_attempts: u32 = 90; - while status != "CREATE_COMPLETE" { - ensure!( - max_attempts > 0, - error::CreateStackTimeoutSnafu { stack_name, region } - ); - ensure!( - status != "CREATE_FAILED", - error::CreateStackFailureSnafu { stack_name, region } - ); - info!( - "Waiting for stack resources to be ready, current status is '{}'...", - status - ); - thread::sleep(time::Duration::from_secs(20)); - stack_outputs = cfn_client - .describe_stacks() - .stack_name(stack_name) - .send() - .await - .context(error::DescribeStackSnafu { stack_name, region })? - .stacks - .context(error::ParseResponseSnafu { - what: "stacks", - resource_name: stack_name, - })? - .first() - .context(error::MissingStackSnafu { stack_name, region })? - .clone(); - status = stack_outputs - .stack_status() - .context(error::ParseStatusSnafu)? - .as_str(); - max_attempts -= 1; - } - - let output_array = stack_outputs.outputs.context(error::ParseResponseSnafu { - what: "outputs", - resource_name: stack_name, - })?; - - Ok(output_array) -} diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.toml b/tools/infrasys/test_tomls/toml_yaml_conversion.toml deleted file mode 100644 index f2e58013..00000000 --- a/tools/infrasys/test_tomls/toml_yaml_conversion.toml +++ /dev/null @@ -1,12 +0,0 @@ -[repo.default] - file_hosting_config_name = "TUF-Repo-S3-Buck" - signing_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } - root_keys = { kms = { available_keys = { "e4a8f7fe-2272-4e51-bc3e-3f719c77eb31" = "us-west-1" } } } - root_key_threshold = 1 - pub_key_threshold = 1 - -[aws] - [aws.s3.TUF-Repo-S3-Buck] - region = "us-west-2" - vpc_endpoint_id = "vpc-12345" - s3_prefix = "/my-bottlerocket-remix" diff --git a/tools/infrasys/test_tomls/toml_yaml_conversion.yml b/tools/infrasys/test_tomls/toml_yaml_conversion.yml deleted file mode 100644 index c9482f65..00000000 --- a/tools/infrasys/test_tomls/toml_yaml_conversion.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -repo: - default: - root_role_url: ~ - root_role_sha512: ~ - signing_keys: - kms: - key_id: ~ - available_keys: - e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1 - key_alias: ~ - regions: [] - key_stack_arns: {} - root_keys: - kms: - key_id: ~ - available_keys: - e4a8f7fe-2272-4e51-bc3e-3f719c77eb31: us-west-1 - key_alias: ~ - regions: [] - key_stack_arns: {} - metadata_base_url: ~ - targets_url: ~ - file_hosting_config_name: TUF-Repo-S3-Buck - root_key_threshold: 1 - pub_key_threshold: 1 -aws: - regions: [] - role: ~ - profile: ~ - region: {} - ssm_prefix: ~ - s3: - TUF-Repo-S3-Buck: - region: us-west-2 - s3_prefix: /my-bottlerocket-remix - vpc_endpoint_id: vpc-12345 - stack_arn: ~ - bucket_name: ~ -vmware: ~ diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh new file mode 100755 index 00000000..959643fd --- /dev/null +++ b/tools/install-twoliter.sh @@ -0,0 +1,168 @@ +#!/usr/bin/env bash + +# +# Common error handling +# + +exit_trap_cmds=() + +on_exit() { + exit_trap_cmds+=( "$1" ) +} + +run_exit_trap_cmds() { + for cmd in "${exit_trap_cmds[@]}"; do + eval "${cmd}" + done +} + +trap run_exit_trap_cmds EXIT + +warn() { + >&2 echo "Warning: $*" +} + +bail() { + if [[ $# -gt 0 ]]; then + >&2 echo "Error: $*" + fi + exit 1 +} + +usage() { + cat <&2 usage + bail "$1" +} + + +# +# Parse arguments +# + +while [[ $# -gt 0 ]]; do + case $1 in + -r|--repo) + shift; repo=$1 ;; + -v|--version) + shift; version=$1 ;; + -d|--directory) + shift; dir=$1 ;; 
+ -e|--reuse-existing-install) + reuse_existing="true" ;; + -b|--allow-binary-install) + allow_bin="true" ;; + -s|--allow-from-source) + from_source="true" ;; + -h|--help) + usage; exit 0 ;; + *) + usage_error "Invalid option '$1'" ;; + esac + shift +done + +set -e + +workdir="$(mktemp -d)" +on_exit "rm -rf ${workdir}" + +if [ "${reuse_existing}" = "true" ] ; then + if [ -x "${dir}/twoliter" ] ; then + version_output="$("${dir}/twoliter" --version)" + found_version=v$(echo $version_output | awk '{print $2}') + echo "Found twoliter ${found_version} installed." + if [ "${found_version}" = "${version}" ] ; then + echo "Skipping installation." + exit 0 + fi + fi +fi + +if [ "${allow_bin}" = "true" ] ; then + host_arch="$(uname -m)" + host_arch="${host_arch,,}" + host_kernel="$(uname -s)" + host_kernel="${host_kernel,,}" + case "${host_kernel}-${host_arch}" in + linux-x86_64 | linux-aarch64) + echo "Installing twoliter from binary release." + twoliter_release="${repo}/releases/download/${version}" + twoliter_target="${host_arch}-unknown-${host_kernel}-musl" + cd "${workdir}" + curl -sSL "${twoliter_release}/twoliter-${twoliter_target}.tar.xz" -o "twoliter.tar.xz" + tar xf twoliter.tar.xz + mkdir -p "${dir}" + mv "./twoliter-${twoliter_target}/twoliter" "${dir}" + exit 0 + ;; + *) + echo "No pre-built binaries available for twoliter ${version}." + ;; + esac +else + echo "Skipped installing twoliter ${version} from pre-built binaries." +fi + +if [ "${from_source}" = "true" ] ; then + cargo install \ + --locked \ + --root "${workdir}" \ + --git "${repo}" \ + --rev "${version}" \ + --bin twoliter \ + --quiet \ + twoliter + mv "${workdir}/bin/twoliter" "${dir}/twoliter" + echo "Installed twoliter ${version} from source." + exit 0 +else + echo "Skipped installing twoliter ${version} from source." +fi + + +if [ ! -x "${dir}/twoliter" ] ; then + echo "Could not install twoliter ${version}" >&2 + exit 1 +fi \ No newline at end of file diff --git a/tools/partyplanner b/tools/partyplanner deleted file mode 100755 index d638319c..00000000 --- a/tools/partyplanner +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2034 # Variables are used externally by rpm2img - -############################################################################### -# Section 1: partition type GUIDs and partition GUIDs - -# Define partition type GUIDs for all OS-managed partitions. This is required -# for the boot partition, where we set gptprio bits in the GUID-specific use -# field, but we might as well do it for all of them. -BOTTLEROCKET_BOOT_TYPECODE="6b636168-7420-6568-2070-6c616e657421" -BOTTLEROCKET_ROOT_TYPECODE="5526016a-1a97-4ea4-b39a-b7c8c6ca4502" -BOTTLEROCKET_HASH_TYPECODE="598f10af-c955-4456-6a99-7720068a6cea" -BOTTLEROCKET_RESERVED_TYPECODE="0c5d99a5-d331-4147-baef-08e2b855bdc9" -BOTTLEROCKET_PRIVATE_TYPECODE="440408bb-eb0b-4328-a6e5-a29038fad706" -BOTTLEROCKET_DATA_TYPECODE="626f7474-6c65-6474-6861-726d61726b73" - -# Under BIOS, the firmware will transfer control to the MBR on the boot device, -# which will pass control to the GRUB stage 2 binary written to the BIOS boot -# partition. The BIOS does not attach any significance to this partition type, -# but GRUB knows to install itself there when we run `grub-bios-setup`. -BIOS_BOOT_TYPECODE="ef02" - -# Under EFI, the firmware will find the EFI system partition and execute the -# program at a platform-defined path like `bootx64.efi`. The partition type -# must match what the firmware expects. 
-EFI_SYSTEM_TYPECODE="C12A7328-F81F-11D2-BA4B-00A0C93EC93B" - -# Whichever entry point is used for booting the system, it's important to note -# that only one build of GRUB is involved - the one that's installed during the -# image build. - -# GRUB understands the GPT priorities scheme we use to find the active boot -# partition; EFI and BIOS firmware does not. This is why we do not update GRUB -# during our system updates; we would have no way to revert to an earlier copy -# of the bootloader if it failed to boot. -# -# We may eventually want to have an active/passive scheme for EFI partitions, -# to allow for potential GRUB and shim updates on EFI platforms in cases where -# we need to deliver security fixes. For now, add a placeholder partition type -# for an alternate bank. -EFI_BACKUP_TYPECODE="B39CE39C-0A00-B4AB-2D11-F18F8237A21C" - -# Define partition GUIDs for the data partitions. We use the GUID for determining -# which data partition to label and use at boot. -BOTTLEROCKET_DATA_PREFERRED_PARTGUID="5b94e8df-28b8-485c-9d19-362263b5944c" -BOTTLEROCKET_DATA_FALLBACK_PARTGUID="69040874-417d-4e26-a764-7885f22007ea" - -############################################################################### -# Section 2: fixed size partitions and reservations - -# The GPT header and footer each take up 32 sectors, but we reserve a full MiB -# so that partitions can all be aligned on MiB boundaries. -GPT_MIB="1" # two per disk - -# The BIOS partition is only used on x86 platforms, and only needs to be large -# enough for the GRUB stage 2. Increasing its size will reduce the size of the -# "private" and "reserved" partitions. This should be relatively safe since we -# don't apply image updates to those partitions. -BIOS_MIB="4" # one per disk - -# The GPT and BIOS reservations are fixed overhead that will be deducted from -# the space nominally given to the private partition used to persist settings. -OVERHEAD_MIB="$((GPT_MIB * 2 + BIOS_MIB))" - -# The 'recommended' size for the EFI partition is 100MB but our EFI images are -# under 2MB, so this will suffice for now. It would be possible to increase the -# EFI partition size by taking space from the "reserved" area below. -EFI_MIB="5" # one per bank - -# Allocate 1 MiB for the initial data partition A. -DATA_A_MIB="1" # one per disk - -############################################################################### -# Section 3: variable sized partitions - -# These partitions scale based on image size. The scaling factors are chosen so -# that we end up with the same partition sizes for the banks on a 2 GiB image, -# which was the only image size we historically supported. -# -# !!! WARNING !!! -# -# Increasing any of these constants is very likely to break systems on update, -# since the corresponding partitions are adjacent on disk and have no room to -# grow. -BOOT_SCALE_FACTOR="20" -ROOT_SCALE_FACTOR="460" -HASH_SCALE_FACTOR="5" -RESERVE_SCALE_FACTOR="15" -PRIVATE_SCALE_FACTOR="24" - -############################################################################### -# Section 4: ASCII art gallery - -# Layout for a 1 GiB OS image. Sizes marked with (*) scale with overall image -# size, based on the constant factors above. - -# +---------------------------------+ -# Prelude | GPT header 1 MiB | 5 MiB -# | BIOS boot partition 4 MiB | Fixed size. 
-# +---------------------------------+ -# | EFI system partition 5 MiB | -# | Boot partition A 20 MiB* | (image size - prelude - postlude) / 2 -# Bank A | Root partition A 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 -# | Hash partition A 5 MiB* | 500 MiB -# | Reserved partition A 10 MiB* | -# +---------------------------------+ -# | EFI backup partition 5 MiB | -# | Boot partition B 20 MiB* | (image size - prelude - postlude) / 2 -# Bank B | Root partition B 460 MiB* | Example: (1 GiB - 5 MiB - 19 MiB) / 2 -# | Hash partition B 5 MiB* | 500 MiB -# | Reserved partition B 10 MiB* | -# +---------------------------------+ -# | Private partition 17 MiB* | (image size * 24 as MiB) - prelude - DATA-A size -# | Data partition A 1 MiB | Data partition A -# Postlude | GPT footer 1 MiB | GPT is fixed, private partition grows. -# +---------------------------------+ - -############################################################################## -# Section 5: library functions - -# Populate the caller's tables with sizes and offsets for known partitions. -set_partition_sizes() { - local os_image_gib data_image_gib partition_plan - local -n pp_size pp_offset - os_image_gib="${1:?}" - data_image_gib="${2:?}" - - # Whether we're building a layout for a "split" image, where OS and data - # volumes are on separate disks, or a "unified" image, where they share the - # same disk. - partition_plan="${3:?}" - - # Table for partition sizes, in MiB. - pp_size="${4:?}" - - # Table for partition offsets from start of disk, in MiB. - pp_offset="${5:?}" - - # Most of the partitions on the main image scale with the overall size. - local boot_mib root_mib hash_mib reserved_mib private_mib - boot_mib="$((os_image_gib * BOOT_SCALE_FACTOR))" - root_mib="$((os_image_gib * ROOT_SCALE_FACTOR))" - hash_mib="$((os_image_gib * HASH_SCALE_FACTOR))" - - # Reserved space is everything left in the bank after the other partitions - # are scaled, minus the fixed 5 MiB EFI partition in that bank. - reserved_mib=$((os_image_gib * RESERVE_SCALE_FACTOR - EFI_MIB)) - - # Private space scales per GiB, minus the BIOS and GPT partition overhead. - private_mib=$((os_image_gib * PRIVATE_SCALE_FACTOR - OVERHEAD_MIB)) - # We need 1 MiB of space for data partition A. - private_mib=$((private_mib - DATA_A_MIB)) - - # Skip the GPT label at start of disk. - local offset - ((offset = 1)) - - pp_offset["BIOS"]="${offset}" - pp_size["BIOS"]="${BIOS_MIB}" - ((offset += BIOS_MIB)) - - for bank in A B ; do - pp_offset["EFI-${bank}"]="${offset}" - pp_size["EFI-${bank}"]="${EFI_MIB}" - ((offset += EFI_MIB)) - - pp_offset["BOOT-${bank}"]="${offset}" - pp_size["BOOT-${bank}"]="${boot_mib}" - ((offset += boot_mib)) - - pp_offset["ROOT-${bank}"]="${offset}" - pp_size["ROOT-${bank}"]="${root_mib}" - ((offset += root_mib)) - - pp_offset["HASH-${bank}"]="${offset}" - pp_size["HASH-${bank}"]="${hash_mib}" - ((offset += hash_mib)) - - pp_offset["RESERVED-${bank}"]="${offset}" - pp_size["RESERVED-${bank}"]="${reserved_mib}" - ((offset += reserved_mib)) - done - - pp_offset["PRIVATE"]="${offset}" - pp_size["PRIVATE"]="${private_mib}" - ((offset += private_mib)) - - case "${partition_plan}" in - split) - # For data partition A that lives on the OS image - pp_offset["DATA-A"]="${offset}" - pp_size["DATA-A"]="${DATA_A_MIB}" - ((offset += DATA_A_MIB)) - - # For a split data image, the first and last MiB are reserved for the GPT - # labels, and the rest is for data partition B. 
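            # (Worked example with a hypothetical 1 GiB data image: data
            # partition B would get 1 * 1024 - 1 * 2 = 1022 MiB.)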
- pp_size["DATA-B"]="$((data_image_gib * 1024 - GPT_MIB * 2))" - pp_offset["DATA-B"]="1" - ;; - unified) - # For a unified image, we've already accounted for the GPT label space in - # the earlier calculations, so all the space is for the data partition. - pp_size["DATA-A"]="$((data_image_gib * 1024))" - pp_offset["DATA-A"]="${offset}" - ((offset += data_image_gib * 1024)) - ;; - *) - echo "unknown partition plan '${partition_plan}'" >&2 - exit 1 - ;; - esac -} - -# Populate the caller's table with labels for known partitions. -set_partition_labels() { - local -n pp_label - pp_label="${1:?}" - pp_label["BIOS"]="BIOS-BOOT" - pp_label["EFI-A"]="EFI-SYSTEM" - pp_label["EFI-B"]="EFI-BACKUP" - # Empty label for the data partitions. We're labeling the data partition - # during boot. - pp_label["DATA-A"]="" - pp_label["DATA-B"]="" - pp_label["PRIVATE"]="BOTTLEROCKET-PRIVATE" - for part in BOOT ROOT HASH RESERVED ; do - for bank in A B ; do - pp_label["${part}-${bank}"]="BOTTLEROCKET-${part}-${bank}" - done - done -} - -# Populate the caller's table with GPT type codes for known partitions. -set_partition_types() { - local -n pp_type - pp_type="${1:?}" - pp_type["BIOS"]="${BIOS_BOOT_TYPECODE}" - pp_type["DATA-A"]="${BOTTLEROCKET_DATA_TYPECODE}" - pp_type["DATA-B"]="${BOTTLEROCKET_DATA_TYPECODE}" - pp_type["EFI-A"]="${EFI_SYSTEM_TYPECODE}" - pp_type["EFI-B"]="${EFI_BACKUP_TYPECODE}" - pp_type["PRIVATE"]="${BOTTLEROCKET_PRIVATE_TYPECODE}" - local typecode - for part in BOOT ROOT HASH RESERVED ; do - for bank in A B ; do - typecode="BOTTLEROCKET_${part}_TYPECODE" - typecode="${!typecode}" - pp_type["${part}-${bank}"]="${typecode}" - done - done -} - -# Populate the caller's table with GPT partition UUIDs for DATA-A and -# DATA-B partitions. -set_partition_uuids() { - local -n pp_uuid - pp_uuid="${1:?}" - # Whether we're building a layout for a "split" image, where OS and data - # volumes are on separate disks, or a "unified" image, where they share the - # same disk. - partition_plan="${2:?}" - case "${partition_plan}" in - split) - pp_uuid["DATA-A"]="${BOTTLEROCKET_DATA_FALLBACK_PARTGUID}" - pp_uuid["DATA-B"]="${BOTTLEROCKET_DATA_PREFERRED_PARTGUID}" - ;; - unified) - pp_uuid["DATA-A"]="${BOTTLEROCKET_DATA_PREFERRED_PARTGUID}" - pp_uuid["DATA-B"]="${BOTTLEROCKET_DATA_FALLBACK_PARTGUID}" - ;; - *) - echo "unknown partition plan '${partition_plan}'" >&2 - exit 1 - ;; - esac -} diff --git a/tools/pubsys-config/Cargo.toml b/tools/pubsys-config/Cargo.toml deleted file mode 100644 index ba060eeb..00000000 --- a/tools/pubsys-config/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "pubsys-config" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } -home = "0.5" -lazy_static = "1" -log = "0.4" -parse-datetime = { path = "../../sources/parse-datetime", version = "0.1" } -serde = { version = "1", features = ["derive"] } -serde_yaml = "0.9" -snafu = "0.7" -toml = "0.5" -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys-config/src/lib.rs b/tools/pubsys-config/src/lib.rs deleted file mode 100644 index 8b244977..00000000 --- a/tools/pubsys-config/src/lib.rs +++ /dev/null @@ -1,279 +0,0 @@ -//! The config module owns the definition and loading process for our configuration sources. 
-pub mod vmware; - -use crate::vmware::VmwareConfig; -use chrono::Duration; -use log::info; -use parse_datetime::parse_offset; -use serde::{Deserialize, Deserializer, Serialize}; -use snafu::{OptionExt, ResultExt}; -use std::collections::{HashMap, VecDeque}; -use std::convert::TryFrom; -use std::fs; -use std::num::NonZeroUsize; -use std::path::{Path, PathBuf}; -use url::Url; - -/// Configuration needed to load and create repos -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct InfraConfig { - // Repo subcommand config - pub repo: Option>, - - // Config for AWS specific subcommands - pub aws: Option, - - // Config for VMware specific subcommands - pub vmware: Option, -} - -impl InfraConfig { - /// Deserializes an InfraConfig from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let infra_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - toml::from_str(&infra_config_str).context(error::InvalidTomlSnafu { path }) - } - - /// Deserializes an InfraConfig from a Infra.lock file at a given path - pub fn from_lock_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let infra_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - serde_yaml::from_str(&infra_config_str).context(error::InvalidLockSnafu { path }) - } - - /// Deserializes an InfraConfig from a given path, if it exists, otherwise builds a default - /// config - pub fn from_path_or_default
<P>
(path: P) -> Result - where - P: AsRef, - { - if path.as_ref().exists() { - Self::from_path(path) - } else { - Ok(Self::default()) - } - } - - /// Deserializes an InfraConfig from Infra.lock, if it exists, otherwise uses Infra.toml - /// If the default flag is true, will create a default config if Infra.toml doesn't exist - pub fn from_path_or_lock(path: &Path, default: bool) -> Result { - let lock_path = Self::compute_lock_path(path)?; - if lock_path.exists() { - info!("Found infra config at path: {}", lock_path.display()); - Self::from_lock_path(lock_path) - } else if default { - Self::from_path_or_default(path) - } else { - info!("Found infra config at path: {}", path.display()); - Self::from_path(path) - } - } - - /// Looks for a file named `Infra.lock` in the same directory as the file named by - /// `infra_config_path`. Returns true if the `Infra.lock` file exists, or if `infra_config_path` - /// exists. Returns an error if the directory of `infra_config_path` cannot be found. - pub fn lock_or_infra_config_exists
<P>
(infra_config_path: P) -> Result - where - P: AsRef, - { - let lock_path = Self::compute_lock_path(&infra_config_path)?; - Ok(lock_path.exists() || infra_config_path.as_ref().exists()) - } - - /// Returns the file path to a file named `Infra.lock` in the same directory as the file named - /// by `infra_config_path`. - pub fn compute_lock_path
<P>
(infra_config_path: P) -> Result - where - P: AsRef, - { - Ok(infra_config_path - .as_ref() - .parent() - .context(error::ParentSnafu { - path: infra_config_path.as_ref(), - })? - .join("Infra.lock")) - } -} - -/// S3-specific TUF infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -pub struct S3Config { - pub region: Option, - #[serde(default)] - pub s3_prefix: String, - pub vpc_endpoint_id: Option, - pub stack_arn: Option, - pub bucket_name: Option, -} - -/// AWS-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -pub struct AwsConfig { - #[serde(default)] - pub regions: VecDeque, - pub role: Option, - pub profile: Option, - #[serde(default)] - pub region: HashMap, - pub ssm_prefix: Option, - pub s3: Option>, -} - -/// AWS region-specific configuration -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -pub struct AwsRegionConfig { - pub role: Option, -} - -/// Location of signing keys -// These variant names are lowercase because they have to match the text in Infra.toml, and it's -// more common for TOML config to be lowercase. -#[allow(non_camel_case_types)] -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub enum SigningKeyConfig { - file { - path: PathBuf, - }, - kms { - key_id: Option, - #[serde(flatten)] - config: Option, - }, - ssm { - parameter: String, - }, -} - -/// AWS region-specific configuration -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -//#[serde(deny_unknown_fields)] -pub struct KMSKeyConfig { - #[serde(default)] - pub available_keys: HashMap, - pub key_alias: Option, - #[serde(default)] - pub regions: VecDeque, - #[serde(default)] - pub key_stack_arns: HashMap, -} - -impl TryFrom for Url { - type Error = (); - fn try_from(key: SigningKeyConfig) -> std::result::Result { - match key { - SigningKeyConfig::file { path } => Url::from_file_path(path), - // We don't support passing profiles to tough in the name of the key/parameter, so for - // KMS and SSM we prepend a slash if there isn't one present. - SigningKeyConfig::kms { key_id, .. 
} => { - let mut key_id = key_id.unwrap_or_default(); - key_id = if key_id.starts_with('/') { - key_id.to_string() - } else { - format!("/{}", key_id) - }; - Url::parse(&format!("aws-kms://{}", key_id)).map_err(|_| ()) - } - SigningKeyConfig::ssm { parameter } => { - let parameter = if parameter.starts_with('/') { - parameter - } else { - format!("/{}", parameter) - }; - Url::parse(&format!("aws-ssm://{}", parameter)).map_err(|_| ()) - } - } - } -} - -/// Represents a Bottlerocket repo's location and the metadata needed to update the repo -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct RepoConfig { - pub root_role_url: Option, - pub root_role_sha512: Option, - pub signing_keys: Option, - pub root_keys: Option, - pub metadata_base_url: Option, - pub targets_url: Option, - pub file_hosting_config_name: Option, - pub root_key_threshold: Option, - pub pub_key_threshold: Option, -} - -/// How long it takes for each metadata type to expire -#[derive(Debug, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct RepoExpirationPolicy { - #[serde(deserialize_with = "deserialize_offset")] - pub snapshot_expiration: Duration, - #[serde(deserialize_with = "deserialize_offset")] - pub targets_expiration: Duration, - #[serde(deserialize_with = "deserialize_offset")] - pub timestamp_expiration: Duration, -} - -impl RepoExpirationPolicy { - /// Deserializes a RepoExpirationPolicy from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let expiration_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - toml::from_str(&expiration_str).context(error::InvalidTomlSnafu { path }) - } -} - -/// Deserializes a Duration in the form of "in X hours/days/weeks" -fn deserialize_offset<'de, D>(deserializer: D) -> std::result::Result -where - D: Deserializer<'de>, -{ - let s: &str = Deserialize::deserialize(deserializer)?; - parse_offset(s).map_err(serde::de::Error::custom) -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Invalid lock file at '{}': {}", path.display(), source))] - InvalidLock { - path: PathBuf, - source: serde_yaml::Error, - }, - - #[snafu(display("Missing config: {}", what))] - MissingConfig { what: String }, - - #[snafu(display("Failed to get parent of path: {}", path.display()))] - Parent { path: PathBuf }, - } -} -pub use error::Error; -pub type Result = std::result::Result; diff --git a/tools/pubsys-config/src/vmware.rs b/tools/pubsys-config/src/vmware.rs deleted file mode 100644 index a5046096..00000000 --- a/tools/pubsys-config/src/vmware.rs +++ /dev/null @@ -1,221 +0,0 @@ -//! The vmware module owns the definition and loading process for our VMware configuration sources. -use lazy_static::lazy_static; -use log::debug; -use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, ResultExt}; -use std::collections::HashMap; -use std::path::{Path, PathBuf}; -use std::{env, fs}; - -lazy_static! { - /// Determine the full path to the Vsphere credentials at runtime. This is an Option because it is - /// possible (however unlikely) that `home_dir()` is unable to find the home directory of the - /// current user - pub static ref VMWARE_CREDS_PATH: Option = home::home_dir().map(|home| home - .join(".config") - .join("pubsys") - .join("vsphere-credentials.toml")); -} - -const GOVC_USERNAME: &str = "GOVC_USERNAME"; -const GOVC_PASSWORD: &str = "GOVC_PASSWORD"; -const GOVC_URL: &str = "GOVC_URL"; -const GOVC_DATACENTER: &str = "GOVC_DATACENTER"; -const GOVC_DATASTORE: &str = "GOVC_DATASTORE"; -const GOVC_NETWORK: &str = "GOVC_NETWORK"; -const GOVC_RESOURCE_POOL: &str = "GOVC_RESOURCE_POOL"; -const GOVC_FOLDER: &str = "GOVC_FOLDER"; - -/// VMware-specific infrastructure configuration -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct VmwareConfig { - #[serde(default)] - pub datacenters: Vec, - #[serde(default)] - pub datacenter: HashMap, - pub common: Option, -} - -/// VMware datacenter-specific configuration. -/// -/// Fields are optional here because this struct is used to gather environment variables, common -/// config, and datacenter-specific configuration, each of which may not have the complete set of -/// fields. It is used to build a complete datacenter configuration (hence the "Builder" name). 
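///
/// A rough usage sketch (hypothetical call site; `common` is assumed to be an
/// `Option<DatacenterBuilder>` such as `VmwareConfig::common`):
///
/// ```ignore
/// let datacenter = DatacenterBuilder::from_env()
///     .take_missing_from(common.as_ref())
///     .build()?;
/// ```
///
/// Any field already set from the `GOVC_*` environment variables is kept, anything still
/// `None` is filled in from `common`, and `build` then fails if a required field is missing.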
-#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct DatacenterBuilder { - pub vsphere_url: Option, - pub datacenter: Option, - pub datastore: Option, - pub network: Option, - pub folder: Option, - pub resource_pool: Option, -} - -/// Helper macro for retrieving a field from another struct if the field in `self` is `None` -macro_rules! field_or { - ($self:expr, $field:ident, $other:expr) => { - $self - .$field - .as_ref() - .or($other.and_then(|o| o.$field.as_ref())) - .cloned() - }; -} - -impl DatacenterBuilder { - /// Create a DatacenterBuilder from environment variables - pub fn from_env() -> Self { - Self { - vsphere_url: get_env(GOVC_URL), - datacenter: get_env(GOVC_DATACENTER), - datastore: get_env(GOVC_DATASTORE), - network: get_env(GOVC_NETWORK), - folder: get_env(GOVC_FOLDER), - resource_pool: get_env(GOVC_RESOURCE_POOL), - } - } - - /// Creates a new DatacenterBuilder, merging fields from another (Optional) - /// DatacenterBuilder if the field in `self` is None - pub fn take_missing_from(&self, other: Option<&Self>) -> Self { - Self { - vsphere_url: field_or!(self, vsphere_url, other), - datacenter: field_or!(self, datacenter, other), - datastore: field_or!(self, datastore, other), - network: field_or!(self, network, other), - folder: field_or!(self, folder, other), - resource_pool: field_or!(self, resource_pool, other), - } - } - - /// Attempts to create a `Datacenter`, consuming `self` and ensuring that each field contains a - /// value. - pub fn build(self) -> Result { - let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); - - Ok(Datacenter { - vsphere_url: get_or_err(self.vsphere_url, "vSphere URL")?, - datacenter: get_or_err(self.datacenter, "vSphere datacenter")?, - datastore: get_or_err(self.datastore, "vSphere datastore")?, - network: get_or_err(self.network, "vSphere network")?, - folder: get_or_err(self.folder, "vSphere folder")?, - resource_pool: get_or_err(self.resource_pool, "vSphere resource pool")?, - }) - } -} - -/// A fully configured VMware datacenter, i.e. no optional fields -#[derive(Debug)] -pub struct Datacenter { - pub vsphere_url: String, - pub datacenter: String, - pub datastore: String, - pub network: String, - pub folder: String, - pub resource_pool: String, -} - -/// VMware infrastructure credentials for all datacenters -#[derive(Debug, Default, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct DatacenterCredsConfig { - #[serde(default)] - pub datacenter: HashMap, -} - -impl DatacenterCredsConfig { - /// Deserializes a DatacenterCredsConfig from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let creds_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - toml::from_str(&creds_config_str).context(error::InvalidTomlSnafu { path }) - } -} - -/// VMware datacenter-specific credentials. Fields are optional here since this struct is used to -/// gather environment variables as well as fields from file, either of which may or may not exist. -/// It is used to build a complete credentials configuration (hence the "Builder" name). -#[derive(Debug, Default, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct DatacenterCredsBuilder { - pub username: Option, - pub password: Option, -} - -impl DatacenterCredsBuilder { - /// Create a DatacenterCredsBuilder from environment variables - pub fn from_env() -> Self { - Self { - username: get_env(GOVC_USERNAME), - password: get_env(GOVC_PASSWORD), - } - } - - /// Creates a new DatacenterCredsBuilder, merging fields from another (Optional) - /// DatacenterCredsBuilder if the field in `self` is None - pub fn take_missing_from(&self, other: Option<&Self>) -> Self { - Self { - username: field_or!(self, username, other), - password: field_or!(self, password, other), - } - } - /// Attempts to create a `DatacenterCreds`, consuming `self` and ensuring that each field - /// contains a value - pub fn build(self) -> Result { - let get_or_err = - |opt: Option, what: &str| opt.context(error::MissingConfigSnafu { what }); - - Ok(DatacenterCreds { - username: get_or_err(self.username, "vSphere username")?, - password: get_or_err(self.password, "vSphere password")?, - }) - } -} - -/// Fully configured datacenter credentials, i.e. no optional fields -#[derive(Debug)] -pub struct DatacenterCreds { - pub username: String, - pub password: String, -} - -/// Attempt to retrieve an environment variable, returning None if it doesn't exist -fn get_env(var: &str) -> Option { - match env::var(var) { - Ok(v) => Some(v), - Err(e) => { - debug!("Unable to read environment variable '{}': {}", var, e); - None - } - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Missing config: {}", what))] - MissingConfig { what: String }, - } -} -pub use error::Error; -pub type Result = std::result::Result; diff --git a/tools/pubsys-setup/Cargo.toml b/tools/pubsys-setup/Cargo.toml deleted file mode 100644 index f16852bc..00000000 --- a/tools/pubsys-setup/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "pubsys-setup" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -clap = { version = "4", features = ["derive"] } -hex = "0.4" -log = "0.4" -pubsys-config = { path = "../pubsys-config/", version = "0.1" } -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -sha2 = "0.10" -shell-words = "1" -simplelog = "0.12" -snafu = "0.7" -tempfile = "3" -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys-setup/src/main.rs b/tools/pubsys-setup/src/main.rs deleted file mode 100644 index d07cb7a2..00000000 --- a/tools/pubsys-setup/src/main.rs +++ 
/dev/null @@ -1,388 +0,0 @@ -/*! -`pubsys setup` helps you get started with the credentials you need to make Bottlerocket images and -the repos you use to update them. Specifically, it can create a new key and role, or download an -existing role. -*/ - -use clap::Parser; -use log::{debug, info, trace, warn}; -use pubsys_config::InfraConfig; -use sha2::{Digest, Sha512}; -use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::convert::TryFrom; -use std::fs; -use std::os::unix::fs::PermissionsExt; -use std::path::PathBuf; -use std::process::{self, Command}; -use tempfile::NamedTempFile; -use url::Url; - -/// Helps you get started with credentials to make Bottlerocket images and repos. -#[derive(Debug, Parser)] -struct Args { - #[arg(global = true, long, default_value = "INFO")] - /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE - log_level: LevelFilter, - - #[arg(long)] - /// Path to Infra.toml - infra_config_path: PathBuf, - - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// Path to root.json - root_role_path: PathBuf, - - #[arg(long)] - /// If we have to generate a local key, store it here - default_key_path: PathBuf, - - #[arg(long)] - /// Allow setup to continue if we have a root role but no key for it - allow_missing_key: bool, -} - -/// The tuftool macro wraps Command to simplify calls to tuftool. -macro_rules! tuftool { - // We use variadic arguments to wrap a format! call so the user doesn't need to call format! - // each time. `tuftool root` always requires the path to root.json so there's always at least - // one. - ($format_str:expr, $($format_arg:expr),*) => { - let arg_str = format!($format_str, $($format_arg),*); - trace!("tuftool arg string: {}", arg_str); - let args = shell_words::split(&arg_str).context(error::CommandSplitSnafu { command: &arg_str })?; - trace!("tuftool split args: {:#?}", args); - - let status = Command::new("tuftool") - .args(args) - .status() - .context(error::TuftoolSpawnSnafu)?; - - ensure!(status.success(), error::TuftoolResultSnafu { - command: arg_str, - code: status.code().map(|i| i.to_string()).unwrap_or_else(|| "".to_string()) - }); - } -} - -/// Main entry point for tuftool setup. -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - // SimpleLogger will send errors to stderr and anything less to stdout. - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)?; - - // Make /roles and /keys directories, if they don't exist, so we can write generated files. - let role_dir = args.root_role_path.parent().context(error::PathSnafu { - path: &args.root_role_path, - thing: "root role", - })?; - let key_dir = args.default_key_path.parent().context(error::PathSnafu { - path: &args.default_key_path, - thing: "key", - })?; - fs::create_dir_all(role_dir).context(error::MkdirSnafu { path: role_dir })?; - fs::create_dir_all(key_dir).context(error::MkdirSnafu { path: key_dir })?; - - // Main branching logic for deciding whether to create role/key, use what we have, or error. - match find_root_role_and_key(&args)? { - (Some(_root_role_path), Some(_key_url)) => Ok(()), - (Some(_root_role_path), None) => { - ensure!( - args.allow_missing_key, - error::MissingKeySnafu { repo: args.repo } - ); - Ok(()) - } - // User is missing something, so we generate at least a root.json and maybe a key. 
- (None, maybe_key_url) => { - if maybe_key_url.is_some() { - info!("Didn't find root role in Infra.toml, generating..."); - } else { - info!("Didn't find root role or signing key in Infra.toml, generating..."); - } - - let temp_root_role = - NamedTempFile::new_in(role_dir).context(error::TempFileCreateSnafu { - purpose: "root role", - })?; - let temp_root_role_path = temp_root_role.path().display(); - - // Make tuftool calls to create an initial root.json with basic parameters. - tuftool!("root init '{}'", temp_root_role_path); - - tuftool!("root expire '{}' 'in 52 weeks'", temp_root_role_path); - - tuftool!("root set-threshold '{}' root 1", temp_root_role_path); - tuftool!("root set-threshold '{}' snapshot 1", temp_root_role_path); - tuftool!("root set-threshold '{}' targets 1", temp_root_role_path); - tuftool!("root set-threshold '{}' timestamp 1", temp_root_role_path); - - let key_url = if let Some(key_url) = maybe_key_url { - // If the user has a key, add it to each role. - tuftool!("root add-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", - temp_root_role_path, key_url); - key_url - } else { - // If the user has no key, build one and add it to each role. - tuftool!("root gen-rsa-key '{}' '{}' --role root --role snapshot --role targets --role timestamp", - temp_root_role_path, args.default_key_path.display()); - warn!( - "Created a key at {} - note that for production use, you should \ - use a key stored in a trusted service like KMS or SSM", - args.default_key_path.display() - ); - - Url::from_file_path(&args.default_key_path) - .ok() - .context(error::FileToUrlSnafu { - path: args.default_key_path, - })? - }; - - // Sign the role with the given key. - tuftool!("root sign '{}' -k '{}'", temp_root_role_path, key_url); - - temp_root_role - .persist_noclobber(&args.root_role_path) - .context(error::TempFilePersistSnafu { - path: &args.root_role_path, - })?; - - warn!( - "Created a root role at {} - note that for production use, you should create \ - a role with a shorter expiration and higher thresholds", - args.root_role_path.display() - ); - - // Root role files don't need to be secret. - fs::set_permissions(&args.root_role_path, fs::Permissions::from_mode(0o644)).context( - error::SetModeSnafu { - path: &args.root_role_path, - }, - )?; - - Ok(()) - } - } -} - -/// Searches Infra.toml and expected local paths for a root role and key for the requested repo. -fn find_root_role_and_key(args: &Args) -> Result<(Option<&PathBuf>, Option)> { - let (mut root_role_path, mut key_url) = (None, None); - - if InfraConfig::lock_or_infra_config_exists(&args.infra_config_path) - .context(error::ConfigSnafu)? - { - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - trace!("Parsed infra config: {:?}", infra_config); - - // Check whether the user has the relevant repo defined in their Infra.toml. - if let Some(repo_config) = infra_config - .repo - .as_ref() - .and_then(|repo_section| repo_section.get(&args.repo)) - { - // If they have a root role URL and checksum defined, we can download it. - if let (Some(url), Some(sha512)) = - (&repo_config.root_role_url, &repo_config.root_role_sha512) - { - // If it's already been downloaded, just confirm the checksum. 
- if args.root_role_path.exists() { - let root_role_data = - fs::read_to_string(&args.root_role_path).context(error::ReadFileSnafu { - path: &args.root_role_path, - })?; - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::HashSnafu { - expected: sha512, - got: digest, - thing: args.root_role_path.to_string_lossy() - } - ); - debug!( - "Using existing downloaded root role at {}", - args.root_role_path.display() - ); - } else { - // Download the root role by URL and verify its checksum before writing it. - let root_role_data = if url.scheme() == "file" { - // reqwest won't fetch a file URL, so just read the file. - let path = url - .to_file_path() - .ok() - .with_context(|| error::UrlToFileSnafu { url: url.clone() })?; - fs::read_to_string(&path).context(error::ReadFileSnafu { path: &path })? - } else { - reqwest::blocking::get(url.clone()) - .with_context(|_| error::GetUrlSnafu { url: url.clone() })? - .text() - .with_context(|_| error::GetUrlSnafu { url: url.clone() })? - }; - - let mut d = Sha512::new(); - d.update(&root_role_data); - let digest = hex::encode(d.finalize()); - - ensure!( - &digest == sha512, - error::HashSnafu { - expected: sha512, - got: digest, - thing: url.to_string() - } - ); - - // Write root role to expected path on disk. - fs::write(&args.root_role_path, &root_role_data).context( - error::WriteFileSnafu { - path: &args.root_role_path, - }, - )?; - debug!("Downloaded root role to {}", args.root_role_path.display()); - } - - root_role_path = Some(&args.root_role_path); - } else if repo_config.root_role_url.is_some() || repo_config.root_role_sha512.is_some() - { - // Must specify both URL and checksum. - error::RootRoleConfigSnafu.fail()?; - } - - if let Some(key_config) = &repo_config.signing_keys { - key_url = Some( - Url::try_from(key_config.clone()) - .ok() - .context(error::SigningKeyUrlSnafu { repo: &args.repo })?, - ); - } - } else { - info!( - "No repo config in '{}' - using local roles/keys", - args.infra_config_path.display() - ); - } - } else { - info!( - "No infra config at '{}' - using local roles/keys", - args.infra_config_path.display() - ); - } - - // If they don't have an Infra.toml or didn't define a root role / key there, check for them in - // expected local paths. - if root_role_path.is_none() && args.root_role_path.exists() { - root_role_path = Some(&args.root_role_path); - } - if key_url.is_none() && args.default_key_path.exists() { - key_url = Some(Url::from_file_path(&args.default_key_path).ok().context( - error::FileToUrlSnafu { - path: &args.default_key_path, - }, - )?); - } - - Ok((root_role_path, key_url)) -} - -// Returning a Result from main makes it print a Debug representation of the error, but with Snafu -// we have nice Display representations of the error, so we wrap "main" (run) and print any error. 
-// https://github.com/shepmaster/snafu/issues/110 -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - #[snafu(display("Error splitting shell command - {} - input: {}", source, command))] - CommandSplit { - command: String, - source: shell_words::ParseError, - }, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Path not valid as a URL: {}", path.display()))] - FileToUrl { path: PathBuf }, - - #[snafu(display("Failed to fetch URL '{}': {}", url, source))] - GetUrl { url: Url, source: reqwest::Error }, - - #[snafu(display("Hash mismatch for '{}', got {} but expected {}", thing, got, expected))] - Hash { - expected: String, - got: String, - thing: String, - }, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - - #[snafu(display("'{}' repo has root role but no key. You wouldn't be able to update a repo without the matching key. To continue, pass '-e ALLOW_MISSING_KEY=true'", repo))] - MissingKey { repo: String }, - - #[snafu(display("Failed to create '{}': {}", path.display(), source))] - Mkdir { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid path '{}' for {}", path.display(), thing))] - Path { path: PathBuf, thing: String }, - - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - ReadFile { path: PathBuf, source: io::Error }, - - #[snafu(display( - "Must specify both URL and SHA512 of root role in Infra.toml, found only one" - ))] - RootRoleConfig, - - #[snafu(display("Failed to set permissions on {}: {}", path.display(), source))] - SetMode { path: PathBuf, source: io::Error }, - - #[snafu(display("Unable to build URL from signing key for repo '{}'", repo))] - SigningKeyUrl { repo: String }, - - #[snafu(display("Failed to create temp file for {}: {}", purpose, source))] - TempFileCreate { purpose: String, source: io::Error }, - - #[snafu(display("Failed to move temp file to {}: {}", path.display(), source))] - TempFilePersist { - path: PathBuf, - source: tempfile::PersistError, - }, - - #[snafu(display("Returned {}: tuftool {}", code, command))] - TuftoolResult { code: String, command: String }, - - #[snafu(display("Failed to start tuftool: {}", source))] - TuftoolSpawn { source: io::Error }, - - #[snafu(display("URL not valid as a path: {}", url))] - UrlToFile { url: Url }, - - #[snafu(display("Failed to write '{}': {}", path.display(), source))] - WriteFile { path: PathBuf, source: io::Error }, - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/Cargo.toml b/tools/pubsys/Cargo.toml deleted file mode 100644 index c306d808..00000000 --- a/tools/pubsys/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "pubsys" -version = "0.1.0" -authors = ["Zac Mrowicki ", "Tom Kirchner "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -aws-config = "0.55" -aws-credential-types = "0.55" -aws-sdk-ebs = "0.28" -aws-sdk-ec2 = "0.28" -aws-sdk-kms = "0.28" -aws-sdk-ssm = "0.28" -aws-sdk-sts = "0.28" -aws-smithy-types = "0.55" -aws-types = "0.55" -buildsys = { path = "../buildsys", version = "0.1" } -chrono = { version = "0.4", default-features = false, features = ["std", "clock"] } -clap = { version = "4", features = ["derive"] } -coldsnap = { version = "0.6", default-features = 
false, features = ["aws-sdk-rust-rustls"] } -duct = "0.13" -futures = "0.3" -governor = "0.5" -indicatif = "0.17" -lazy_static = "1" -log = "0.4" -nonzero_ext = "0.3" -num_cpus = "1" -parse-datetime = { path = "../../sources/parse-datetime", version = "0.1" } -pubsys-config = { path = "../pubsys-config/", version = "0.1" } -rayon = "1" -# Need to bring in reqwest with a TLS feature so tough can support TLS repos. -reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "blocking"] } -semver = "1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -serde_plain = "1" -simplelog = "0.12" -snafu = "0.7" -tabled = "0.10" -tempfile = "3" -tinytemplate = "1" -tokio = { version = "1", features = ["full"] } # LTS -tokio-stream = { version = "0.1", features = ["time"] } -toml = "0.5" -tough = { version = "0.14", features = ["http"] } -tough-kms = "0.6" -tough-ssm = "0.9" -update_metadata = { path = "../../sources/updater/update_metadata/", version = "0.1" } -url = { version = "2", features = ["serde"] } diff --git a/tools/pubsys/src/aws/ami/launch_permissions.rs b/tools/pubsys/src/aws/ami/launch_permissions.rs deleted file mode 100644 index f8f58447..00000000 --- a/tools/pubsys/src/aws/ami/launch_permissions.rs +++ /dev/null @@ -1,101 +0,0 @@ -use aws_sdk_ec2::{ - types::{ImageAttributeName, LaunchPermission}, - Client as Ec2Client, -}; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; - -/// Returns the launch permissions for the given AMI -pub(crate) async fn get_launch_permissions( - ec2_client: &Ec2Client, - region: &str, - ami_id: &str, -) -> Result> { - let ec2_response = ec2_client - .describe_image_attribute() - .image_id(ami_id) - .attribute(ImageAttributeName::LaunchPermission) - .send() - .await - .context(error::DescribeImageAttributeSnafu { - ami_id, - region: region.to_string(), - })?; - - let mut launch_permissions = vec![]; - - let responses: Vec = - ec2_response.launch_permissions().unwrap_or(&[]).to_vec(); - for permission in responses { - launch_permissions.push(LaunchPermissionDef::try_from(permission)?) - } - Ok(launch_permissions) -} - -#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)] -#[serde(rename_all = "lowercase")] -pub(crate) enum LaunchPermissionDef { - /// The name of the group - Group(String), - - /// The Amazon Web Services account ID - UserId(String), - - /// The ARN of an organization - OrganizationArn(String), - - /// The ARN of an organizational unit - OrganizationalUnitArn(String), -} - -impl TryFrom for LaunchPermissionDef { - type Error = crate::aws::ami::launch_permissions::Error; - - fn try_from(launch_permission: LaunchPermission) -> std::result::Result { - let LaunchPermission { - group, - user_id, - organization_arn, - organizational_unit_arn, - .. 
- } = launch_permission.clone(); - match (group, user_id, organization_arn, organizational_unit_arn) { - (Some(group), None, None, None) => { - Ok(LaunchPermissionDef::Group(group.as_str().to_string())) - } - (None, Some(user_id), None, None) => Ok(LaunchPermissionDef::UserId(user_id)), - (None, None, Some(organization_arn), None) => { - Ok(LaunchPermissionDef::OrganizationArn(organization_arn)) - } - (None, None, None, Some(organizational_unit_arn)) => Ok( - LaunchPermissionDef::OrganizationalUnitArn(organizational_unit_arn), - ), - _ => Err(Error::InvalidLaunchPermission { launch_permission }), - } - } -} - -mod error { - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::describe_image_attribute::DescribeImageAttributeError; - use aws_sdk_ec2::types::LaunchPermission; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error describing AMI {} in {}: {}", ami_id, region, source))] - DescribeImageAttribute { - ami_id: String, - region: String, - #[snafu(source(from(SdkError, Box::new)))] - source: Box>, - }, - - #[snafu(display("Invalid launch permission: {:?}", launch_permission))] - InvalidLaunchPermission { launch_permission: LaunchPermission }, - } -} -pub(crate) use error::Error; - -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/mod.rs b/tools/pubsys/src/aws/ami/mod.rs deleted file mode 100644 index 825f23dc..00000000 --- a/tools/pubsys/src/aws/ami/mod.rs +++ /dev/null @@ -1,627 +0,0 @@ -//! The ami module owns the 'ami' subcommand and controls the process of registering and copying -//! EC2 AMIs. - -pub(crate) mod launch_permissions; -pub(crate) mod public; -mod register; -mod snapshot; -pub(crate) mod wait; - -use crate::aws::ami::launch_permissions::get_launch_permissions; -use crate::aws::ami::public::ami_is_public; -use crate::aws::publish_ami::{get_snapshots, modify_image, modify_snapshots, ModifyOptions}; -use crate::aws::{client::build_client_config, parse_arch, region_from_string}; -use crate::Args; -use aws_sdk_ebs::Client as EbsClient; -use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; -use aws_sdk_ec2::operation::copy_image::{CopyImageError, CopyImageOutput}; -use aws_sdk_ec2::types::{ArchitectureValues, OperationType}; -use aws_sdk_ec2::{config::Region, Client as Ec2Client}; -use aws_sdk_sts::operation::get_caller_identity::{ - GetCallerIdentityError, GetCallerIdentityOutput, -}; -use aws_sdk_sts::Client as StsClient; -use clap::Parser; -use futures::future::{join, lazy, ready, FutureExt}; -use futures::stream::{self, StreamExt}; -use log::{error, info, trace, warn}; -use pubsys_config::{AwsConfig as PubsysAwsConfig, InfraConfig}; -use register::{get_ami_id, register_image, RegisteredIds}; -use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, HashSet}; -use std::path::PathBuf; -use wait::wait_for_ami; - -const WARN_SEPARATOR: &str = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; - -/// Builds Bottlerocket AMIs using latest build artifacts -#[derive(Debug, Parser)] -pub(crate) struct AmiArgs { - /// Path to the image containing the os volume - #[arg(short = 'o', long)] - os_image: PathBuf, - - /// Path to the image containing the data volume - #[arg(short = 'd', long)] - data_image: Option, - - /// Path to the variant manifest - #[arg(short = 'v', long)] - variant_manifest: PathBuf, - - /// Path to the UEFI data - #[arg(short = 'e', long)] - uefi_data: PathBuf, - - /// The 
architecture of the machine image - #[arg(short = 'a', long, value_parser = parse_arch)] - arch: ArchitectureValues, - - /// The desired AMI name - #[arg(short = 'n', long)] - name: String, - - /// The desired AMI description - #[arg(long)] - description: Option, - - /// Don't display progress bars - #[arg(long)] - no_progress: bool, - - /// Regions where you want the AMI, the first will be used as the base for copying - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// If specified, save created regional AMI IDs in JSON at this path. - #[arg(long)] - ami_output: Option, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, ami_args: &AmiArgs) -> Result<()> { - match _run(args, ami_args).await { - Ok(amis) => { - // Write the AMI IDs to file if requested - if let Some(ref path) = ami_args.ami_output { - write_amis(path, &amis).context(error::WriteAmisSnafu { path })?; - } - Ok(()) - } - Err(e) => Err(e), - } -} - -async fn _run(args: &Args, ami_args: &AmiArgs) -> Result> { - let mut amis = HashMap::new(); - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - let aws = infra_config.aws.unwrap_or_default(); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. - let mut regions = if !ami_args.regions.is_empty() { - ami_args.regions.clone() - } else { - aws.regions.clone().into() - } - .into_iter() - .map(|name| region_from_string(&name)) - .collect::>(); - - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - - // We register in this base region first, then copy from there to any other regions. - let base_region = regions.remove(0); - - // Build EBS client for snapshot management, and EC2 client for registration - let client_config = build_client_config(&base_region, &base_region, &aws).await; - - let base_ebs_client = EbsClient::new(&client_config); - - let base_ec2_client = Ec2Client::new(&client_config); - - // Check if the AMI already exists, in which case we can use the existing ID, otherwise we - // register a new one. 
- let maybe_id = get_ami_id( - &ami_args.name, - &ami_args.arch, - &base_region, - &base_ec2_client, - ) - .await - .context(error::GetAmiIdSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: base_region.as_ref(), - })?; - - // If the AMI does not exist yet, `public` should be false and `launch_permissions` empty - let mut public = false; - let mut launch_permissions = vec![]; - - let (ids_of_image, already_registered) = if let Some(found_id) = maybe_id { - warn!( - "\n{}\n\nFound '{}' already registered in {}: {}\n\n{0}", - WARN_SEPARATOR, ami_args.name, base_region, found_id - ); - let snapshot_ids = get_snapshots(&found_id, &base_region, &base_ec2_client) - .await - .context(error::GetSnapshotsSnafu { - image_id: &found_id, - region: base_region.as_ref(), - })?; - let found_ids = RegisteredIds { - image_id: found_id.clone(), - snapshot_ids, - }; - - public = ami_is_public(&base_ec2_client, base_region.as_ref(), &found_id) - .await - .context(error::IsAmiPublicSnafu { - image_id: found_id.clone(), - region: base_region.to_string(), - })?; - - launch_permissions = - get_launch_permissions(&base_ec2_client, base_region.as_ref(), &found_id) - .await - .context(error::DescribeImageAttributeSnafu { - image_id: found_id, - region: base_region.to_string(), - })?; - - (found_ids, true) - } else { - let new_ids = register_image(ami_args, &base_region, base_ebs_client, &base_ec2_client) - .await - .context(error::RegisterImageSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: base_region.as_ref(), - })?; - info!( - "Registered AMI '{}' in {}: {}", - ami_args.name, base_region, new_ids.image_id - ); - (new_ids, false) - }; - - amis.insert( - base_region.as_ref().to_string(), - Image::new( - &ids_of_image.image_id, - &ami_args.name, - Some(public), - Some(launch_permissions), - ), - ); - - // If we don't need to copy AMIs, we're done. - if regions.is_empty() { - return Ok(amis); - } - - // Wait for AMI to be available so it can be copied - let successes_required = if already_registered { 1 } else { 3 }; - wait_for_ami( - &ids_of_image.image_id, - &base_region, - &base_region, - "available", - successes_required, - &aws, - ) - .await - .context(error::WaitAmiSnafu { - id: &ids_of_image.image_id, - region: base_region.as_ref(), - })?; - - // For every other region, initiate copy-image calls. - - // First we need to find the account IDs for any given roles, so we can grant access to those - // accounts to copy the AMI and snapshots. - info!("Getting account IDs for target regions so we can grant access to copy source AMI"); - let mut account_ids = get_account_ids(®ions, &base_region, &aws).await?; - - // Get the account ID used in the base region; we don't need to grant to it so we can remove it - // from the list. - let client_config = build_client_config(&base_region, &base_region, &aws).await; - let base_sts_client = StsClient::new(&client_config); - - let response = base_sts_client.get_caller_identity().send().await.context( - error::GetCallerIdentitySnafu { - region: base_region.as_ref(), - }, - )?; - let base_account_id = response.account.context(error::MissingInResponseSnafu { - request_type: "GetCallerIdentity", - missing: "account", - })?; - account_ids.remove(&base_account_id); - - // If we have any accounts other than the base account, grant them access. 
- if !account_ids.is_empty() { - info!("Granting access to target accounts so we can copy the AMI"); - let account_id_vec: Vec<_> = account_ids.into_iter().collect(); - - let modify_options = ModifyOptions { - user_ids: account_id_vec, - group_names: Vec::new(), - organization_arns: Vec::new(), - organizational_unit_arns: Vec::new(), - }; - - modify_snapshots( - &modify_options, - &OperationType::Add, - &ids_of_image.snapshot_ids, - &base_ec2_client, - &base_region, - ) - .await - .context(error::GrantAccessSnafu { - thing: "snapshots", - region: base_region.as_ref(), - })?; - - modify_image( - &modify_options, - &OperationType::Add, - &ids_of_image.image_id, - &base_ec2_client, - ) - .await - .context(error::GrantImageAccessSnafu { - thing: "image", - region: base_region.as_ref(), - })?; - } - - // Next, make EC2 clients so we can fetch and copy AMIs. We make a map storing our regional - // clients because they're used in a future and need to live until the future is resolved. - let mut ec2_clients = HashMap::with_capacity(regions.len()); - for region in regions.iter() { - let client_config = build_client_config(region, &base_region, &aws).await; - let ec2_client = Ec2Client::new(&client_config); - ec2_clients.insert(region.clone(), ec2_client); - } - - // First, we check if the AMI already exists in each region. - info!("Checking whether AMIs already exist in target regions"); - let mut get_requests = Vec::with_capacity(regions.len()); - for region in regions.iter() { - let ec2_client = &ec2_clients[region]; - let get_request = get_ami_id(&ami_args.name, &ami_args.arch, region, ec2_client); - let info_future = ready(region.clone()); - get_requests.push(join(info_future, get_request)); - } - let request_stream = stream::iter(get_requests).buffer_unordered(4); - let get_responses: Vec<(Region, std::result::Result, register::Error>)> = - request_stream.collect().await; - - // If an AMI already existed, just add it to our list, otherwise prepare a copy request. 
- let mut copy_requests = Vec::with_capacity(regions.len()); - for (region, get_response) in get_responses { - let get_response = get_response.context(error::GetAmiIdSnafu { - name: &ami_args.name, - arch: ami_args.arch.as_ref(), - region: region.as_ref(), - })?; - if let Some(id) = get_response { - info!( - "Found '{}' already registered in {}: {}", - ami_args.name, region, id - ); - let public = ami_is_public(&ec2_clients[®ion], region.as_ref(), &id) - .await - .context(error::IsAmiPublicSnafu { - image_id: id.clone(), - region: base_region.to_string(), - })?; - - let launch_permissions = - get_launch_permissions(&ec2_clients[®ion], region.as_ref(), &id) - .await - .context(error::DescribeImageAttributeSnafu { - region: region.as_ref(), - image_id: id.clone(), - })?; - - amis.insert( - region.as_ref().to_string(), - Image::new(&id, &ami_args.name, Some(public), Some(launch_permissions)), - ); - continue; - } - - let ec2_client = &ec2_clients[®ion]; - let base_region = base_region.to_owned(); - let copy_future = ec2_client - .copy_image() - .set_description(ami_args.description.clone()) - .set_name(Some(ami_args.name.clone())) - .set_source_image_id(Some(ids_of_image.image_id.clone())) - .set_source_region(Some(base_region.as_ref().to_string())) - .send(); - - // Store the region so we can output it to the user - let region_future = ready(region.clone()); - // Let the user know the copy is starting, when this future goes to run - let message_future = - lazy(move |_| info!("Starting copy from {} to {}", base_region, region)); - copy_requests.push(message_future.then(|_| join(region_future, copy_future))); - } - - // If all target regions already have the AMI, we're done. - if copy_requests.is_empty() { - return Ok(amis); - } - - // Start requests; they return almost immediately and the copying work is done by the service - // afterward. You should wait for the AMI status to be "available" before launching it. - // (We still use buffer_unordered, rather than something like join_all, to retain some control - // over the number of requests going out in case we need it later, but this will effectively - // spin through all regions quickly because the requests return before any copying is done.) - let request_stream = stream::iter(copy_requests).buffer_unordered(4); - // Run through the stream and collect results into a list. - let copy_responses: Vec<( - Region, - std::result::Result>, - )> = request_stream.collect().await; - - // Report on successes and errors; don't fail immediately if we see an error so we can report - // all successful IDs. - let mut saw_error = false; - for (region, copy_response) in copy_responses { - match copy_response { - Ok(success) => { - if let Some(image_id) = success.image_id { - info!( - "Registered AMI '{}' in {}: {}", - ami_args.name, region, image_id, - ); - amis.insert( - region.as_ref().to_string(), - Image::new(&image_id, &ami_args.name, Some(false), Some(vec![])), - ); - } else { - saw_error = true; - error!( - "Registered AMI '{}' in {} but didn't receive an AMI ID!", - ami_args.name, region, - ); - } - } - Err(e) => { - saw_error = true; - error!( - "Copy to {} failed: {}", - region, - e.into_service_error().code().unwrap_or("unknown") - ); - } - } - } - - ensure!(!saw_error, error::AmiCopySnafu); - - Ok(amis) -} - -/// If JSON output was requested, we serialize out a mapping of region to AMI information; this -/// struct holds the information we save about each AMI. 
The `ssm` subcommand uses this -/// information to populate templates representing SSM parameter names and values. -#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Hash)] -pub(crate) struct Image { - pub(crate) id: String, - pub(crate) name: String, - pub(crate) public: Option, - pub(crate) launch_permissions: Option>, -} - -impl Image { - fn new( - id: &str, - name: &str, - public: Option, - launch_permissions: Option>, - ) -> Self { - Self { - id: id.to_string(), - name: name.to_string(), - public, - launch_permissions, - } - } -} - -/// Returns the set of account IDs associated with the roles configured for the given regions. -async fn get_account_ids( - regions: &[Region], - base_region: &Region, - pubsys_aws_config: &PubsysAwsConfig, -) -> Result> { - let mut grant_accounts = HashSet::new(); - - // We make a map storing our regional clients because they're used in a future and need to - // live until the future is resolved. - let mut sts_clients = HashMap::with_capacity(regions.len()); - for region in regions.iter() { - let client_config = build_client_config(region, base_region, pubsys_aws_config).await; - let sts_client = StsClient::new(&client_config); - sts_clients.insert(region.clone(), sts_client); - } - - let mut requests = Vec::with_capacity(regions.len()); - for region in regions.iter() { - let sts_client = &sts_clients[region]; - let response_future = sts_client.get_caller_identity().send(); - - // Store the region so we can include it in any errors - let region_future = ready(region.clone()); - requests.push(join(region_future, response_future)); - } - - let request_stream = stream::iter(requests).buffer_unordered(4); - // Run through the stream and collect results into a list. - let responses: Vec<( - Region, - std::result::Result>, - )> = request_stream.collect().await; - - for (region, response) in responses { - let response = response.context(error::GetCallerIdentitySnafu { - region: region.as_ref(), - })?; - let account_id = response.account.context(error::MissingInResponseSnafu { - request_type: "GetCallerIdentity", - missing: "account", - })?; - grant_accounts.insert(account_id); - } - trace!("Found account IDs {:?}", grant_accounts); - - Ok(grant_accounts) -} - -mod error { - use crate::aws::{ami, publish_ami}; - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::modify_image_attribute::ModifyImageAttributeError; - use aws_sdk_ec2::types::LaunchPermission; - use aws_sdk_sts::operation::get_caller_identity::GetCallerIdentityError; - use snafu::Snafu; - use std::path::PathBuf; - - use super::public; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Some AMIs failed to copy, see above"))] - AmiCopy, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display( - "Failed to describe image attributes for image {} in region {}: {}", - image_id, - region, - source - ))] - DescribeImageAttribute { - image_id: String, - region: String, - source: super::launch_permissions::Error, - }, - - #[snafu(display("Failed to create file '{}': {}", path.display(), source))] - FileCreate { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Error getting AMI ID for {} {} in {}: {}", arch, name, region, source))] - GetAmiId { - name: String, - arch: String, - region: String, - source: ami::register::Error, - }, - - #[snafu(display("Error getting account ID in {}: {}", region, source))] - GetCallerIdentity { - region: String, - 
source: SdkError, - }, - - #[snafu(display( - "Failed to get snapshot IDs associated with {} in {}: {}", - image_id, - region, - source - ))] - GetSnapshots { - image_id: String, - region: String, - source: publish_ami::Error, - }, - - #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))] - GrantAccess { - thing: String, - region: String, - source: publish_ami::Error, - }, - - #[snafu(display("Failed to grant access to {} in {}: {}", thing, region, source))] - GrantImageAccess { - thing: String, - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to check if AMI with id {} is public in {}: {}", - image_id, - region, - source - ))] - IsAmiPublic { - image_id: String, - region: String, - source: public::Error, - }, - - #[snafu(display("Invalid launch permission: {:?}", launch_permission))] - InvalidLaunchPermission { launch_permission: LaunchPermission }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Response to {} was missing {}", request_type, missing))] - MissingInResponse { - request_type: String, - missing: String, - }, - - #[snafu(display("Error registering {} {} in {}: {}", arch, name, region, source))] - RegisterImage { - name: String, - arch: String, - region: String, - source: ami::register::Error, - }, - - #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] - WaitAmi { - id: String, - region: String, - source: ami::wait::Error, - }, - - #[snafu(display("Failed to write AMIs to '{}': {}", path.display(), source))] - WriteAmis { - path: PathBuf, - source: publish_ami::Error, - }, - } -} -pub(crate) use error::Error; - -use self::launch_permissions::LaunchPermissionDef; - -use super::publish_ami::write_amis; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/public.rs b/tools/pubsys/src/aws/ami/public.rs deleted file mode 100644 index 6404abda..00000000 --- a/tools/pubsys/src/aws/ami/public.rs +++ /dev/null @@ -1,64 +0,0 @@ -use aws_sdk_ec2::Client as Ec2Client; -use snafu::{ensure, OptionExt, ResultExt}; - -/// Returns whether or not the given AMI ID refers to a public AMI. -pub(crate) async fn ami_is_public( - ec2_client: &Ec2Client, - region: &str, - ami_id: &str, -) -> Result { - let ec2_response = ec2_client - .describe_images() - .image_ids(ami_id.to_string()) - .send() - .await - .context(error::DescribeImagesSnafu { - ami_id: ami_id.to_string(), - region: region.to_string(), - })?; - - let returned_images = ec2_response.images().unwrap_or_default(); - - ensure!( - returned_images.len() <= 1, - error::TooManyImagesSnafu { - ami_id: ami_id.to_string(), - region: region.to_string(), - } - ); - - Ok(returned_images - .first() - .context(error::NoSuchImageSnafu { - ami_id: ami_id.to_string(), - region: region.to_string(), - })? 
- .public() - .unwrap_or(false)) -} - -mod error { - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::describe_images::DescribeImagesError; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error describing AMI {} in {}: {}", ami_id, region, source))] - DescribeImages { - ami_id: String, - region: String, - #[snafu(source(from(SdkError, Box::new)))] - source: Box>, - }, - - #[snafu(display("AMI {} not found in {}", ami_id, region))] - NoSuchImage { ami_id: String, region: String }, - - #[snafu(display("Multiples AMIs with ID {} found in {}", ami_id, region))] - TooManyImages { ami_id: String, region: String }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/register.rs b/tools/pubsys/src/aws/ami/register.rs deleted file mode 100644 index aed614ae..00000000 --- a/tools/pubsys/src/aws/ami/register.rs +++ /dev/null @@ -1,331 +0,0 @@ -use super::{snapshot::snapshot_from_image, AmiArgs}; -use aws_sdk_ebs::Client as EbsClient; -use aws_sdk_ec2::types::{ - ArchitectureValues, BlockDeviceMapping, EbsBlockDevice, Filter, VolumeType, -}; -use aws_sdk_ec2::{config::Region, Client as Ec2Client}; -use buildsys::manifest::{self, ImageFeature}; -use coldsnap::{SnapshotUploader, SnapshotWaiter}; -use log::{debug, info, warn}; -use snafu::{ensure, OptionExt, ResultExt}; - -const ROOT_DEVICE_NAME: &str = "/dev/xvda"; -const DATA_DEVICE_NAME: &str = "/dev/xvdb"; - -// Features we assume/enable for the images. -const VIRT_TYPE: &str = "hvm"; -const VOLUME_TYPE: &str = "gp2"; -const SRIOV: &str = "simple"; -const ENA: bool = true; - -#[derive(Debug)] -pub(crate) struct RegisteredIds { - pub(crate) image_id: String, - pub(crate) snapshot_ids: Vec, -} - -/// Helper for `register_image`. Inserts registered snapshot IDs into `cleanup_snapshot_ids` so -/// they can be cleaned up on failure if desired. 
-async fn _register_image( - ami_args: &AmiArgs, - region: &Region, - ebs_client: EbsClient, - ec2_client: &Ec2Client, - cleanup_snapshot_ids: &mut Vec, -) -> Result { - let variant_manifest = manifest::ManifestInfo::new(&ami_args.variant_manifest).context( - error::LoadVariantManifestSnafu { - path: &ami_args.variant_manifest, - }, - )?; - - let image_layout = variant_manifest - .image_layout() - .context(error::MissingImageLayoutSnafu { - path: &ami_args.variant_manifest, - })?; - - let (os_volume_size, data_volume_size) = image_layout.publish_image_sizes_gib(); - - let uefi_data = - std::fs::read_to_string(&ami_args.uefi_data).context(error::LoadUefiDataSnafu { - path: &ami_args.uefi_data, - })?; - - debug!("Uploading images into EBS snapshots in {}", region); - let uploader = SnapshotUploader::new(ebs_client); - let os_snapshot = - snapshot_from_image(&ami_args.os_image, &uploader, None, ami_args.no_progress) - .await - .context(error::SnapshotSnafu { - path: &ami_args.os_image, - region: region.as_ref(), - })?; - cleanup_snapshot_ids.push(os_snapshot.clone()); - - let mut data_snapshot = None; - if let Some(data_image) = &ami_args.data_image { - let snapshot = snapshot_from_image(data_image, &uploader, None, ami_args.no_progress) - .await - .context(error::SnapshotSnafu { - path: &ami_args.os_image, - region: region.as_ref(), - })?; - cleanup_snapshot_ids.push(snapshot.clone()); - data_snapshot = Some(snapshot); - } - - info!("Waiting for snapshots to become available in {}", region); - let waiter = SnapshotWaiter::new(ec2_client.clone()); - waiter - .wait(&os_snapshot, Default::default()) - .await - .context(error::WaitSnapshotSnafu { - snapshot_type: "root", - })?; - - if let Some(ref data_snapshot) = data_snapshot { - waiter - .wait(&data_snapshot, Default::default()) - .await - .context(error::WaitSnapshotSnafu { - snapshot_type: "data", - })?; - } - - // Prepare parameters for AMI registration request - let os_bdm = BlockDeviceMapping::builder() - .set_device_name(Some(ROOT_DEVICE_NAME.to_string())) - .set_ebs(Some( - EbsBlockDevice::builder() - .set_delete_on_termination(Some(true)) - .set_snapshot_id(Some(os_snapshot.clone())) - .set_volume_type(Some(VolumeType::from(VOLUME_TYPE))) - .set_volume_size(Some(os_volume_size)) - .build(), - )) - .build(); - - let mut data_bdm = None; - if let Some(ref data_snapshot) = data_snapshot { - let mut bdm = os_bdm.clone(); - bdm.device_name = Some(DATA_DEVICE_NAME.to_string()); - if let Some(ebs) = bdm.ebs.as_mut() { - ebs.snapshot_id = Some(data_snapshot.clone()); - ebs.volume_size = Some(data_volume_size); - } - data_bdm = Some(bdm); - } - - let mut block_device_mappings = vec![os_bdm]; - if let Some(data_bdm) = data_bdm { - block_device_mappings.push(data_bdm); - } - - let uefi_secure_boot_enabled = variant_manifest - .image_features() - .iter() - .flatten() - .any(|f| **f == ImageFeature::UefiSecureBoot); - - let (boot_mode, uefi_data) = if uefi_secure_boot_enabled { - (Some("uefi-preferred".into()), Some(uefi_data)) - } else { - (None, None) - }; - - info!("Making register image call in {}", region); - let register_response = ec2_client - .register_image() - .set_architecture(Some(ami_args.arch.clone())) - .set_block_device_mappings(Some(block_device_mappings)) - .set_boot_mode(boot_mode) - .set_uefi_data(uefi_data) - .set_description(ami_args.description.clone()) - .set_ena_support(Some(ENA)) - .set_name(Some(ami_args.name.clone())) - .set_root_device_name(Some(ROOT_DEVICE_NAME.to_string())) - 
.set_sriov_net_support(Some(SRIOV.to_string())) - .set_virtualization_type(Some(VIRT_TYPE.to_string())) - .send() - .await - .context(error::RegisterImageSnafu { - region: region.as_ref(), - })?; - - let image_id = register_response - .image_id - .context(error::MissingImageIdSnafu { - region: region.as_ref(), - })?; - - let mut snapshot_ids = vec![os_snapshot]; - if let Some(data_snapshot) = data_snapshot { - snapshot_ids.push(data_snapshot); - } - - Ok(RegisteredIds { - image_id, - snapshot_ids, - }) -} - -/// Uploads the given images into snapshots and registers an AMI using them as its block device -/// mapping. Deletes snapshots on failure. -pub(crate) async fn register_image( - ami_args: &AmiArgs, - region: &Region, - ebs_client: EbsClient, - ec2_client: &Ec2Client, -) -> Result { - info!("Registering '{}' in {}", ami_args.name, region); - let mut cleanup_snapshot_ids = Vec::new(); - let register_result = _register_image( - ami_args, - region, - ebs_client, - ec2_client, - &mut cleanup_snapshot_ids, - ) - .await; - - if register_result.is_err() { - for snapshot_id in cleanup_snapshot_ids { - if let Err(e) = ec2_client - .delete_snapshot() - .set_snapshot_id(Some(snapshot_id.clone())) - .send() - .await - { - warn!( - "While cleaning up, failed to delete snapshot {}: {}", - snapshot_id, e - ); - } - } - } - register_result -} - -/// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). -pub(crate) async fn get_ami_id( - name: S, - arch: &ArchitectureValues, - region: &Region, - ec2_client: &Ec2Client, -) -> Result> -where - S: Into, -{ - let describe_response = ec2_client - .describe_images() - .set_owners(Some(vec!["self".to_string()])) - .set_filters(Some(vec![ - Filter::builder() - .set_name(Some("name".to_string())) - .set_values(Some(vec![name.into()])) - .build(), - Filter::builder() - .set_name(Some("architecture".to_string())) - .set_values(Some(vec![arch.as_ref().to_string()])) - .build(), - Filter::builder() - .set_name(Some("image-type".to_string())) - .set_values(Some(vec!["machine".to_string()])) - .build(), - Filter::builder() - .set_name(Some("virtualization-type".to_string())) - .set_values(Some(vec![VIRT_TYPE.to_string()])) - .build(), - ])) - .send() - .await - .context(error::DescribeImagesSnafu { - region: region.as_ref(), - })?; - if let Some(mut images) = describe_response.images { - if images.is_empty() { - return Ok(None); - } - ensure!( - images.len() == 1, - error::MultipleImagesSnafu { - images: images - .into_iter() - .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) - .collect::>() - } - ); - let image = images.remove(0); - // If there is an image but we couldn't find the ID of it, fail rather than returning None, - // which would indicate no image. 
- let id = image.image_id.context(error::MissingImageIdSnafu { - region: region.as_ref(), - })?; - Ok(Some(id)) - } else { - Ok(None) - } -} - -mod error { - use crate::aws::ami; - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::{ - describe_images::DescribeImagesError, register_image::RegisterImageError, - }; - use snafu::Snafu; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to describe images in {}: {}", region, source))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display("Failed to load variant manifest from {}: {}", path.display(), source))] - LoadVariantManifest { - path: PathBuf, - source: buildsys::manifest::Error, - }, - - #[snafu(display("Failed to load UEFI data from {}: {}", path.display(), source))] - LoadUefiData { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Could not find image layout for {}", path.display()))] - MissingImageLayout { path: PathBuf }, - - #[snafu(display("Image response in {} did not include image ID", region))] - MissingImageId { region: String }, - - #[snafu(display("DescribeImages with unique filters returned multiple results: {}", images.join(", ")))] - MultipleImages { images: Vec }, - - #[snafu(display("Failed to register image in {}: {}", region, source))] - RegisterImage { - region: String, - source: SdkError, - }, - - #[snafu(display("Failed to upload snapshot from {} in {}: {}", path.display(),region, source))] - Snapshot { - path: PathBuf, - region: String, - source: ami::snapshot::Error, - }, - - #[snafu(display("{} snapshot did not become available: {}", snapshot_type, source))] - WaitSnapshot { - snapshot_type: String, - source: coldsnap::WaitError, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/snapshot.rs b/tools/pubsys/src/aws/ami/snapshot.rs deleted file mode 100644 index 15b25611..00000000 --- a/tools/pubsys/src/aws/ami/snapshot.rs +++ /dev/null @@ -1,65 +0,0 @@ -use coldsnap::SnapshotUploader; -use indicatif::{ProgressBar, ProgressStyle}; -use snafu::{OptionExt, ResultExt}; -use std::path::Path; - -/// Create a progress bar to show status of snapshot blocks, if wanted. -fn build_progress_bar(no_progress: bool, verb: &str) -> Result> { - if no_progress { - return Ok(None); - } - let progress_bar = ProgressBar::new(0); - progress_bar.set_style( - ProgressStyle::default_bar() - .template(&[" ", verb, " [{bar:50.white/black}] {pos}/{len} ({eta})"].concat()) - .context(error::ProgressBarTemplateSnafu)? - .progress_chars("=> "), - ); - Ok(Some(progress_bar)) -} - -/// Uploads the given path into a snapshot. -pub(crate) async fn snapshot_from_image
<P>
( - path: P, - uploader: &SnapshotUploader, - desired_size: Option, - no_progress: bool, -) -> Result -where - P: AsRef, -{ - let path = path.as_ref(); - let progress_bar = build_progress_bar(no_progress, "Uploading snapshot"); - let filename = path - .file_name() - .context(error::InvalidImagePathSnafu { path })? - .to_string_lossy(); - - uploader - .upload_from_file(path, desired_size, Some(&filename), progress_bar?) - .await - .context(error::UploadSnapshotSnafu) -} - -mod error { - use snafu::Snafu; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - #[allow(clippy::large_enum_variant)] - pub(crate) enum Error { - #[snafu(display("Invalid image path '{}'", path.display()))] - InvalidImagePath { path: PathBuf }, - - #[snafu(display("Failed to parse progress style template: {}", source))] - ProgressBarTemplate { - source: indicatif::style::TemplateError, - }, - - #[snafu(display("Failed to upload snapshot: {}", source))] - UploadSnapshot { source: coldsnap::UploadError }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ami/wait.rs b/tools/pubsys/src/aws/ami/wait.rs deleted file mode 100644 index 9a2c7cd5..00000000 --- a/tools/pubsys/src/aws/ami/wait.rs +++ /dev/null @@ -1,139 +0,0 @@ -use crate::aws::client::build_client_config; -use aws_sdk_ec2::{config::Region, types::ImageState, Client as Ec2Client}; -use log::info; -use pubsys_config::AwsConfig as PubsysAwsConfig; -use snafu::{ensure, ResultExt}; -use std::thread::sleep; -use std::time::Duration; - -/// Waits for the given AMI ID to reach the given state, requiring it be in that state for -/// `success_required` checks in a row. -pub(crate) async fn wait_for_ami( - id: &str, - region: &Region, - sts_region: &Region, - state: &str, - successes_required: u8, - pubsys_aws_config: &PubsysAwsConfig, -) -> Result<()> { - let mut successes = 0; - let max_attempts = 90; - let mut attempts = 0; - let seconds_between_attempts = 2; - - loop { - attempts += 1; - // Stop if we're over max, unless we're on a success streak, then give it some wiggle room. - ensure!( - (attempts - successes) <= max_attempts, - error::MaxAttemptsSnafu { - id, - max_attempts, - region: region.as_ref(), - } - ); - - // Use a new client each time so we have more confidence that different endpoints can see - // the new AMI. - let client_config = build_client_config(region, sts_region, pubsys_aws_config).await; - let ec2_client = Ec2Client::new(&client_config); - let describe_response = ec2_client - .describe_images() - .set_image_ids(Some(vec![id.to_string()])) - .send() - .await - .context(error::DescribeImagesSnafu { - region: region.as_ref(), - })?; - - // The response contains an Option>, so we have to check that we got a - // list at all, and then that the list contains the ID in question. - if let Some(images) = describe_response.images { - let mut saw_it = false; - for image in images { - if let Some(found_id) = image.image_id { - if let Some(found_state) = image.state { - if id == found_id && ImageState::from(state) == found_state { - // Success; check if we have enough to declare victory. - saw_it = true; - successes += 1; - if successes >= successes_required { - info!("Found {} {} in {}", id, state, region); - return Ok(()); - } - break; - } - // If the state shows us the AMI failed, we know we'll never hit the - // desired state. (Unless they desired "error", which will be caught - // above.) 
- match &found_state { - ImageState::Invalid - | ImageState::Deregistered - | ImageState::Failed - | ImageState::Error => error::StateSnafu { - id, - state: found_state.as_ref(), - region: region.as_ref(), - } - .fail(), - _ => Ok(()), - }?; - } - } - } - if !saw_it { - // Did not find image in list; reset success count and try again (if we have spare attempts) - successes = 0; - } - } else { - // Did not receive list; reset success count and try again (if we have spare attempts) - successes = 0; - }; - - if attempts % 5 == 1 { - info!( - "Waiting for {} in {} to be {}... (attempt {} of {})", - id, region, state, attempts, max_attempts - ); - } - sleep(Duration::from_secs(seconds_between_attempts)); - } -} - -mod error { - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::describe_images::DescribeImagesError; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - #[allow(clippy::large_enum_variant)] - pub(crate) enum Error { - #[snafu(display("Failed to describe images in {}: {}", region, source))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to reach desired state within {} attempts for {} in {}", - max_attempts, - id, - region - ))] - MaxAttempts { - max_attempts: u8, - id: String, - region: String, - }, - - #[snafu(display("Image '{}' went to '{}' state in {}", id, state, region))] - State { - id: String, - state: String, - region: String, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/client.rs b/tools/pubsys/src/aws/client.rs deleted file mode 100644 index 770504ac..00000000 --- a/tools/pubsys/src/aws/client.rs +++ /dev/null @@ -1,71 +0,0 @@ -use aws_config::default_provider::credentials::default_provider; -use aws_config::profile::ProfileFileCredentialsProvider; -use aws_config::sts::AssumeRoleProvider; -use aws_config::SdkConfig; -use aws_credential_types::provider::SharedCredentialsProvider; -use aws_types::region::Region; -use pubsys_config::AwsConfig as PubsysAwsConfig; - -/// Create an AWS client config using the given regions and pubsys config. -pub(crate) async fn build_client_config( - region: &Region, - sts_region: &Region, - pubsys_aws_config: &PubsysAwsConfig, -) -> SdkConfig { - let maybe_profile = pubsys_aws_config.profile.clone(); - let maybe_role = pubsys_aws_config.role.clone(); - let maybe_regional_role = pubsys_aws_config - .region - .get(region.as_ref()) - .and_then(|r| r.role.clone()); - let base_provider = base_provider(&maybe_profile).await; - - let config = match (&maybe_role, &maybe_regional_role) { - (None, None) => aws_config::from_env().credentials_provider(base_provider), - _ => { - let assume_roles = maybe_role.iter().chain(maybe_regional_role.iter()).cloned(); - let provider = - build_provider(sts_region, assume_roles.clone(), base_provider.clone()).await; - aws_config::from_env().credentials_provider(provider) - } - }; - - config.region(region.clone()).load().await -} - -/// Chains credentials providers to assume the given roles in order. -/// The region given should be the one in which you want to talk to STS to get temporary -/// credentials, not the region in which you want to talk to a service endpoint like EC2. 
This is -/// needed because you may be assuming a role in an opt-in region from an account that has not -/// opted-in to that region, and you need to get session credentials from an STS endpoint in a -/// region to which you have access in the base account -async fn build_provider( - sts_region: &Region, - assume_roles: impl Iterator, - base_provider: SharedCredentialsProvider, -) -> SharedCredentialsProvider { - let mut provider = base_provider; - for assume_role in assume_roles { - provider = SharedCredentialsProvider::new( - AssumeRoleProvider::builder(assume_role) - .region(sts_region.clone()) - .session_name("pubsys") - .build(provider.clone()), - ) - } - provider -} - -/// If the user specified a profile, use that, otherwise use the default -/// credentials mechanisms. -async fn base_provider(maybe_profile: &Option) -> SharedCredentialsProvider { - if let Some(profile) = maybe_profile { - SharedCredentialsProvider::new( - ProfileFileCredentialsProvider::builder() - .profile_name(profile) - .build(), - ) - } else { - SharedCredentialsProvider::new(default_provider().await) - } -} diff --git a/tools/pubsys/src/aws/mod.rs b/tools/pubsys/src/aws/mod.rs deleted file mode 100644 index 80e06de5..00000000 --- a/tools/pubsys/src/aws/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -use aws_sdk_ec2::config::Region; -use aws_sdk_ec2::types::ArchitectureValues; - -#[macro_use] -pub(crate) mod client; - -pub(crate) mod ami; -pub(crate) mod promote_ssm; -pub(crate) mod publish_ami; -pub(crate) mod ssm; -pub(crate) mod validate_ami; -pub(crate) mod validate_ssm; - -/// Builds a Region from the given region name. -fn region_from_string(name: &str) -> Region { - Region::new(name.to_owned()) -} - -/// Parses the given string as an architecture, mapping values to the ones used in EC2. -pub(crate) fn parse_arch(input: &str) -> Result { - match input { - "x86_64" | "amd64" => Ok(ArchitectureValues::X8664), - "arm64" | "aarch64" => Ok(ArchitectureValues::Arm64), - _ => error::ParseArchSnafu { - input, - msg: "unknown architecture", - } - .fail(), - } -} - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to parse arch '{}': {}", input, msg))] - ParseArch { input: String, msg: String }, - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/promote_ssm/mod.rs b/tools/pubsys/src/aws/promote_ssm/mod.rs deleted file mode 100644 index 21f4ca1d..00000000 --- a/tools/pubsys/src/aws/promote_ssm/mod.rs +++ /dev/null @@ -1,550 +0,0 @@ -//! The promote_ssm module owns the 'promote-ssm' subcommand and controls the process of copying -//! 
SSM parameters from one version to another - -use crate::aws::client::build_client_config; -use crate::aws::ssm::template::RenderedParametersMap; -use crate::aws::ssm::{key_difference, ssm, template, BuildContext, SsmKey}; -use crate::aws::validate_ssm::parse_parameters; -use crate::aws::{parse_arch, region_from_string}; -use crate::Args; -use aws_sdk_ec2::types::ArchitectureValues; -use aws_sdk_ssm::{config::Region, Client as SsmClient}; -use clap::Parser; -use log::{info, trace}; -use pubsys_config::InfraConfig; -use snafu::{ensure, ResultExt}; -use std::collections::HashMap; -use std::path::PathBuf; - -/// Copies sets of SSM parameters -#[derive(Debug, Parser)] -pub(crate) struct PromoteArgs { - /// The architecture of the machine image - #[arg(long, value_parser = parse_arch)] - arch: ArchitectureValues, - - /// The variant name for the current build - #[arg(long)] - variant: String, - - /// Version number (or string) to copy from - #[arg(long)] - source: String, - - /// Version number (or string) to copy to - #[arg(long)] - target: String, - - /// Comma-separated list of regions to promote in, overriding Infra.toml - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// File holding the parameter templates - #[arg(long)] - template_path: PathBuf, - - /// If set, contains the path to the file holding the original SSM parameters - /// and where the newly promoted parameters will be written - #[arg(long)] - ssm_parameter_output: Option, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, promote_args: &PromoteArgs) -> Result<()> { - info!( - "Promoting SSM parameters from {} to {}", - promote_args.source, promote_args.target - ); - - // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - - trace!("Parsed infra config: {:#?}", infra_config); - let aws = infra_config.aws.unwrap_or_default(); - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. 
- let regions = if !promote_args.regions.is_empty() { - promote_args.regions.clone() - } else { - aws.regions.clone().into() - } - .into_iter() - .map(|name| region_from_string(&name)) - .collect::>(); - - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - let base_region = ®ions[0]; - - let mut ssm_clients = HashMap::with_capacity(regions.len()); - for region in ®ions { - let client_config = build_client_config(region, base_region, &aws).await; - let ssm_client = SsmClient::new(&client_config); - ssm_clients.insert(region.clone(), ssm_client); - } - - // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // Non-image-specific context for building and rendering templates - let source_build_context = BuildContext { - variant: &promote_args.variant, - arch: promote_args.arch.as_str(), - image_version: &promote_args.source, - }; - - let target_build_context = BuildContext { - variant: &promote_args.variant, - arch: promote_args.arch.as_str(), - image_version: &promote_args.target, - }; - - info!( - "Parsing SSM parameter templates from {}", - promote_args.template_path.display() - ); - // Doesn't matter which build context we use to find template files because version isn't used - // in their naming - let template_parameters = - template::get_parameters(&promote_args.template_path, &source_build_context) - .context(error::FindTemplatesSnafu)?; - - if template_parameters.parameters.is_empty() { - info!( - "No parameters for this arch/variant in {}", - promote_args.template_path.display() - ); - return Ok(()); - } - - // Render parameter names into maps of {template string => rendered value}. We need the - // template strings so we can associate source parameters with target parameters that came - // from the same template, so we know what to copy. - let source_parameter_map = - template::render_parameter_names(&template_parameters, ssm_prefix, &source_build_context) - .context(error::RenderTemplatesSnafu)?; - let target_parameter_map = - template::render_parameter_names(&template_parameters, ssm_prefix, &target_build_context) - .context(error::RenderTemplatesSnafu)?; - - // Parameters are the same in each region, so we need to associate each region with each of - // the parameter names so we can fetch them. - let source_keys: Vec = regions - .iter() - .flat_map(|region| { - source_parameter_map - .values() - .map(move |name| SsmKey::new(region.clone(), name.clone())) - }) - .collect(); - let target_keys: Vec = regions - .iter() - .flat_map(|region| { - target_parameter_map - .values() - .map(move |name| SsmKey::new(region.clone(), name.clone())) - }) - .collect(); - - // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Getting current SSM parameters for source and target names"); - let current_source_parameters = ssm::get_parameters(&source_keys, &ssm_clients) - .await - .context(error::FetchSsmSnafu)?; - trace!( - "Current source SSM parameters: {:#?}", - current_source_parameters - ); - ensure!( - !current_source_parameters.is_empty(), - error::EmptySourceSnafu { - version: &promote_args.source - } - ); - - let current_target_parameters = ssm::get_parameters(&target_keys, &ssm_clients) - .await - .context(error::FetchSsmSnafu)?; - trace!( - "Current target SSM parameters: {:#?}", - current_target_parameters - ); - - // Build a map of rendered source parameter names to rendered target parameter names. 
This - // will let us find which target parameters to set based on the source parameter names we get - // back from SSM. - let source_target_map: HashMap<&String, &String> = source_parameter_map - .iter() - .map(|(k, v)| (v, &target_parameter_map[k])) - .collect(); - - // Show the difference between source and target parameters in SSM. We use the - // source_target_map we built above to map source keys to target keys (generated from the same - // template) so that the diff code has common keys to compare. - let set_parameters = key_difference( - ¤t_source_parameters - .into_iter() - .map(|(key, value)| { - ( - SsmKey::new(key.region, source_target_map[&key.name].to_string()), - value, - ) - }) - .collect(), - ¤t_target_parameters, - ); - if set_parameters.is_empty() { - info!("No changes necessary."); - return Ok(()); - } - - // If an output file path was given, read the existing parameters in `ssm_parameter_output` and - // write the newly promoted parameters to `ssm_parameter_output` along with the original - // parameters - if let Some(ssm_parameter_output) = &promote_args.ssm_parameter_output { - append_rendered_parameters(ssm_parameter_output, &set_parameters).await?; - } - - // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Setting updated SSM parameters."); - ssm::set_parameters(&set_parameters, &ssm_clients) - .await - .context(error::SetSsmSnafu)?; - - info!("Validating whether live parameters in SSM reflect changes."); - ssm::validate_parameters(&set_parameters, &ssm_clients) - .await - .context(error::ValidateSsmSnafu)?; - - info!("All parameters match requested values."); - Ok(()) -} - -/// Read parameters in given file, add newly promoted parameters, and write combined parameters to -/// the given file -async fn append_rendered_parameters( - ssm_parameters_output: &PathBuf, - set_parameters: &HashMap, -) -> Result<()> { - // If the file doesn't exist, assume that there are no existing parameters - let parsed_parameters = parse_parameters(&ssm_parameters_output.to_owned()) - .await - .or_else({ - |e| match e { - crate::aws::validate_ssm::Error::ReadExpectedParameterFile { .. } => { - Ok(HashMap::new()) - } - _ => Err(e), - } - }) - .context(error::ParseExistingSsmParametersSnafu { - path: ssm_parameters_output, - })? - // SsmKey contains region information, so we can lose the top-level region. - .into_values() - .fold(HashMap::new(), |mut acc, params| { - acc.extend(params); - acc - }); - - let combined_parameters = merge_parameters(parsed_parameters, set_parameters); - - write_rendered_parameters( - ssm_parameters_output, - &RenderedParametersMap::from(combined_parameters).rendered_parameters, - ) - .context(error::WriteRenderedSsmParametersSnafu { - path: ssm_parameters_output, - })?; - - Ok(()) -} - -/// Return a HashMap of Region mapped to a HashMap of SsmKey, String pairs, representing the newly -/// promoted parameters as well as the original parameters. In case of a parameter collision, -/// the parameter takes the promoted value. -fn merge_parameters( - source_parameters: HashMap, - set_parameters: &HashMap, -) -> HashMap> { - let mut combined_parameters = HashMap::new(); - - source_parameters - .into_iter() - // Process the `set_parameters` second so that they overwrite existing values. 
- .chain(set_parameters.clone()) - .for_each(|(ssm_key, ssm_value)| { - combined_parameters - // The `entry()` API demands that we clone - .entry(ssm_key.region.clone()) - .or_insert(HashMap::new()) - .insert(ssm_key, ssm_value); - }); - - combined_parameters -} - -mod error { - use std::path::PathBuf; - - use crate::aws::{ - ssm::{ssm, template}, - validate_ssm, - }; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { - source: pubsys_config::Error, - }, - - #[snafu(display("Found no parameters in source version {}", version))] - EmptySource { - version: String, - }, - - #[snafu(display("Failed to fetch parameters from SSM: {}", source))] - FetchSsm { - source: ssm::Error, - }, - - #[snafu(display("Failed to find templates: {}", source))] - FindTemplates { - source: template::Error, - }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { - missing: String, - }, - - #[snafu(display("Failed to render templates: {}", source))] - RenderTemplates { - source: template::Error, - }, - - #[snafu(display("Failed to set SSM parameters: {}", source))] - SetSsm { - source: ssm::Error, - }, - - ValidateSsm { - source: ssm::Error, - }, - - #[snafu(display( - "Failed to parse existing SSM parameters at path {:?}: {}", - path, - source, - ))] - ParseExistingSsmParameters { - source: validate_ssm::error::Error, - path: PathBuf, - }, - - #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))] - ParseRenderedSsmParameters { - source: serde_json::Error, - }, - - #[snafu(display("Failed to write rendered SSM parameters to {}: {}", path.display(), source))] - WriteRenderedSsmParameters { - path: PathBuf, - source: crate::aws::ssm::Error, - }, - } -} -pub(crate) use error::Error; - -use super::ssm::write_rendered_parameters; -type Result = std::result::Result; - -#[cfg(test)] -mod test { - use std::collections::HashMap; - - use crate::aws::{promote_ssm::merge_parameters, ssm::SsmKey}; - use aws_sdk_ssm::config::Region; - - #[test] - fn combined_parameters() { - let existing_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test4-unpromoted-parameter-name".to_string(), - ), - "test4-unpromoted-parameter-value".to_string(), - ), - ]); - let set_parameters = HashMap::from([ - ( - SsmKey::new( - Region::new("us-west-2"), - "test1-parameter-name-promoted".to_string(), - ), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test2-parameter-name-promoted".to_string(), - ), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]); - let map = merge_parameters(existing_parameters, &set_parameters); - let expected_map = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), 
"test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test1-parameter-name-promoted".to_string(), - ), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-west-2"), - "test2-parameter-name-promoted".to_string(), - ), - "test2-parameter-value".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test4-unpromoted-parameter-name".to_string(), - ), - "test4-unpromoted-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn combined_parameters_overwrite() { - let existing_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ]); - let set_parameters = HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value-new".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value-new".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]); - let map = merge_parameters(existing_parameters, &set_parameters); - let expected_map = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value-new".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value-new".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - ), - ( - SsmKey::new( - Region::new("us-east-1"), - "test3-parameter-name-promoted".to_string(), - ), - "test3-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } -} diff --git a/tools/pubsys/src/aws/publish_ami/mod.rs b/tools/pubsys/src/aws/publish_ami/mod.rs deleted file mode 100644 index 578bdee4..00000000 --- a/tools/pubsys/src/aws/publish_ami/mod.rs +++ /dev/null @@ -1,731 +0,0 @@ -//! The publish_ami module owns the 'publish-ami' subcommand and controls the process of granting -//! and revoking access to EC2 AMIs. 
- -use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef}; -use crate::aws::ami::wait::{self, wait_for_ami}; -use crate::aws::ami::Image; -use crate::aws::client::build_client_config; -use crate::aws::region_from_string; -use crate::Args; -use aws_sdk_ec2::error::{ProvideErrorMetadata, SdkError}; -use aws_sdk_ec2::operation::{ - modify_image_attribute::{ModifyImageAttributeError, ModifyImageAttributeOutput}, - modify_snapshot_attribute::{ModifySnapshotAttributeError, ModifySnapshotAttributeOutput}, -}; -use aws_sdk_ec2::types::{ - ImageAttributeName, OperationType, PermissionGroup, SnapshotAttributeName, -}; -use aws_sdk_ec2::{config::Region, Client as Ec2Client}; -use clap::{Args as ClapArgs, Parser}; -use futures::future::{join, ready}; -use futures::stream::{self, StreamExt}; -use log::{debug, error, info, trace}; -use pubsys_config::InfraConfig; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, HashSet}; -use std::fs::File; -use std::iter::FromIterator; -use std::path::PathBuf; - -#[derive(Debug, Parser)] -#[group(id = "who", required = true, multiple = true)] -pub(crate) struct ModifyOptions { - /// User IDs to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) user_ids: Vec, - /// Group names to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) group_names: Vec, - /// Organization arns to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) organization_arns: Vec, - /// Organizational unit arns to give/remove access - #[arg(long, value_delimiter = ',', group = "who")] - pub(crate) organizational_unit_arns: Vec, -} - -/// Grants or revokes permissions to Bottlerocket AMIs -#[derive(Debug, ClapArgs)] -#[group(id = "mode", required = true, multiple = false)] -pub(crate) struct Who { - /// Path to the JSON file containing regional AMI IDs to modify - #[arg(long)] - ami_input: PathBuf, - - /// Comma-separated list of regions to publish in, overriding Infra.toml; given regions must be - /// in the --ami-input file - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// Grant access to the given users/groups - #[arg(long, group = "mode")] - grant: bool, - /// Revoke access from the given users/groups - #[arg(long, group = "mode")] - revoke: bool, - - #[command(flatten)] - modify_opts: ModifyOptions, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, publish_args: &Who) -> Result<()> { - let (operation, description) = if publish_args.grant { - (OperationType::Add, "granting access") - } else if publish_args.revoke { - (OperationType::Remove, "revoking access") - } else { - unreachable!("developer error: --grant and --revoke not required/exclusive"); - }; - - info!( - "Using AMI data from path: {}", - publish_args.ami_input.display() - ); - let file = File::open(&publish_args.ami_input).context(error::FileSnafu { - op: "open", - path: &publish_args.ami_input, - })?; - let mut ami_input: HashMap = - serde_json::from_reader(file).context(error::DeserializeSnafu { - path: &publish_args.ami_input, - })?; - trace!("Parsed AMI input: {:?}", ami_input); - - // pubsys will not create a file if it did not create AMIs, so we should only have an empty - // file if a user created one manually, and they shouldn't be creating an empty file. 
- ensure!( - !ami_input.is_empty(), - error::InputSnafu { - path: &publish_args.ami_input - } - ); - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - let aws = infra_config.aws.unwrap_or_default(); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. - let regions = if !publish_args.regions.is_empty() { - publish_args.regions.clone() - } else { - aws.regions.clone().into() - }; - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - let base_region = region_from_string(®ions[0]); - - // Check that the requested regions are a subset of the regions we *could* publish from the AMI - // input JSON. - let requested_regions = HashSet::from_iter(regions.iter()); - let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); - ensure!( - requested_regions.is_subset(&known_regions), - error::UnknownRegionsSnafu { - regions: requested_regions - .difference(&known_regions) - .map(|s| s.to_string()) - .collect::>(), - } - ); - - // Parse region names - let mut amis = HashMap::with_capacity(regions.len()); - for name in regions { - let image = ami_input - .remove(&name) - // This could only happen if someone removes the check above... - .with_context(|| error::UnknownRegionsSnafu { - regions: vec![name.clone()], - })?; - let region = region_from_string(&name); - amis.insert(region, image); - } - - // We make a map storing our regional clients because they're used in a future and need to - // live until the future is resolved. - let mut ec2_clients = HashMap::with_capacity(amis.len()); - for region in amis.keys() { - let client_config = build_client_config(region, &base_region, &aws).await; - let ec2_client = Ec2Client::new(&client_config); - ec2_clients.insert(region.clone(), ec2_client); - } - - // If AMIs aren't in "available" state, we can get a DescribeImages response that includes - // most of the data we need, but not snapshot IDs. - if amis.len() == 1 { - info!("Waiting for AMI to be available before changing its permissions") - } else { - info!( - "Waiting for all {} AMIs to be available before changing any of their permissions", - amis.len(), - ); - } - let mut wait_requests = Vec::with_capacity(amis.len()); - for (region, image) in &amis { - let wait_future = wait_for_ami(&image.id, region, &base_region, "available", 1, &aws); - // Store the region and ID so we can include it in errors - let info_future = ready((region.clone(), image.id.clone())); - wait_requests.push(join(info_future, wait_future)); - } - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(wait_requests).buffer_unordered(4); - let wait_responses: Vec<((Region, String), std::result::Result<(), wait::Error>)> = - request_stream.collect().await; - - // Make sure waits succeeded and AMIs are available. 
- for ((region, image_id), wait_response) in wait_responses { - wait_response.context(error::WaitAmiSnafu { - id: &image_id, - region: region.as_ref(), - })?; - } - - let snapshots = get_regional_snapshots(&amis, &ec2_clients).await?; - trace!("Found snapshots: {:?}", snapshots); - - info!( - "Updating all snapshot permissions before changing any AMI permissions - {}", - description - ); - modify_regional_snapshots( - &publish_args.modify_opts, - &operation, - &snapshots, - &ec2_clients, - ) - .await?; - - info!("Updating AMI permissions - {}", description); - modify_regional_images( - &publish_args.modify_opts, - &operation, - &mut amis, - &ec2_clients, - ) - .await?; - - write_amis( - &publish_args.ami_input, - &amis - .into_iter() - .map(|(region, image)| (region.to_string(), image)) - .collect::>(), - )?; - - Ok(()) -} - -pub(crate) fn write_amis(path: &PathBuf, amis: &HashMap) -> Result<()> { - let file = File::create(path).context(error::FileSnafu { - op: "write AMIs to file", - path, - })?; - serde_json::to_writer_pretty(file, &amis).context(error::SerializeSnafu { path })?; - info!("Wrote AMI data to {}", path.display()); - - Ok(()) -} - -/// Returns the snapshot IDs associated with the given AMI. -pub(crate) async fn get_snapshots( - image_id: &str, - region: &Region, - ec2_client: &Ec2Client, -) -> Result> { - let describe_response = ec2_client - .describe_images() - .set_image_ids(Some(vec![image_id.to_string()])) - .send() - .await - .context(error::DescribeImagesSnafu { - region: region.as_ref(), - })?; - - // Get the image description, ensuring we only have one. - let mut images = describe_response - .images - .context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "images", - })?; - ensure!( - !images.is_empty(), - error::MissingImageSnafu { - region: region.as_ref(), - image_id: image_id.to_string(), - } - ); - ensure!( - images.len() == 1, - error::MultipleImagesSnafu { - region: region.as_ref(), - images: images - .into_iter() - .map(|i| i.image_id.unwrap_or_else(|| "".to_string())) - .collect::>() - } - ); - let image = images.remove(0); - - // Look into the block device mappings for snapshots. - let bdms = image - .block_device_mappings - .context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "block_device_mappings", - })?; - ensure!( - !bdms.is_empty(), - error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "non-empty block_device_mappings" - } - ); - let mut snapshot_ids = Vec::with_capacity(bdms.len()); - for bdm in bdms { - let ebs = bdm.ebs.context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "ebs in block_device_mappings", - })?; - let snapshot_id = ebs.snapshot_id.context(error::MissingInResponseSnafu { - request_type: "DescribeImages", - missing: "snapshot_id in block_device_mappings.ebs", - })?; - snapshot_ids.push(snapshot_id); - } - - Ok(snapshot_ids) -} - -/// Returns a regional mapping of snapshot IDs associated with the given AMIs. -async fn get_regional_snapshots( - amis: &HashMap, - clients: &HashMap, -) -> Result>> { - // Build requests for image information. 
- let mut snapshots_requests = Vec::with_capacity(amis.len()); - for (region, image) in amis { - let ec2_client = &clients[region]; - - let snapshots_future = get_snapshots(&image.id, region, ec2_client); - - // Store the region so we can include it in errors - let info_future = ready(region.clone()); - snapshots_requests.push(join(info_future, snapshots_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(snapshots_requests).buffer_unordered(4); - let snapshots_responses: Vec<(Region, Result>)> = request_stream.collect().await; - - // For each described image, get the snapshot IDs from the block device mappings. - let mut snapshots = HashMap::with_capacity(amis.len()); - for (region, snapshot_ids) in snapshots_responses { - let snapshot_ids = snapshot_ids?; - snapshots.insert(region, snapshot_ids); - } - - Ok(snapshots) -} - -/// Modify createVolumePermission for the given users/groups on the given snapshots. The -/// `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_snapshots( - modify_opts: &ModifyOptions, - operation: &OperationType, - snapshot_ids: &[String], - ec2_client: &Ec2Client, - region: &Region, -) -> Result<()> { - let mut requests = Vec::new(); - for snapshot_id in snapshot_ids { - let response_future = ec2_client - .modify_snapshot_attribute() - .set_attribute(Some(SnapshotAttributeName::CreateVolumePermission)) - .set_user_ids( - (!modify_opts.user_ids.is_empty()).then_some(modify_opts.user_ids.clone()), - ) - .set_group_names( - (!modify_opts.group_names.is_empty()).then_some(modify_opts.group_names.clone()), - ) - .set_operation_type(Some(operation.clone())) - .set_snapshot_id(Some(snapshot_id.clone())) - .send(); - // Store the snapshot_id so we can include it in any errors - let info_future = ready(snapshot_id.to_string()); - requests.push(join(info_future, response_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(requests).buffer_unordered(4); - let responses: Vec<( - String, - std::result::Result>, - )> = request_stream.collect().await; - - for (snapshot_id, response) in responses { - response.context(error::ModifyImageAttributeSnafu { - snapshot_id, - region: region.as_ref(), - })?; - } - - Ok(()) -} - -/// Modify createVolumePermission for the given users/groups, across all of the snapshots in the -/// given regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_regional_snapshots( - modify_opts: &ModifyOptions, - operation: &OperationType, - snapshots: &HashMap>, - clients: &HashMap, -) -> Result<()> { - // Build requests to modify snapshot attributes. - let mut requests = Vec::new(); - for (region, snapshot_ids) in snapshots { - let ec2_client = &clients[region]; - let modify_snapshot_future = - modify_snapshots(modify_opts, operation, snapshot_ids, ec2_client, region); - - // Store the region and snapshot ID so we can include it in errors - let info_future = ready((region.clone(), snapshot_ids.clone())); - requests.push(join(info_future, modify_snapshot_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. 
- let request_stream = stream::iter(requests).buffer_unordered(4); - - #[allow(clippy::type_complexity)] - let responses: Vec<((Region, Vec), Result<()>)> = request_stream.collect().await; - - // Count up successes and failures so we can give a clear total in the final error message. - let mut error_count = 0u16; - let mut success_count = 0u16; - for ((region, snapshot_ids), response) in responses { - match response { - Ok(()) => { - success_count += 1; - debug!( - "Modified permissions in {} for snapshots [{}]", - region.as_ref(), - snapshot_ids.join(", "), - ); - } - Err(e) => { - error_count += 1; - if let Error::ModifyImageAttribute { source: err, .. } = e { - error!( - "Failed to modify permissions in {} for snapshots [{}]: {:?}", - region.as_ref(), - snapshot_ids.join(", "), - err.into_service_error().code().unwrap_or("unknown"), - ); - } - } - } - } - - ensure!( - error_count == 0, - error::ModifySnapshotAttributesSnafu { - error_count, - success_count, - } - ); - - Ok(()) -} - -/// Modify launchPermission for the given users/groups on the given images. The `operation` -/// should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_image( - modify_opts: &ModifyOptions, - operation: &OperationType, - image_id: &str, - ec2_client: &Ec2Client, -) -> std::result::Result> { - ec2_client - .modify_image_attribute() - .set_attribute(Some( - ImageAttributeName::LaunchPermission.as_ref().to_string(), - )) - .set_user_ids((!modify_opts.user_ids.is_empty()).then_some(modify_opts.user_ids.clone())) - .set_user_groups( - (!modify_opts.group_names.is_empty()).then_some(modify_opts.group_names.clone()), - ) - .set_organization_arns( - (!modify_opts.organization_arns.is_empty()) - .then_some(modify_opts.organization_arns.clone()), - ) - .set_organizational_unit_arns( - (!modify_opts.organizational_unit_arns.is_empty()) - .then_some(modify_opts.organizational_unit_arns.clone()), - ) - .set_operation_type(Some(operation.clone())) - .set_image_id(Some(image_id.to_string())) - .send() - .await -} - -/// Modify launchPermission for the given users/groups, across all of the images in the given -/// regional mapping. The `operation` should be "add" or "remove" to allow/deny permission. -pub(crate) async fn modify_regional_images( - modify_opts: &ModifyOptions, - operation: &OperationType, - images: &mut HashMap, - clients: &HashMap, -) -> Result<()> { - let mut requests = Vec::new(); - for (region, image) in &mut *images { - let image_id = &image.id; - let ec2_client = &clients[region]; - - let modify_image_future = modify_image(modify_opts, operation, image_id, ec2_client); - - // Store the region and image ID so we can include it in errors - let info_future = ready((region.as_ref().to_string(), image_id.clone())); - requests.push(join(info_future, modify_image_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(requests).buffer_unordered(4); - #[allow(clippy::type_complexity)] - let responses: Vec<( - (String, String), - std::result::Result>, - )> = request_stream.collect().await; - - // Count up successes and failures so we can give a clear total in the final error message. 
- let mut error_count = 0u16; - let mut success_count = 0u16; - for ((region, image_id), modify_image_response) in responses { - match modify_image_response { - Ok(_) => { - success_count += 1; - info!("Modified permissions of image {} in {}", image_id, region); - - // Set the `public` and `launch_permissions` fields for the Image object - let image = images.get_mut(&Region::new(region.clone())).ok_or( - error::Error::MissingRegion { - region: region.clone(), - }, - )?; - let launch_permissions: Vec = get_launch_permissions( - &clients[&Region::new(region.clone())], - region.as_ref(), - &image_id, - ) - .await - .context(error::DescribeImageAttributeSnafu { - image_id: image_id.clone(), - region: region.to_string(), - })?; - - // If the launch permissions contain the group `all` after the modification, - // the image is public - image.public = Some(launch_permissions.iter().any(|launch_permission| { - launch_permission - == &LaunchPermissionDef::Group(PermissionGroup::All.as_str().to_string()) - })); - image.launch_permissions = Some(launch_permissions); - } - Err(e) => { - error_count += 1; - error!( - "Modifying permissions of {} in {} failed: {}", - image_id, - region, - e.into_service_error().code().unwrap_or("unknown"), - ); - } - } - } - - ensure!( - error_count == 0, - error::ModifyImagesAttributesSnafu { - error_count, - success_count, - } - ); - - Ok(()) -} - -mod error { - use crate::aws::ami; - use aws_sdk_ec2::error::SdkError; - use aws_sdk_ec2::operation::{ - describe_images::DescribeImagesError, modify_image_attribute::ModifyImageAttributeError, - modify_snapshot_attribute::ModifySnapshotAttributeError, - }; - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display( - "Failed to describe image attributes for image {} in region {}: {}", - image_id, - region, - source - ))] - DescribeImageAttribute { - image_id: String, - region: String, - source: crate::aws::ami::launch_permissions::Error, - }, - - #[snafu(display("Failed to describe images in {}: {}", region, source))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] - Deserialize { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] - File { - op: String, - path: PathBuf, - source: io::Error, - }, - - #[snafu(display("Input '{}' is empty", path.display()))] - Input { path: PathBuf }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Failed to find given AMI ID {} in {}", image_id, region))] - MissingImage { region: String, image_id: String }, - - #[snafu(display("Response to {} was missing {}", request_type, missing))] - MissingInResponse { - request_type: String, - missing: String, - }, - - #[snafu(display("Failed to find region {} in AMI map", region))] - MissingRegion { region: String }, - - #[snafu(display( - "Failed to modify permissions of {} in {}: {}", - snapshot_id, - region, - source - ))] - ModifyImageAttribute { - snapshot_id: String, - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to modify permissions of {} of {} images", - error_count, error_count + success_count, - ))] - ModifyImagesAttributes { - error_count: u16, - success_count: u16, 
- }, - - #[snafu(display( - "Failed to modify permissions of {} in {}: {}", - image_id, - region, - source - ))] - ModifyImageAttributes { - image_id: String, - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to modify permissions of {} of {} snapshots", - error_count, error_count + success_count, - ))] - ModifySnapshotAttributes { - error_count: u16, - success_count: u16, - }, - - #[snafu(display("DescribeImages in {} with unique filters returned multiple results: {}", region, images.join(", ")))] - MultipleImages { region: String, images: Vec }, - - #[snafu(display("Failed to serialize output to '{}': {}", path.display(), source))] - Serialize { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display( - "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", - regions.join(", ") - ))] - UnknownRegions { regions: Vec }, - - #[snafu(display("AMI '{}' in {} did not become available: {}", id, region, source))] - WaitAmi { - id: String, - region: String, - source: ami::wait::Error, - }, - } - - impl Error { - /// The number of AMIs that have had their permissions successfully changed. - pub(crate) fn amis_affected(&self) -> u16 { - match self { - // We list all of these variants so that future editors of the code will have to - // look at this and decide whether or not their new error variant might have - // modified any AMI permissions. - Error::Config { .. } - | Error::DescribeImageAttribute { .. } - | Error::DescribeImages { .. } - | Error::Deserialize { .. } - | Error::File { .. } - | Error::Input { .. } - | Error::MissingConfig { .. } - | Error::MissingImage { .. } - | Error::MissingInResponse { .. } - | Error::MissingRegion { .. } - | Error::ModifyImageAttribute { .. } - | Error::ModifyImageAttributes { .. } - | Error::ModifySnapshotAttributes { .. } - | Error::MultipleImages { .. } - | Error::Serialize { .. } - | Error::UnknownRegions { .. } - | Error::WaitAmi { .. } => 0u16, - - // If an error occurs during the modify AMI permissions loop, then some AMIs may - // have been affected. - Error::ModifyImagesAttributes { - error_count: _, - success_count, - } => *success_count, - } - } - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ssm/mod.rs b/tools/pubsys/src/aws/ssm/mod.rs deleted file mode 100644 index 82d3685b..00000000 --- a/tools/pubsys/src/aws/ssm/mod.rs +++ /dev/null @@ -1,540 +0,0 @@ -//! The ssm module owns the 'ssm' subcommand and controls the process of setting SSM parameters -//! 
based on current build information - -#[allow(clippy::module_inception)] -pub(crate) mod ssm; -pub(crate) mod template; - -use self::template::RenderedParameter; -use crate::aws::ssm::template::RenderedParametersMap; -use crate::aws::{ - ami::public::ami_is_public, ami::Image, client::build_client_config, parse_arch, - region_from_string, -}; -use crate::Args; -use aws_config::SdkConfig; -use aws_sdk_ec2::{types::ArchitectureValues, Client as Ec2Client}; -use aws_sdk_ssm::{config::Region, Client as SsmClient}; -use clap::Parser; -use futures::stream::{StreamExt, TryStreamExt}; -use governor::{prelude::*, Quota, RateLimiter}; -use log::{error, info, trace}; -use nonzero_ext::nonzero; -use pubsys_config::InfraConfig; -use serde::Serialize; -use snafu::{ensure, OptionExt, ResultExt}; -use std::iter::FromIterator; -use std::path::PathBuf; -use std::{ - collections::{HashMap, HashSet}, - fs::File, -}; - -/// Sets SSM parameters based on current build information -#[derive(Debug, Parser)] -pub(crate) struct SsmArgs { - // This is JSON output from `pubsys ami` like `{"us-west-2": "ami-123"}` - /// Path to the JSON file containing regional AMI IDs to modify - #[arg(long)] - ami_input: PathBuf, - - /// The architecture of the machine image - #[arg(long, value_parser = parse_arch)] - arch: ArchitectureValues, - - /// The variant name for the current build - #[arg(long)] - variant: String, - - /// The version of the current build - #[arg(long)] - version: String, - - /// Regions where you want parameters published - #[arg(long, value_delimiter = ',')] - regions: Vec, - - /// File holding the parameter templates - #[arg(long)] - template_path: PathBuf, - - /// Allows overwrite of existing parameters - #[arg(long)] - allow_clobber: bool, - - /// Allows publishing non-public images to the `/aws/` namespace - #[arg(long)] - allow_private_images: bool, - - /// If set, writes the generated SSM parameters to this path - #[arg(long)] - ssm_parameter_output: Option, -} - -/// Wrapper struct over parameter update and AWS clients needed to execute on it. -#[derive(Debug, Clone)] -struct SsmParamUpdateOp { - parameter: RenderedParameter, - ec2_client: Ec2Client, -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, ssm_args: &SsmArgs) -> Result<()> { - // Setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - trace!("Parsed infra config: {:#?}", infra_config); - let aws = infra_config.aws.unwrap_or_default(); - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); - - // If the user gave an override list of regions, use that, otherwise use what's in the config. 
- let regions = if !ssm_args.regions.is_empty() { - ssm_args.regions.clone() - } else { - aws.regions.clone().into() - }; - ensure!( - !regions.is_empty(), - error::MissingConfigSnafu { - missing: "aws.regions" - } - ); - let base_region = region_from_string(®ions[0]); - - let amis = parse_ami_input(®ions, ssm_args)?; - - // Template setup =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - // Non-image-specific context for building and rendering templates - let build_context = BuildContext { - variant: &ssm_args.variant, - arch: ssm_args.arch.as_ref(), - image_version: &ssm_args.version, - }; - - info!( - "Parsing SSM parameter templates from {}", - ssm_args.template_path.display() - ); - let template_parameters = template::get_parameters(&ssm_args.template_path, &build_context) - .context(error::FindTemplatesSnafu)?; - - if template_parameters.parameters.is_empty() { - info!( - "No parameters for this arch/variant in {}", - ssm_args.template_path.display() - ); - return Ok(()); - } - - let new_parameters = - template::render_parameters(template_parameters, &amis, ssm_prefix, &build_context) - .context(error::RenderTemplatesSnafu)?; - trace!("Generated templated parameters: {:#?}", new_parameters); - - // If the path to an output file was given, write the rendered parameters to this file - if let Some(ssm_parameter_output) = &ssm_args.ssm_parameter_output { - write_rendered_parameters( - ssm_parameter_output, - &RenderedParametersMap::from(&new_parameters).rendered_parameters, - )?; - } - - // Generate AWS Clients to use for the updates. - let mut param_update_ops: Vec = Vec::with_capacity(new_parameters.len()); - let mut aws_sdk_configs: HashMap = HashMap::with_capacity(regions.len()); - let mut ssm_clients = HashMap::with_capacity(amis.len()); - - for parameter in new_parameters.iter() { - let region = ¶meter.ssm_key.region; - // Store client configs so that we only have to create them once. - // The HashMap `entry` API doesn't play well with `async`, so we use a match here instead. - let client_config = match aws_sdk_configs.get(region) { - Some(client_config) => client_config.clone(), - None => { - let client_config = build_client_config(region, &base_region, &aws).await; - aws_sdk_configs.insert(region.clone(), client_config.clone()); - client_config - } - }; - - let ssm_client = SsmClient::new(&client_config); - if ssm_clients.get(region).is_none() { - ssm_clients.insert(region.clone(), ssm_client); - } - - let ec2_client = Ec2Client::new(&client_config); - param_update_ops.push(SsmParamUpdateOp { - parameter: parameter.clone(), - ec2_client, - }); - } - - // Unless overridden, only allow public images to be published to public parameters. - if !ssm_args.allow_private_images { - info!("Ensuring that only public images are published to public parameters."); - ensure!( - check_public_namespace_amis_are_public(param_update_ops.iter()).await?, - error::NoPrivateImagesSnafu - ); - } - - // SSM get/compare =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Getting current SSM parameters"); - let new_parameter_names: Vec<&SsmKey> = - new_parameters.iter().map(|param| ¶m.ssm_key).collect(); - let current_parameters = ssm::get_parameters(&new_parameter_names, &ssm_clients) - .await - .context(error::FetchSsmSnafu)?; - trace!("Current SSM parameters: {:#?}", current_parameters); - - // Show the difference between source and target parameters in SSM. 
- let parameters_to_set = key_difference( - &RenderedParameter::as_ssm_parameters(&new_parameters), - ¤t_parameters, - ); - if parameters_to_set.is_empty() { - info!("No changes necessary."); - return Ok(()); - } - - // Unless the user wants to allow it, make sure we're not going to overwrite any existing - // keys. - if !ssm_args.allow_clobber { - let current_keys: HashSet<&SsmKey> = current_parameters.keys().collect(); - let new_keys: HashSet<&SsmKey> = parameters_to_set.keys().collect(); - ensure!(current_keys.is_disjoint(&new_keys), error::NoClobberSnafu); - } - - // SSM set =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - - info!("Setting updated SSM parameters."); - ssm::set_parameters(¶meters_to_set, &ssm_clients) - .await - .context(error::SetSsmSnafu)?; - - info!("Validating whether live parameters in SSM reflect changes."); - ssm::validate_parameters(¶meters_to_set, &ssm_clients) - .await - .context(error::ValidateSsmSnafu)?; - - info!("All parameters match requested values."); - Ok(()) -} - -/// Write rendered parameters to the file at `ssm_parameters_output` -pub(crate) fn write_rendered_parameters( - ssm_parameters_output: &PathBuf, - parameters: &HashMap>, -) -> Result<()> { - info!( - "Writing rendered SSM parameters to {:#?}", - ssm_parameters_output - ); - - serde_json::to_writer_pretty( - &File::create(ssm_parameters_output).context(error::WriteRenderedSsmParametersSnafu { - path: ssm_parameters_output, - })?, - ¶meters, - ) - .context(error::ParseRenderedSsmParametersSnafu)?; - - info!( - "Wrote rendered SSM parameters to {:#?}", - ssm_parameters_output - ); - Ok(()) -} - -// Rate limits on the EC2 side use the TokenBucket method, and buckets refill at a rate of 20 tokens per second. -// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-rate-based for more details. -const DESCRIBE_IMAGES_RATE_LIMIT: Quota = Quota::per_second(nonzero!(20u32)); -const MAX_CONCURRENT_AMI_CHECKS: usize = 8; - -/// Given a set of SSM parameter updates, ensures all parameters in the public namespace refer to public AMIs. -async fn check_public_namespace_amis_are_public( - parameter_updates: impl Iterator, -) -> Result { - let public_namespace_updates = parameter_updates - .filter(|update| update.parameter.ssm_key.is_in_public_namespace()) - .cloned(); - - // Wrap `crate::aws::ami::public::ami_is_public()` in a future that returns the correct error type. - let check_ami_public = |update: SsmParamUpdateOp| async move { - let region = &update.parameter.ssm_key.region; - let ami_id = &update.parameter.ami.id; - let is_public = ami_is_public(&update.ec2_client, region.as_ref(), ami_id) - .await - .context(error::CheckAmiPublicSnafu { - ami_id: ami_id.to_string(), - region: region.to_string(), - }); - - if let Ok(false) = is_public { - error!( - "Attempted to set parameter '{}' in {} to '{}', based on AMI {}. That AMI is not marked public!", - update.parameter.ssm_key.name, region, update.parameter.value, ami_id - ); - } - - is_public - }; - - // Concurrently check our input parameter updates... 
- let rate_limiter = RateLimiter::direct(DESCRIBE_IMAGES_RATE_LIMIT); - let results: Vec> = futures::stream::iter(public_namespace_updates) - .ratelimit_stream(&rate_limiter) - .then(|update| async move { Ok(check_ami_public(update)) }) - .try_buffer_unordered(usize::min(num_cpus::get(), MAX_CONCURRENT_AMI_CHECKS)) - .collect() - .await; - - // `collect()` on `TryStreams` doesn't seem to happily invert a `Vec>` to a `Result>`, - // so we use the usual `Iterator` methods to do it here. - Ok(results - .into_iter() - .collect::>>()? - .into_iter() - .all(|is_public| is_public)) -} - -/// The key to a unique SSM parameter -#[derive(Debug, Eq, Hash, PartialEq, Clone)] -pub(crate) struct SsmKey { - pub(crate) region: Region, - pub(crate) name: String, -} - -impl SsmKey { - pub(crate) fn new(region: Region, name: String) -> Self { - Self { region, name } - } - - pub(crate) fn is_in_public_namespace(&self) -> bool { - self.name.starts_with("/aws/") - } -} - -impl AsRef for SsmKey { - fn as_ref(&self) -> &Self { - self - } -} - -/// Non-image-specific context for building and rendering templates -#[derive(Debug, Serialize)] -pub(crate) struct BuildContext<'a> { - pub(crate) variant: &'a str, - pub(crate) arch: &'a str, - pub(crate) image_version: &'a str, -} - -/// A map of SsmKey to its value -pub(crate) type SsmParameters = HashMap; - -/// Parse the AMI input file -fn parse_ami_input(regions: &[String], ssm_args: &SsmArgs) -> Result> { - info!("Using AMI data from path: {}", ssm_args.ami_input.display()); - let file = File::open(&ssm_args.ami_input).context(error::FileSnafu { - op: "open", - path: &ssm_args.ami_input, - })?; - let mut ami_input: HashMap = - serde_json::from_reader(file).context(error::DeserializeSnafu { - path: &ssm_args.ami_input, - })?; - trace!("Parsed AMI input: {:#?}", ami_input); - - // pubsys will not create a file if it did not create AMIs, so we should only have an empty - // file if a user created one manually, and they shouldn't be creating an empty file. - ensure!( - !ami_input.is_empty(), - error::InputSnafu { - path: &ssm_args.ami_input - } - ); - - // Check that the requested regions are a subset of the regions we *could* publish from the AMI - // input JSON. - let requested_regions = HashSet::from_iter(regions.iter()); - let known_regions = HashSet::<&String>::from_iter(ami_input.keys()); - ensure!( - requested_regions.is_subset(&known_regions), - error::UnknownRegionsSnafu { - regions: requested_regions - .difference(&known_regions) - .map(|s| s.to_string()) - .collect::>(), - } - ); - - // Parse region names - let mut amis = HashMap::with_capacity(regions.len()); - for name in regions { - let image = ami_input - .remove(name) - // This could only happen if someone removes the check above... - .with_context(|| error::UnknownRegionsSnafu { - regions: vec![name.clone()], - })?; - let region = region_from_string(name); - amis.insert(region.clone(), image); - } - - Ok(amis) -} - -/// Shows the user the difference between two sets of parameters. We look for parameters in -/// `wanted` that are either missing or changed in `current`. We print these differences for the -/// user, then return the `wanted` values. 
-pub(crate) fn key_difference(wanted: &SsmParameters, current: &SsmParameters) -> SsmParameters { - let mut parameters_to_set = HashMap::new(); - - let wanted_keys: HashSet<&SsmKey> = wanted.keys().collect(); - let current_keys: HashSet<&SsmKey> = current.keys().collect(); - - for key in wanted_keys.difference(¤t_keys) { - let new_value = &wanted[key]; - println!( - "{} - {} - new parameter:\n new value: {}", - key.name, key.region, new_value, - ); - parameters_to_set.insert( - SsmKey::new(key.region.clone(), key.name.clone()), - new_value.clone(), - ); - } - - for key in wanted_keys.intersection(¤t_keys) { - let current_value = ¤t[key]; - let new_value = &wanted[key]; - - if current_value == new_value { - println!("{} - {} - no change", key.name, key.region); - } else { - println!( - "{} - {} - changing value:\n old value: {}\n new value: {}", - key.name, key.region, current_value, new_value - ); - parameters_to_set.insert( - SsmKey::new(key.region.clone(), key.name.clone()), - new_value.clone(), - ); - } - } - // Note: don't care about items that are in current but not wanted; that could happen if you - // remove a parameter from your templates, for example. - - parameters_to_set -} - -mod error { - use crate::aws::ssm::{ssm, template}; - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { - source: pubsys_config::Error, - }, - - #[snafu(display( - "Failed to check whether AMI {} in {} was public: {}", - ami_id, - region, - source - ))] - CheckAmiPublic { - ami_id: String, - region: String, - source: crate::aws::ami::public::Error, - }, - - #[snafu(display("Failed to create EC2 client for region {}", region))] - CreateEc2Client { - region: String, - }, - - #[snafu(display("Failed to deserialize input from '{}': {}", path.display(), source))] - Deserialize { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to fetch parameters from SSM: {}", source))] - FetchSsm { - source: ssm::Error, - }, - - #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] - File { - op: String, - path: PathBuf, - source: io::Error, - }, - - #[snafu(display("Failed to find templates: {}", source))] - FindTemplates { - source: template::Error, - }, - - #[snafu(display("Input '{}' is empty", path.display()))] - Input { - path: PathBuf, - }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { - missing: String, - }, - - #[snafu(display("Cowardly refusing to overwrite parameters without ALLOW_CLOBBER"))] - NoClobber, - - #[snafu(display("Cowardly refusing to publish private image to public namespace without ALLOW_PRIVATE_IMAGES"))] - NoPrivateImages, - - #[snafu(display("Failed to render templates: {}", source))] - RenderTemplates { - source: template::Error, - }, - - #[snafu(display("Failed to set SSM parameters: {}", source))] - SetSsm { - source: ssm::Error, - }, - - #[snafu(display( - "Given region(s) in Infra.toml / regions argument that are not in --ami-input file: {}", - regions.join(", ") - ))] - UnknownRegions { - regions: Vec, - }, - - ValidateSsm { - source: ssm::Error, - }, - - #[snafu(display("Failed to parse rendered SSM parameters to JSON: {}", source))] - ParseRenderedSsmParameters { - source: serde_json::Error, - }, - - #[snafu(display("Failed to write rendered SSM parameters to {:#?}: {}", path, source))] - WriteRenderedSsmParameters { - path: PathBuf, - source: 
std::io::Error, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ssm/ssm.rs b/tools/pubsys/src/aws/ssm/ssm.rs deleted file mode 100644 index f92666fd..00000000 --- a/tools/pubsys/src/aws/ssm/ssm.rs +++ /dev/null @@ -1,472 +0,0 @@ -//! The ssm module owns the getting and setting of parameters in SSM. - -use super::{SsmKey, SsmParameters}; -use aws_sdk_ssm::error::{ProvideErrorMetadata, SdkError}; -use aws_sdk_ssm::operation::{ - get_parameters::{GetParametersError, GetParametersOutput}, - put_parameter::{PutParameterError, PutParameterOutput}, -}; -use aws_sdk_ssm::{config::Region, types::ParameterType, Client as SsmClient}; -use futures::future::{join, ready}; -use futures::stream::{self, FuturesUnordered, StreamExt}; -use log::{debug, error, info, trace, warn}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::{HashMap, HashSet}; -use std::time::Duration; - -/// Fetches the values of the given SSM keys using the given clients -// TODO: We can batch GET requests so throttling is less likely here, but if we need to handle -// hundreds of parameters for a given build, we could use the throttling logic from -// `set_parameters` -pub(crate) async fn get_parameters( - requested: &[K], - clients: &HashMap, -) -> Result -where - K: AsRef, -{ - // Build requests for parameters; we have to request with a regional client so we split them by - // region - let mut requests = Vec::with_capacity(requested.len()); - let mut regional_names: HashMap> = HashMap::new(); - for key in requested { - let SsmKey { region, name } = key.as_ref(); - regional_names - .entry(region.clone()) - .or_default() - .push(name.clone()); - } - for (region, names) in regional_names { - // At most 10 parameters can be requested at a time. - for names_chunk in names.chunks(10) { - trace!("Requesting {:?} in {}", names_chunk, region); - let ssm_client = &clients[®ion]; - let len = names_chunk.len(); - let get_future = ssm_client - .get_parameters() - .set_names((!names_chunk.is_empty()).then_some(names_chunk.to_vec().clone())) - .send(); - - // Store the region so we can include it in errors and the output map - let info_future = ready((region.clone(), len)); - requests.push(join(info_future, get_future)); - } - } - - // Send requests in parallel and wait for responses, collecting results into a list. - let request_stream = stream::iter(requests).buffer_unordered(4); - #[allow(clippy::type_complexity)] - let responses: Vec<( - (Region, usize), - std::result::Result>, - )> = request_stream.collect().await; - - // If you're checking parameters in a region you haven't pushed to before, you can get an - // error here about the parameter's namespace being new. We want to treat these as new - // parameters rather than failing. Unfortunately, we don't know which parameter in the region - // was considered new, but we expect that most people are publishing all of their parameters - // under the same namespace, so treating the whole region as new is OK. We use this just to - // warn the user. - let mut new_regions = HashSet::new(); - - // For each existing parameter in the response, get the name and value for our output map. - let mut parameters = HashMap::with_capacity(requested.len()); - for ((region, expected_len), response) in responses { - // Get the image description, ensuring we only have one. - let response = match response { - Ok(response) => response, - Err(e) => { - // Note: there's no structured error type for this so we have to string match. 
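
The batching above keeps each GetParameters call within the 10-name limit by first grouping requested names per region and then chunking each region's list. A minimal sketch of just that grouping step, with plain strings standing in for `Region` and no SSM client involved (names are illustrative):

    use std::collections::HashMap;

    /// Group requested (region, parameter name) pairs by region, then split each
    /// region's names into batches of at most `batch_size` names per request.
    fn batch_by_region(
        requested: &[(String, String)],
        batch_size: usize,
    ) -> HashMap<String, Vec<Vec<String>>> {
        let mut regional_names: HashMap<String, Vec<String>> = HashMap::new();
        for (region, name) in requested {
            regional_names
                .entry(region.clone())
                .or_default()
                .push(name.clone());
        }

        regional_names
            .into_iter()
            .map(|(region, names)| {
                let batches: Vec<Vec<String>> = names
                    .chunks(batch_size)
                    .map(|chunk| chunk.to_vec())
                    .collect();
                (region, batches)
            })
            .collect()
    }

    fn main() {
        let requested: Vec<(String, String)> = (0..12)
            .map(|i| ("us-west-2".to_string(), format!("/my/prefix/param-{}", i)))
            .collect();
        let batches = batch_by_region(&requested, 10);
        // Twelve names in one region become one batch of ten and one of two.
        assert_eq!(batches["us-west-2"].len(), 2);
        assert_eq!(batches["us-west-2"][0].len(), 10);
        assert_eq!(batches["us-west-2"][1].len(), 2);
    }
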
- if e.to_string().contains("is not a valid namespace") { - new_regions.insert(region.clone()); - continue; - } else { - return Err(e).context(error::GetParametersSnafu { - region: region.as_ref(), - }); - } - } - }; - - // Check that we received a response including every parameter - // Note: response.invalid_parameters includes both new parameters and ill-formatted - // parameter names... - let valid_count = response.parameters.as_ref().map(|v| v.len()).unwrap_or(0); - let invalid_count = response.invalid_parameters.map(|v| v.len()).unwrap_or(0); - let total_count = valid_count + invalid_count; - ensure!( - total_count == expected_len, - error::MissingInResponseSnafu { - region: region.as_ref(), - request_type: "GetParameters", - missing: format!( - "parameters - got {}, expected {}", - total_count, expected_len - ), - } - ); - - // Save the successful parameters - if let Some(valid_parameters) = response.parameters { - if !valid_parameters.is_empty() { - for parameter in valid_parameters { - let name = parameter.name.context(error::MissingInResponseSnafu { - region: region.as_ref(), - request_type: "GetParameters", - missing: "parameter name", - })?; - let value = parameter.value.context(error::MissingInResponseSnafu { - region: region.as_ref(), - request_type: "GetParameters", - missing: format!("value for parameter {}", name), - })?; - parameters.insert(SsmKey::new(region.clone(), name), value); - } - } - } - } - - for region in new_regions { - warn!( - "Invalid namespace in {}, this is OK for the first publish in a region", - region - ); - } - - Ok(parameters) -} - -/// Fetches all SSM parameters under a given prefix using the given clients -pub(crate) async fn get_parameters_by_prefix<'a>( - clients: &'a HashMap, - ssm_prefix: &str, -) -> HashMap<&'a Region, Result> { - // Build requests for parameters; we have to request with a regional client so we split them by - // region - let mut requests = Vec::with_capacity(clients.len()); - for region in clients.keys() { - trace!("Requesting parameters in {}", region); - let ssm_client: &SsmClient = &clients[region]; - let get_future = get_parameters_by_prefix_in_region(region, ssm_client, ssm_prefix); - - requests.push(join(ready(region), get_future)); - } - - // Send requests in parallel and wait for responses, collecting results into a list. - requests - .into_iter() - .collect::>() - .collect() - .await -} - -/// Fetches all SSM parameters under a given prefix in a single region -pub(crate) async fn get_parameters_by_prefix_in_region( - region: &Region, - client: &SsmClient, - ssm_prefix: &str, -) -> Result { - info!("Retrieving SSM parameters in {}", region.to_string()); - let mut parameters = HashMap::new(); - - // Send the request - let mut get_future = client - .get_parameters_by_path() - .path(ssm_prefix) - .recursive(true) - .into_paginator() - .send(); - - // Iterate over the retrieved parameters - while let Some(page) = get_future.next().await { - let retrieved_parameters = page - .context(error::GetParametersByPathSnafu { - path: ssm_prefix, - region: region.to_string(), - })? - .parameters() - .unwrap_or_default() - .to_owned(); - for parameter in retrieved_parameters { - // Insert a new key-value pair into the map, with the key containing region and parameter name - // and the value containing the parameter value - parameters.insert( - SsmKey::new( - region.to_owned(), - parameter - .name() - .ok_or(error::Error::MissingField { - region: region.to_string(), - field: "name".to_string(), - })? 
- .to_owned(), - ), - parameter - .value() - .ok_or(error::Error::MissingField { - region: region.to_string(), - field: "value".to_string(), - })? - .to_owned(), - ); - } - } - info!( - "SSM parameters in {} have been retrieved", - region.to_string() - ); - Ok(parameters) -} - -/// Sets the values of the given SSM keys using the given clients -pub(crate) async fn set_parameters( - parameters_to_set: &SsmParameters, - ssm_clients: &HashMap, -) -> Result<()> { - // Start with a small delay between requests, and increase if we get throttled. - let mut request_interval = Duration::from_millis(100); - let max_interval = Duration::from_millis(1600); - let interval_factor = 2; - let mut should_increase_interval = false; - - // We run all requests in a batch, and any failed requests are added to the next batch for - // retry - let mut failed_parameters: HashMap> = HashMap::new(); - let max_failures = 5; - - /// Stores the values we need to be able to retry requests - struct RequestContext<'a> { - region: &'a Region, - name: &'a str, - value: &'a str, - failures: u8, - } - - // Create the initial request contexts - let mut contexts = Vec::new(); - for (SsmKey { region, name }, value) in parameters_to_set { - contexts.push(RequestContext { - region, - name, - value, - failures: 0, - }); - } - let total_count = contexts.len(); - - // We drain requests out of the contexts list and put them back if we need to retry; we do this - // until all requests have succeeded or we've hit the max failures - while !contexts.is_empty() { - debug!("Starting {} SSM put requests", contexts.len()); - - if should_increase_interval { - request_interval *= interval_factor; - warn!( - "Requests were throttled, increasing interval to {:?}", - request_interval - ); - } - should_increase_interval = false; - - ensure!( - request_interval <= max_interval, - error::ThrottledSnafu { max_interval } - ); - - // Build requests for parameters. We need to group them by region so we can run each - // region in parallel. Each region's stream will be throttled to run one request per - // request_interval. - let mut regional_requests = HashMap::new(); - // Remove contexts from the list with drain; they get added back in if we retry the - // request. - for context in contexts.drain(..) { - let ssm_client = &ssm_clients[context.region]; - - let put_future = ssm_client - .put_parameter() - .set_name(Some(context.name.to_string())) - .set_value(Some(context.value.to_string())) - .set_overwrite(Some(true)) - .set_type(Some(ParameterType::String)) - .send(); - - let regional_list = regional_requests - .entry(context.region) - .or_insert_with(Vec::new); - // Store the context so we can retry as needed - regional_list.push(join(ready(context), put_future)); - } - - // Create a throttled stream per region; throttling applies per region. (Request futures - // are already regional, by virtue of being created with a regional client, so we don't - // need the region again here.) - let mut throttled_streams = Vec::new(); - for (_region, request_list) in regional_requests { - throttled_streams.push(Box::pin(tokio_stream::StreamExt::throttle( - stream::iter(request_list), - request_interval, - ))); - } - - // Run all regions in parallel and wait for responses. - let parallel_requests = stream::select_all(throttled_streams).buffer_unordered(4); - let responses: Vec<( - RequestContext<'_>, - std::result::Result>, - )> = parallel_requests.collect().await; - - // For each error response, check if we should retry or bail. 
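
The loop above drains request contexts, throttles each region's stream, and re-queues anything that needs a retry, doubling the wait when it sees throttling and capping per-request failures. A simplified, synchronous sketch of that backoff-and-retry shape, with a caller-supplied closure standing in for the `put_parameter` call (names are illustrative; the real code is async and throttles one stream per region):

    use std::thread::sleep;
    use std::time::Duration;

    /// Outcome of one simulated put request.
    enum Outcome {
        Ok,
        Throttled,
        Failed(String),
    }

    /// One pending parameter write plus how many times it has failed so far.
    struct Request {
        name: String,
        failures: u8,
    }

    /// Keep retrying until everything succeeds, a request fails `max_failures`
    /// times, or throttling pushes the interval past its cap. Throttled requests
    /// are retried without counting as failures, and the interval is widened at
    /// most once per pass.
    fn set_all(
        mut pending: Vec<Request>,
        send: impl Fn(&str) -> Outcome,
        max_failures: u8,
    ) -> Result<(), String> {
        let mut interval = Duration::from_millis(100);
        let max_interval = Duration::from_millis(1600);

        while !pending.is_empty() {
            if interval > max_interval {
                return Err("throttled past the maximum interval".to_string());
            }
            let mut retry = Vec::new();
            let mut throttled = false;

            for mut request in pending {
                sleep(interval); // space requests out
                match send(&request.name) {
                    Outcome::Ok => {}
                    Outcome::Throttled => {
                        throttled = true;
                        retry.push(request);
                    }
                    Outcome::Failed(reason) => {
                        request.failures += 1;
                        if request.failures >= max_failures {
                            return Err(format!("{} failed: {}", request.name, reason));
                        }
                        retry.push(request);
                    }
                }
            }

            if throttled {
                interval *= 2;
            }
            pending = retry;
        }
        Ok(())
    }

    fn main() {
        let requests = vec![
            Request { name: "/my/prefix/image_id".to_string(), failures: 0 },
            Request { name: "/my/prefix/image_version".to_string(), failures: 0 },
        ];
        // Pretend every request succeeds on the first try.
        assert!(set_all(requests, |_| Outcome::Ok, 5).is_ok());

        // A request that always fails is given up on after `max_failures` tries.
        let failing = vec![Request { name: "/my/prefix/broken".to_string(), failures: 0 }];
        assert!(set_all(failing, |_| Outcome::Failed("boom".to_string()), 2).is_err());
    }
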
- for (context, response) in responses { - if let Err(e) = response { - // Throttling errors are not currently surfaced in AWS SDK Rust, doing a string match is best we can do - let error_type = e - .into_service_error() - .code() - .unwrap_or("unknown") - .to_owned(); - if error_type.contains("ThrottlingException") { - // We only want to increase the interval once per loop, not once per error, - // because when you get throttled you're likely to get a bunch of throttling - // errors at once. - should_increase_interval = true; - // Retry the request without increasing the failure counter; the request didn't - // fail, a throttle means we couldn't even make the request. - contexts.push(context); - // -1 so we don't try again next loop; this keeps failure checking in one place - } else if context.failures >= max_failures - 1 { - // Past max failures, store the failure for reporting, don't retry. - failed_parameters - .entry(context.region.clone()) - .or_default() - .push((context.name.to_string(), error_type)); - } else { - // Increase failure counter and try again. - let context = RequestContext { - failures: context.failures + 1, - ..context - }; - debug!( - "Request attempt {} of {} failed in {}: {}", - context.failures, max_failures, context.region, error_type - ); - contexts.push(context); - } - } - } - } - - if !failed_parameters.is_empty() { - for (region, failures) in &failed_parameters { - for (parameter, error) in failures { - error!("Failed to set {} in {}: {}", parameter, region, error); - } - } - return error::SetParametersSnafu { - failure_count: failed_parameters.len(), - total_count, - } - .fail(); - } - - Ok(()) -} - -/// Fetch the given parameters, and ensure the live values match the given values -pub(crate) async fn validate_parameters( - expected_parameters: &SsmParameters, - ssm_clients: &HashMap, -) -> Result<()> { - // Fetch the given parameter names - let expected_parameter_names: Vec<&SsmKey> = expected_parameters.keys().collect(); - let updated_parameters = get_parameters(&expected_parameter_names, ssm_clients).await?; - - // Walk through and check each value - let mut success = true; - for (expected_key, expected_value) in expected_parameters { - let SsmKey { - region: expected_region, - name: expected_name, - } = expected_key; - // All parameters should have a value, and it should match the given value, otherwise the - // parameter wasn't updated / created. 
- if let Some(updated_value) = updated_parameters.get(expected_key) { - if updated_value != expected_value { - error!("Failed to set {} in {}", expected_name, expected_region); - success = false; - } - } else { - error!( - "{} in {} still doesn't exist", - expected_name, expected_region - ); - success = false; - } - } - ensure!(success, error::ValidateParametersSnafu); - - Ok(()) -} - -pub(crate) mod error { - use aws_sdk_ssm::error::SdkError; - use aws_sdk_ssm::operation::{ - get_parameters::GetParametersError, get_parameters_by_path::GetParametersByPathError, - }; - use snafu::Snafu; - use std::error::Error as _; - use std::time::Duration; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - #[allow(clippy::large_enum_variant)] - pub enum Error { - #[snafu(display("Failed to fetch SSM parameters in {}: {}", region, source.source().map(|x| x.to_string()).unwrap_or("unknown".to_string())))] - GetParameters { - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to fetch SSM parameters by path {} in {}: {}", - path, - region, - source - ))] - GetParametersByPath { - path: String, - region: String, - source: SdkError, - }, - - #[snafu(display("Missing field in parameter in {}: {}", region, field))] - MissingField { region: String, field: String }, - - #[snafu(display("Response to {} was missing {}", request_type, missing))] - MissingInResponse { - region: String, - request_type: String, - missing: String, - }, - - #[snafu(display( - "Failed to set {} of {} parameters; see above", - failure_count, - total_count - ))] - SetParameters { - failure_count: usize, - total_count: usize, - }, - - #[snafu(display( - "SSM requests throttled too many times, went beyond our max interval {:?}", - max_interval - ))] - Throttled { max_interval: Duration }, - - #[snafu(display("Failed to validate all changes; see above."))] - ValidateParameters, - } -} -pub(crate) use error::Error; -pub(crate) type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/ssm/template.rs b/tools/pubsys/src/aws/ssm/template.rs deleted file mode 100644 index ac60583e..00000000 --- a/tools/pubsys/src/aws/ssm/template.rs +++ /dev/null @@ -1,415 +0,0 @@ -//! The template module owns the finding and rendering of parameter templates that used to generate -//! SSM parameter names and values. - -use super::{BuildContext, SsmKey, SsmParameters}; -use crate::aws::ami::Image; -use aws_sdk_ssm::config::Region; -use log::trace; -use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; -use std::collections::HashMap; -use std::fs; -use std::path::Path; -use tinytemplate::TinyTemplate; - -/// Represents a single SSM parameter -#[derive(Debug, Deserialize)] -pub(crate) struct TemplateParameter { - pub(crate) name: String, - pub(crate) value: String, - - // User can say parameters only apply to these variants/arches - #[serde(default, rename = "variant")] - pub(crate) variants: Vec, - #[serde(default, rename = "arch")] - pub(crate) arches: Vec, -} - -/// Represents a set of SSM parameters, in a format that allows for clear definition of -/// parameters in TOML files -#[derive(Debug, Deserialize)] -pub(crate) struct TemplateParameters { - // In a TOML table, it's clearer to define a single entry as a "parameter". - #[serde(default, rename = "parameter")] - pub(crate) parameters: Vec, -} - -/// Deserializes template parameters from the template file, taking into account conditional -/// parameters that may or may not apply based on our build context. 
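
The function that follows reads the TOML and then drops any parameter whose `variant`/`arch` conditions don't match the build context. A dependency-free sketch of just that `retain` step, with illustrative variant and parameter names:

    /// A parameter template that may be restricted to particular variants or
    /// architectures; empty lists mean "applies to every build".
    struct TemplateParameter {
        name: String,
        value: String,
        variants: Vec<String>,
        arches: Vec<String>,
    }

    /// Drop any parameter whose conditions don't match the current build.
    fn retain_applicable(parameters: &mut Vec<TemplateParameter>, variant: &str, arch: &str) {
        parameters.retain(|p| {
            (p.variants.is_empty() || p.variants.iter().any(|v| v == variant))
                && (p.arches.is_empty() || p.arches.iter().any(|a| a == arch))
        });
    }

    fn main() {
        let mut parameters = vec![
            TemplateParameter {
                name: "{variant}/{arch}/image_id".to_string(),
                value: "{image_id}".to_string(),
                variants: vec![],
                arches: vec![],
            },
            TemplateParameter {
                name: "{variant}/{arch}/special".to_string(),
                value: "{image_id}".to_string(),
                variants: vec!["some-other-variant".to_string()],
                arches: vec![],
            },
        ];
        retain_applicable(&mut parameters, "my-variant", "x86_64");
        // The unconditional parameter survives; the restricted one is dropped.
        assert_eq!(parameters.len(), 1);
        assert_eq!(parameters[0].name, "{variant}/{arch}/image_id");
        assert_eq!(parameters[0].value, "{image_id}");
    }
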
-pub(crate) fn get_parameters( - template_path: &Path, - build_context: &BuildContext<'_>, -) -> Result { - let templates_str = fs::read_to_string(template_path).context(error::FileSnafu { - op: "read", - path: &template_path, - })?; - let mut template_parameters: TemplateParameters = - toml::from_str(&templates_str).context(error::InvalidTomlSnafu { - path: &template_path, - })?; - trace!("Parsed templates: {:#?}", template_parameters); - - // You shouldn't point to an empty file, but if all the entries are removed by - // conditionals below, we allow that and just don't set any parameters. - ensure!( - !template_parameters.parameters.is_empty(), - error::NoTemplatesSnafu { - path: template_path - } - ); - - let variant = build_context.variant.to_string(); - let arch = build_context.arch.to_string(); - template_parameters.parameters.retain(|p| { - (p.variants.is_empty() || p.variants.contains(&variant)) - && (p.arches.is_empty() || p.arches.contains(&arch)) - }); - trace!("Templates after conditionals: {:#?}", template_parameters); - - Ok(template_parameters) -} - -/// A value which stores rendered SSM parameters alongside metadata used to render their templates -#[derive(Debug, Eq, PartialEq, Hash, Clone)] -pub(crate) struct RenderedParameter { - pub(crate) ami: Image, - pub(crate) ssm_key: SsmKey, - pub(crate) value: String, -} - -impl RenderedParameter { - /// Creates an `SsmParameters` HashMap from a list of `RenderedParameter` - pub(crate) fn as_ssm_parameters(rendered_parameters: &[RenderedParameter]) -> SsmParameters { - rendered_parameters - .iter() - .map(|param| (param.ssm_key.clone(), param.value.clone())) - .collect() - } -} - -/// Render the given template parameters using the data from the given AMIs -pub(crate) fn render_parameters( - template_parameters: TemplateParameters, - amis: &HashMap, - ssm_prefix: &str, - build_context: &BuildContext<'_>, -) -> Result> { - /// Values that we allow as template variables - #[derive(Debug, Serialize)] - struct TemplateContext<'a> { - variant: &'a str, - arch: &'a str, - image_id: &'a str, - image_name: &'a str, - image_version: &'a str, - region: &'a str, - } - let mut new_parameters = Vec::new(); - for (region, image) in amis { - let context = TemplateContext { - variant: build_context.variant, - arch: build_context.arch, - image_id: &image.id, - image_name: &image.name, - image_version: build_context.image_version, - region: region.as_ref(), - }; - - for tp in &template_parameters.parameters { - let mut tt = TinyTemplate::new(); - tt.add_template("name", &tp.name) - .context(error::AddTemplateSnafu { template: &tp.name })?; - tt.add_template("value", &tp.value) - .context(error::AddTemplateSnafu { - template: &tp.value, - })?; - let name_suffix = tt - .render("name", &context) - .context(error::RenderTemplateSnafu { template: &tp.name })?; - let value = tt - .render("value", &context) - .context(error::RenderTemplateSnafu { - template: &tp.value, - })?; - - new_parameters.push(RenderedParameter { - ami: image.clone(), - ssm_key: SsmKey::new(region.clone(), join_name(ssm_prefix, &name_suffix)), - value, - }); - } - } - - Ok(new_parameters) -} - -/// Render the names of the given template parameters using the fixed data about the current build. 
-/// Returns a mapping of templated name to rendered name, so we can associate rendered names to a -/// common source name -pub(crate) fn render_parameter_names( - template_parameters: &TemplateParameters, - ssm_prefix: &str, - build_context: &BuildContext<'_>, -) -> Result> { - let mut new_parameters = HashMap::new(); - for tp in &template_parameters.parameters { - let mut tt = TinyTemplate::new(); - tt.add_template("name", &tp.name) - .context(error::AddTemplateSnafu { template: &tp.name })?; - let name_suffix = tt - .render("name", &build_context) - .context(error::RenderTemplateSnafu { template: &tp.name })?; - new_parameters.insert(tp.name.clone(), join_name(ssm_prefix, &name_suffix)); - } - - Ok(new_parameters) -} - -/// Make sure prefix and parameter name are separated by one slash -fn join_name(ssm_prefix: &str, name_suffix: &str) -> String { - if ssm_prefix.ends_with('/') && name_suffix.starts_with('/') { - format!("{}{}", ssm_prefix, &name_suffix[1..]) - } else if ssm_prefix.ends_with('/') || name_suffix.starts_with('/') { - format!("{}{}", ssm_prefix, name_suffix) - } else { - format!("{}/{}", ssm_prefix, name_suffix) - } -} - -type RegionName = String; -type SsmParameterName = String; -type SsmParameterValue = String; - -/// Struct containing a HashMap of RegionName, mapped to a HashMap -/// of SsmParameterName, SsmParameterValue pairs -#[derive(Deserialize, PartialEq, Serialize)] -pub(crate) struct RenderedParametersMap { - pub(crate) rendered_parameters: - HashMap>, -} - -impl From<&Vec> for RenderedParametersMap { - fn from(parameters: &Vec) -> Self { - let mut parameter_map: HashMap> = - HashMap::new(); - for parameter in parameters.iter() { - parameter_map - .entry(parameter.ssm_key.region.to_string()) - .or_default() - .insert( - parameter.ssm_key.name.to_owned(), - parameter.value.to_owned(), - ); - } - RenderedParametersMap { - rendered_parameters: parameter_map, - } - } -} - -impl From>> for RenderedParametersMap { - fn from(parameters: HashMap>) -> Self { - let mut parameter_map: HashMap> = - HashMap::new(); - parameters - .into_iter() - .for_each(|(region, region_parameters)| { - parameter_map.insert( - region.to_string(), - region_parameters - .into_iter() - .map(|(ssm_key, ssm_value)| (ssm_key.name, ssm_value)) - .collect::>(), - ); - }); - RenderedParametersMap { - rendered_parameters: parameter_map, - } - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error building template from '{}': {}", template, source))] - AddTemplate { - template: String, - source: tinytemplate::error::Error, - }, - - #[snafu(display("Failed to {} '{}': {}", op, path.display(), source))] - File { - op: String, - path: PathBuf, - source: io::Error, - }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Found no parameter templates in {}", path.display()))] - NoTemplates { path: PathBuf }, - - #[snafu(display("Error rendering template from '{}': {}", template, source))] - RenderTemplate { - template: String, - source: tinytemplate::error::Error, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; - -#[cfg(test)] -mod test { - use std::collections::HashMap; - - use super::{RenderedParameter, RenderedParametersMap}; - use crate::aws::{ami::Image, ssm::SsmKey}; - use aws_sdk_ssm::config::Region; - - // These tests 
assert that the RenderedParametersMap can be created correctly. - #[test] - fn rendered_parameters_map_from_vec() { - let rendered_parameters = vec![ - RenderedParameter { - ami: Image { - id: "test1-image-id".to_string(), - name: "test1-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - value: "test1-parameter-value".to_string(), - }, - RenderedParameter { - ami: Image { - id: "test2-image-id".to_string(), - name: "test2-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - value: "test2-parameter-value".to_string(), - }, - RenderedParameter { - ami: Image { - id: "test3-image-id".to_string(), - name: "test3-image-name".to_string(), - public: Some(true), - launch_permissions: Some(vec![]), - }, - ssm_key: SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - value: "test3-parameter-value".to_string(), - }, - ]; - let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; - let expected_map = &HashMap::from([ - ( - "us-east-1".to_string(), - HashMap::from([( - "test3-parameter-name".to_string(), - "test3-parameter-value".to_string(), - )]), - ), - ( - "us-west-2".to_string(), - HashMap::from([ - ( - "test1-parameter-name".to_string(), - "test1-parameter-value".to_string(), - ), - ( - "test2-parameter-name".to_string(), - "test2-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_empty_vec() { - let rendered_parameters = vec![]; - let map = &RenderedParametersMap::from(&rendered_parameters).rendered_parameters; - let expected_map = &HashMap::new(); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_map() { - let existing_parameters = HashMap::from([ - ( - Region::new("us-west-2"), - HashMap::from([ - ( - SsmKey::new(Region::new("us-west-2"), "test1-parameter-name".to_string()), - "test1-parameter-value".to_string(), - ), - ( - SsmKey::new(Region::new("us-west-2"), "test2-parameter-name".to_string()), - "test2-parameter-value".to_string(), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashMap::from([( - SsmKey::new(Region::new("us-east-1"), "test3-parameter-name".to_string()), - "test3-parameter-value".to_string(), - )]), - ), - ]); - let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; - let expected_map = &HashMap::from([ - ( - "us-east-1".to_string(), - HashMap::from([( - "test3-parameter-name".to_string(), - "test3-parameter-value".to_string(), - )]), - ), - ( - "us-west-2".to_string(), - HashMap::from([ - ( - "test1-parameter-name".to_string(), - "test1-parameter-value".to_string(), - ), - ( - "test2-parameter-name".to_string(), - "test2-parameter-value".to_string(), - ), - ]), - ), - ]); - assert_eq!(map, expected_map); - } - - #[test] - fn rendered_parameters_map_from_empty_map() { - let existing_parameters = HashMap::new(); - let map = &RenderedParametersMap::from(existing_parameters).rendered_parameters; - let expected_map = &HashMap::new(); - assert_eq!(map, expected_map); - } -} diff --git a/tools/pubsys/src/aws/validate_ami/ami.rs b/tools/pubsys/src/aws/validate_ami/ami.rs deleted file mode 100644 index 4ee85fb4..00000000 --- a/tools/pubsys/src/aws/validate_ami/ami.rs +++ /dev/null @@ -1,223 
+0,0 @@ -//! The ami module owns the describing of images in EC2. - -use aws_sdk_ec2::{config::Region, types::Image, Client as Ec2Client}; -use futures::future::{join, ready}; -use futures::stream::{FuturesUnordered, StreamExt}; -use log::{info, trace}; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use std::collections::HashMap; - -use crate::aws::ami::launch_permissions::{get_launch_permissions, LaunchPermissionDef}; - -/// Wrapper structure for the `ImageDef` struct, used during deserialization -#[derive(Deserialize)] -#[serde(untagged)] -pub(crate) enum ImageData { - Image(ImageDef), - ImageList(Vec), -} - -impl ImageData { - pub(crate) fn images(&self) -> Vec { - match self { - ImageData::Image(image) => vec![image.to_owned()], - ImageData::ImageList(images) => images.to_owned(), - } - } -} - -/// Structure of the EC2 image fields that should be validated -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Clone)] -pub(crate) struct ImageDef { - /// The ID of the EC2 image - pub(crate) id: String, - - /// The name of the EC2 image - pub(crate) name: String, - - /// Whether or not the EC2 image is public - #[serde(default)] - pub(crate) public: bool, - - /// The launch permissions for the EC2 image. - pub(crate) launch_permissions: Option>, - - /// Whether or not the EC2 image supports Elastic Network Adapter - #[serde(default = "default_ena_support")] - pub(crate) ena_support: bool, - - /// The level of the EC2 image's Single Root I/O Virtualization support - #[serde(default = "default_sriov_net_support")] - pub(crate) sriov_net_support: String, -} - -fn default_ena_support() -> bool { - true -} - -fn default_sriov_net_support() -> String { - "simple".to_string() -} - -impl From<(Image, Option>)> for ImageDef { - fn from(args: (Image, Option>)) -> Self { - Self { - id: args.0.image_id().unwrap_or_default().to_string(), - name: args.0.name().unwrap_or_default().to_string(), - public: args.0.public().unwrap_or_default(), - launch_permissions: args.1, - ena_support: args.0.ena_support().unwrap_or_default(), - sriov_net_support: args.0.sriov_net_support().unwrap_or_default().to_string(), - } - } -} - -/// Fetches all images whose IDs are keys in `expected_images`. The map `expected_image_public` is -/// used to determine if the launch permissions for the image should be fetched (only if the image is not -/// public). The return value is a HashMap of Region to a Result, which is `Ok` if the request for -/// that region was successful and `Err` if not. The Result contains a HashMap of `image_id` to -/// `ImageDef`. -pub(crate) async fn describe_images<'a>( - clients: &'a HashMap, - expected_images: &HashMap>, -) -> HashMap<&'a Region, Result>> { - // Build requests for images; we have to request with a regional client so we split them by - // region - let mut requests = Vec::with_capacity(clients.len()); - clients.iter().for_each(|(region, ec2_client)| { - trace!("Requesting images in {}", region); - let get_future = describe_images_in_region( - region, - ec2_client, - expected_images - .get(region) - .map(|i| i.to_owned()) - .unwrap_or_default() - .into_iter() - .map(|i| (i.id.clone(), i)) - .collect::>(), - ); - - requests.push(join(ready(region), get_future)); - }); - - // Send requests in parallel and wait for responses, collecting results into a list. 
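
The pattern used here, and throughout these modules, is to pair each region with its request future via `join(ready(region), ...)` so every result or error can be attributed to a region, then drive all the futures concurrently. A small sketch of that shape, assuming the `futures` crate and a stand-in `fetch` function in place of the SDK call:

    use futures::executor::block_on;
    use futures::future::{join, ready};
    use futures::stream::{FuturesUnordered, StreamExt};
    use std::collections::HashMap;

    /// Stand-in for a regional describe/get call.
    async fn fetch(region: &'static str) -> Result<usize, String> {
        Ok(region.len())
    }

    /// Pair each region with its request future so results (or errors) can be
    /// attributed to a region, then drive all requests concurrently.
    async fn fetch_all(
        regions: Vec<&'static str>,
    ) -> HashMap<&'static str, Result<usize, String>> {
        let requests = regions
            .into_iter()
            .map(|region| join(ready(region), fetch(region)))
            .collect::<FuturesUnordered<_>>();

        requests.collect::<HashMap<_, _>>().await
    }

    fn main() {
        let results = block_on(fetch_all(vec!["us-west-2", "us-east-1"]));
        assert_eq!(results["us-west-2"], Ok("us-west-2".len()));
        assert_eq!(results["us-east-1"], Ok("us-east-1".len()));
    }

Carrying the region alongside the request future is what lets a failure in one region be reported with its region name without failing the whole fan-out.
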
- requests - .into_iter() - .collect::>() - .collect() - .await -} - -/// Fetches the images whose IDs are keys in `expected_images` -pub(crate) async fn describe_images_in_region( - region: &Region, - client: &Ec2Client, - expected_images: HashMap, -) -> Result> { - info!("Retrieving images in {}", region.to_string()); - let mut images = HashMap::new(); - - // Send the request - let mut get_future = client - .describe_images() - .include_deprecated(true) - .set_image_ids(Some(Vec::from_iter( - expected_images.keys().map(|k| k.to_owned()), - ))) - .into_paginator() - .send(); - - // Iterate over the retrieved images - while let Some(page) = get_future.next().await { - let retrieved_images = page - .context(error::DescribeImagesSnafu { - region: region.to_string(), - })? - .images() - .unwrap_or_default() - .to_owned(); - for image in retrieved_images { - // Insert a new key-value pair into the map, with the key containing image ID - // and the value containing the ImageDef object created from the image - let image_id = image - .image_id() - .ok_or(error::Error::MissingField { - missing: "image_id".to_string(), - })? - .to_string(); - let expected_public = expected_images - .get(&image_id) - .ok_or(error::Error::MissingExpectedPublic { - missing: image_id.clone(), - })? - .public; - // If the image is not expected to be public, retrieve the launch permissions - trace!( - "Retrieving launch permissions for {} in {}", - image_id, - region.as_ref() - ); - let launch_permissions = if !expected_public { - Some( - get_launch_permissions(client, region.as_ref(), &image_id) - .await - .context(error::GetLaunchPermissionsSnafu { - region: region.as_ref(), - image_id: image_id.clone(), - })?, - ) - } else { - None - }; - let image_def = ImageDef::from((image.to_owned(), launch_permissions)); - images.insert(image_id, image_def); - } - } - - info!("Images in {} have been retrieved", region.to_string()); - Ok(images) -} - -pub(crate) mod error { - use aws_sdk_ec2::operation::describe_images::DescribeImagesError; - use aws_sdk_ssm::error::SdkError; - use aws_smithy_types::error::display::DisplayErrorContext; - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - #[allow(clippy::large_enum_variant)] - pub(crate) enum Error { - #[snafu(display( - "Failed to describe images in {}: {}", - region, - DisplayErrorContext(source) - ))] - DescribeImages { - region: String, - source: SdkError, - }, - - #[snafu(display( - "Failed to retrieve launch permissions for image {} in region {}: {}", - image_id, - region, - source - ))] - GetLaunchPermissions { - region: String, - image_id: String, - source: crate::aws::ami::launch_permissions::Error, - }, - - #[snafu(display("Missing field in image: {}", missing))] - MissingField { missing: String }, - - #[snafu(display("Missing image ID in expected image publicity map: {}", missing))] - MissingExpectedPublic { missing: String }, - } -} - -pub(crate) type Result = std::result::Result; diff --git a/tools/pubsys/src/aws/validate_ami/mod.rs b/tools/pubsys/src/aws/validate_ami/mod.rs deleted file mode 100644 index e827059c..00000000 --- a/tools/pubsys/src/aws/validate_ami/mod.rs +++ /dev/null @@ -1,850 +0,0 @@ -//! The validate_ami module owns the 'validate-ami' subcommand and controls the process of validating -//! 
EC2 images - -pub(crate) mod ami; -pub(crate) mod results; - -use self::ami::{ImageData, ImageDef}; -use self::results::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults}; -use crate::aws::client::build_client_config; -use crate::aws::validate_ami::ami::describe_images; -use crate::Args; -use aws_sdk_ec2::{config::Region, Client as AmiClient}; -use clap::Parser; -use log::{error, info, trace}; -use pubsys_config::InfraConfig; -use snafu::ResultExt; -use std::collections::{HashMap, HashSet}; -use std::fs::File; -use std::path::PathBuf; - -/// Validates EC2 images by calling `describe-images` on all images in the file given by -/// `expected-amis-path` and ensuring that the returned `public`, `ena-support`, -/// `sriov-net-support`, and `launch-permissions` fields have the expected values. -#[derive(Debug, Parser)] -pub(crate) struct ValidateAmiArgs { - /// File holding the expected amis - #[arg(long)] - expected_amis_path: PathBuf, - - /// Optional path where the validation results should be written - #[arg(long)] - write_results_path: Option, - - #[arg(long, requires = "write_results_path")] - /// Optional filter to only write validation results with these statuses to the above path - /// The available statuses are: `Correct`, `Incorrect`, `Missing`. - write_results_filter: Option>, - - #[arg(long)] - /// If this argument is given, print the validation results summary as a JSON object instead - /// of a plaintext table - json: bool, -} - -/// Performs EC2 image validation and returns the `AmiValidationResults` object -pub(crate) async fn validate( - args: &Args, - validate_ami_args: &ValidateAmiArgs, -) -> Result { - info!("Parsing Infra.toml file"); - - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - - trace!("Parsed infra config: {:#?}", infra_config); - - let aws = infra_config.aws.unwrap_or_default(); - - // Parse the expected ami file - info!("Parsing expected ami file"); - let expected_images = parse_expected_amis(&validate_ami_args.expected_amis_path).await?; - - info!("Parsed expected ami file"); - - // Create a `HashMap` of `AmiClient`s, one for each region where validation should happen - let base_region = &Region::new( - aws.regions - .get(0) - .ok_or(error::Error::EmptyInfraRegions { - path: args.infra_config_path.clone(), - })? 
- .clone(), - ); - let mut ami_clients = HashMap::with_capacity(expected_images.len()); - - for region in expected_images.keys() { - let client_config = build_client_config(region, base_region, &aws).await; - let ami_client = AmiClient::new(&client_config); - ami_clients.insert(region.clone(), ami_client); - } - - // Retrieve the EC2 images using the `AmiClient`s - info!("Retrieving EC2 images"); - let images = describe_images(&ami_clients, &expected_images) - .await - .into_iter() - .map(|(region, result)| { - ( - region, - result.map_err(|e| { - error!( - "Failed to retrieve images in region {}: {}", - region.to_string(), - e - ); - error::Error::UnreachableRegion { - region: region.to_string(), - } - }), - ) - }) - .collect::>>(); - - // Validate the retrieved EC2 images per region - info!("Validating EC2 images"); - let results: HashMap> = images - .into_iter() - .map(|(region, region_result)| { - ( - region.clone(), - validate_images_in_region( - &expected_images - .get(region) - .map(|e| e.to_owned()) - .unwrap_or_default(), - ®ion_result, - region, - ), - ) - }) - .collect(); - - let validation_results = AmiValidationResults::from_result_map(results); - - // If a path was given, write the results - if let Some(write_results_path) = &validate_ami_args.write_results_path { - // Filter the results by given status, and if no statuses were given, get all results - info!("Writing results to file"); - let results = if let Some(filter) = &validate_ami_args.write_results_filter { - validation_results.get_results_for_status(filter) - } else { - validation_results.get_all_results() - }; - - // Write the results as JSON - serde_json::to_writer_pretty( - &File::create(write_results_path).context(error::WriteValidationResultsSnafu { - path: write_results_path, - })?, - &results, - ) - .context(error::SerializeValidationResultsSnafu)?; - } - - Ok(validation_results) -} - -/// Validates EC2 images in a single region, based on a `Vec` of expected images -/// and a `HashMap` of actual retrieved images. Returns a -/// `HashSet` containing the result objects. -pub(crate) fn validate_images_in_region( - expected_images: &[ImageDef], - actual_images: &Result>, - region: &Region, -) -> HashSet { - match actual_images { - Ok(actual_images) => expected_images - .iter() - .map(|image| { - let new_image = if image.public { - ImageDef { - launch_permissions: None, - ..image.clone() - } - } else { - image.clone() - }; - AmiValidationResult::new( - image.id.clone(), - new_image, - Ok(actual_images.get(&image.id).map(|v| v.to_owned())), - region.clone(), - ) - }) - .collect(), - Err(_) => expected_images - .iter() - .map(|image| { - AmiValidationResult::new( - image.id.clone(), - image.clone(), - Err(error::Error::UnreachableRegion { - region: region.to_string(), - }), - region.clone(), - ) - }) - .collect(), - } -} - -type RegionName = String; -type AmiId = String; - -/// Parse the file holding image values. Return a `HashMap` of `Region` mapped to a vec of `ImageDef`s -/// for that region. 
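
The parser that follows reads a JSON map of region name to either a single image object or a list of them, which is exactly the shape the `#[serde(untagged)]` `ImageData` enum above accepts. A trimmed-down sketch of that parsing, assuming `serde` (with the derive feature) and `serde_json` as dependencies and a reduced `ImageDef`:

    use serde::Deserialize;
    use std::collections::HashMap;

    /// A trimmed-down image record; the real `ImageDef` carries more fields.
    #[derive(Debug, Deserialize, Clone)]
    struct ImageDef {
        id: String,
        name: String,
    }

    /// A region's entry may be a single image or a list of images; `untagged`
    /// lets serde try each shape in turn.
    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum ImageData {
        Image(ImageDef),
        ImageList(Vec<ImageDef>),
    }

    impl ImageData {
        fn images(&self) -> Vec<ImageDef> {
            match self {
                ImageData::Image(image) => vec![image.clone()],
                ImageData::ImageList(images) => images.clone(),
            }
        }
    }

    fn main() -> Result<(), serde_json::Error> {
        let input = r#"{
            "us-west-2": { "id": "ami-1111", "name": "one" },
            "us-east-1": [
                { "id": "ami-2222", "name": "two" },
                { "id": "ami-3333", "name": "three" }
            ]
        }"#;
        let parsed: HashMap<String, ImageData> = serde_json::from_str(input)?;
        // Normalize both shapes into a list per region.
        let per_region: HashMap<String, Vec<ImageDef>> = parsed
            .into_iter()
            .map(|(region, data)| (region, data.images()))
            .collect();
        assert_eq!(per_region["us-west-2"].len(), 1);
        assert_eq!(per_region["us-east-1"].len(), 2);
        Ok(())
    }
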
-pub(crate) async fn parse_expected_amis( - expected_amis_path: &PathBuf, -) -> Result>> { - // Parse the JSON file as a `HashMap` of region_name, mapped to an `ImageData` struct - let expected_amis: HashMap = serde_json::from_reader( - &File::open(expected_amis_path.clone()).context(error::ReadExpectedImagesFileSnafu { - path: expected_amis_path, - })?, - ) - .context(error::ParseExpectedImagesFileSnafu)?; - - // Extract the `Vec` from the `ImageData` structs - let vectored_images = expected_amis - .into_iter() - .map(|(region, value)| (Region::new(region), value.images())) - .collect::>>(); - - Ok(vectored_images) -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, validate_ami_args: &ValidateAmiArgs) -> Result<()> { - let results = validate(args, validate_ami_args).await?; - - if validate_ami_args.json { - println!( - "{}", - serde_json::to_string_pretty(&results.get_json_summary()) - .context(error::SerializeResultsSummarySnafu)? - ) - } else { - println!("{}", results); - } - Ok(()) -} - -mod error { - use snafu::Snafu; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Empty regions array in Infra.toml at path {}", path.display()))] - EmptyInfraRegions { path: PathBuf }, - - #[snafu(display("Failed to parse image file: {}", source))] - ParseExpectedImagesFile { source: serde_json::Error }, - - #[snafu(display("Failed to read image file: {:?}", path))] - ReadExpectedImagesFile { - source: std::io::Error, - path: PathBuf, - }, - - #[snafu(display("Failed to serialize validation results to json: {}", source))] - SerializeValidationResults { source: serde_json::Error }, - - #[snafu(display("Failed to retrieve images from region {}", region))] - UnreachableRegion { region: String }, - - #[snafu(display("Failed to write validation results to {:?}: {}", path, source))] - WriteValidationResults { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to serialize results summary to JSON: {}", source))] - SerializeResultsSummary { source: serde_json::Error }, - } -} - -pub(crate) use error::Error; - -type Result = std::result::Result; - -#[cfg(test)] -mod test { - use super::ami::ImageDef; - use super::validate_images_in_region; - use crate::aws::{ - ami::launch_permissions::LaunchPermissionDef, - validate_ami::results::{AmiValidationResult, AmiValidationResultStatus}, - }; - use aws_sdk_ec2::config::Region; - use std::collections::{HashMap, HashSet}; - - // These tests assert that the images can be validated correctly. 
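
As a compact restatement of the classification rule these tests exercise, here is a dependency-free sketch mapping an expected value, the fetched per-region data, and an image ID to one of the four statuses (a plain string stands in for the full `ImageDef`):

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum Status {
        Correct,
        Incorrect,
        Missing,
        Unreachable,
    }

    /// Classify one expected image against the fetched data for its region: the
    /// region may be unreachable, the image may be absent, or its fields may or
    /// may not match what we expected.
    fn classify(
        expected: &str,
        actual: &Result<HashMap<String, String>, String>,
        image_id: &str,
    ) -> Status {
        match actual {
            Err(_) => Status::Unreachable,
            Ok(images) => match images.get(image_id) {
                None => Status::Missing,
                Some(actual_value) if actual_value == expected => Status::Correct,
                Some(_) => Status::Incorrect,
            },
        }
    }

    fn main() {
        let reachable = Ok(HashMap::from([
            ("ami-1111".to_string(), "public, ena, sriov=simple".to_string()),
            ("ami-2222".to_string(), "private".to_string()),
        ]));
        assert_eq!(classify("public, ena, sriov=simple", &reachable, "ami-1111"), Status::Correct);
        assert_eq!(classify("public, ena, sriov=simple", &reachable, "ami-2222"), Status::Incorrect);
        assert_eq!(classify("public, ena, sriov=simple", &reachable, "ami-3333"), Status::Missing);

        let unreachable: Result<HashMap<String, String>, String> =
            Err("region unreachable".to_string());
        assert_eq!(classify("anything", &unreachable, "ami-1111"), Status::Unreachable);
    }
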
- - // Tests validation of images where the expected value is equal to the actual value - #[test] - fn validate_images_all_correct() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Correct); - } - assert_eq!(results, expected_results); - } - - // Tests validation of images where the expected value is different from the actual value - #[test] - fn validate_images_all_incorrect() { - let expected_parameters: Vec = vec![ - ImageDef { - 
id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Incorrect); - } - assert_eq!(results, expected_results); - } - - // Tests validation of images where the actual value is missing - #[test] - fn validate_images_all_missing() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - 
launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters = HashMap::new(); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - for result in &results { - assert_eq!(result.status, AmiValidationResultStatus::Missing); - } - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each reachable status (Correct, Incorrect, Missing) happens once - #[test] - fn validate_images_mixed() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let actual_parameters: HashMap = HashMap::from([ - ( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ), - ]); - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - 
launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: false, - launch_permissions: Some(vec![LaunchPermissionDef::Group("all".to_string())]), - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Ok(actual_parameters), - &Region::new("us-west-2"), - ); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the region is unreachable - #[test] - fn validate_images_unreachable() { - let expected_parameters: Vec = vec![ - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - ]; - let expected_results = HashSet::from_iter(vec![ - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - ]); - let results = validate_images_in_region( - &expected_parameters, - &Err(crate::aws::validate_ami::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - &Region::new("us-west-2"), - ); - - assert_eq!(results, expected_results); - } -} diff --git a/tools/pubsys/src/aws/validate_ami/results.rs b/tools/pubsys/src/aws/validate_ami/results.rs deleted file mode 100644 index 698fbe01..00000000 --- 
a/tools/pubsys/src/aws/validate_ami/results.rs +++ /dev/null @@ -1,1034 +0,0 @@ -//! The results module owns the reporting of EC2 image validation results. - -use super::ami::ImageDef; -use super::Result; -use aws_sdk_ec2::config::Region; -use serde::{Deserialize, Serialize}; -use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; -use std::collections::{HashMap, HashSet}; -use std::fmt::{self, Display}; -use tabled::{Table, Tabled}; - -/// Represent the possible status of an EC2 image validation -#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)] -pub(crate) enum AmiValidationResultStatus { - /// The image was found and its monitored fields have the expected values - Correct, - - /// The image was found but some of the monitored fields do not have the expected values - Incorrect, - - /// The image was expected but not included in the actual images - Missing, - - /// The region containing the image was not reachable - Unreachable, -} - -derive_display_from_serialize!(AmiValidationResultStatus); -derive_fromstr_from_deserialize!(AmiValidationResultStatus); - -/// Represents a single EC2 image validation result -#[derive(Debug, Eq, Hash, PartialEq, Serialize)] -pub(crate) struct AmiValidationResult { - /// The ID of the image - pub(crate) id: String, - - /// `ImageDef` containing expected values for the image - pub(crate) expected_image_def: ImageDef, - - /// `ImageDef` containing actual values for the image - pub(crate) actual_image_def: Option, - - /// The region the image resides in - #[serde(serialize_with = "serialize_region")] - pub(crate) region: Region, - - /// The validation status of the image - pub(crate) status: AmiValidationResultStatus, -} - -fn serialize_region(region: &Region, serializer: S) -> std::result::Result -where - S: serde::Serializer, -{ - serializer.serialize_str(region.to_string().as_str()) -} - -impl AmiValidationResult { - pub(crate) fn new( - id: String, - expected_image_def: ImageDef, - actual_image_def: Result>, - region: Region, - ) -> Self { - // Determine the validation status based on equality, presence, and absence of expected and - // actual image values - let status = match (&expected_image_def, &actual_image_def) { - (expected_image_def, Ok(Some(actual_image_def))) - if actual_image_def == expected_image_def => - { - AmiValidationResultStatus::Correct - } - (_, Ok(Some(_))) => AmiValidationResultStatus::Incorrect, - (_, Ok(None)) => AmiValidationResultStatus::Missing, - (_, Err(_)) => AmiValidationResultStatus::Unreachable, - }; - AmiValidationResult { - id, - expected_image_def, - actual_image_def: actual_image_def.unwrap_or_default(), - region, - status, - } - } -} - -#[derive(Tabled, Serialize)] -struct AmiValidationRegionSummary { - correct: u64, - incorrect: u64, - missing: u64, - unreachable: u64, -} - -impl From<&HashSet> for AmiValidationRegionSummary { - fn from(results: &HashSet) -> Self { - let mut region_validation = AmiValidationRegionSummary { - correct: 0, - incorrect: 0, - missing: 0, - unreachable: 0, - }; - for validation_result in results { - match validation_result.status { - AmiValidationResultStatus::Correct => region_validation.correct += 1, - AmiValidationResultStatus::Incorrect => region_validation.incorrect += 1, - AmiValidationResultStatus::Missing => region_validation.missing += 1, - AmiValidationResultStatus::Unreachable => region_validation.missing += 1, - } - } - region_validation - } -} - -/// Represents all EC2 image validation results -#[derive(Debug)] -pub(crate) struct 
AmiValidationResults { - pub(crate) results: HashMap>, -} - -impl Default for AmiValidationResults { - fn default() -> Self { - Self::from_result_map(HashMap::new()) - } -} - -impl Display for AmiValidationResults { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Create a summary for each region, counting the number of parameters per status - let region_validations: HashMap = - self.get_results_summary(); - - // Represent the `HashMap` of summaries as a `Table` - let table = Table::new( - region_validations - .iter() - .map(|(region, results)| (region.to_string(), results)) - .collect::>(), - ) - .to_string(); - write!(f, "{}", table) - } -} - -impl AmiValidationResults { - pub(crate) fn from_result_map(results: HashMap>) -> Self { - AmiValidationResults { results } - } - - /// Returns a `HashSet` containing all validation results whose status is present in `requested_status` - pub(crate) fn get_results_for_status( - &self, - requested_status: &[AmiValidationResultStatus], - ) -> HashSet<&AmiValidationResult> { - let mut results = HashSet::new(); - for region_results in self.results.values() { - results.extend( - region_results - .iter() - .filter(|result| requested_status.contains(&result.status)) - .collect::>(), - ) - } - results - } - - /// Returns a `HashSet` containing all validation results - pub(crate) fn get_all_results(&self) -> HashSet<&AmiValidationResult> { - let mut results = HashSet::new(); - for region_results in self.results.values() { - results.extend(region_results) - } - results - } - - fn get_results_summary(&self) -> HashMap { - self.results - .iter() - .map(|(region, region_result)| { - ( - region.clone(), - AmiValidationRegionSummary::from(region_result), - ) - }) - .collect() - } - - pub(crate) fn get_json_summary(&self) -> serde_json::Value { - serde_json::json!(self - .get_results_summary() - .into_iter() - .map(|(region, results)| (region.to_string(), results)) - .collect::>()) - } -} - -#[cfg(test)] -mod test { - use super::{AmiValidationResult, AmiValidationResultStatus, AmiValidationResults}; - use crate::aws::validate_ami::ami::ImageDef; - use aws_sdk_ssm::config::Region; - use std::collections::{HashMap, HashSet}; - - // These tests assert that the `get_results_for_status` function returns the correct values. 
- - // Tests empty `AmiValidationResults` - #[test] - fn get_results_for_status_empty() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - (Region::new("us-west-2"), HashSet::from([])), - (Region::new("us-east-1"), HashSet::from([])), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - AmiValidationResultStatus::Missing, - ]); - - assert_eq!(results_filtered, HashSet::new()); - } - - // Tests the `Correct` status - #[test] - fn get_results_for_status_correct() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: 
"test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&vec![AmiValidationResultStatus::Correct]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing the `Correct` and `Incorrect` statuses - #[test] - fn get_results_for_status_correct_incorrect() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - 
"test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing all statuses - #[test] - fn get_results_for_status_all() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: 
"test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - ]), - ), - ( - Region::new("us-east-2"), - HashSet::from([AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::error::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - )]), - ), - ])); - let results_filtered = results.get_results_for_status(&vec![ - AmiValidationResultStatus::Correct, - AmiValidationResultStatus::Incorrect, - AmiValidationResultStatus::Missing, - AmiValidationResultStatus::Unreachable, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - 
Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-west-2"), - ), - &AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(None), - Region::new("us-east-1"), - ), - &AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Err(crate::aws::validate_ami::error::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - ), - ]) - ); - } - - // Tests the `Missing` filter when none of the AmiValidationResults have this status - #[test] - fn get_results_for_status_missing_none() { - let results = AmiValidationResults::from_result_map(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: 
"simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-west-2"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - AmiValidationResult::new( - "test3-image-id".to_string(), - ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test3-image-id".to_string(), - name: "test3-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test1-image-id".to_string(), - ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test1-image-id".to_string(), - name: "test1-image".to_string(), - public: true, - launch_permissions: None, - ena_support: false, - sriov_net_support: "simple".to_string(), - })), - Region::new("us-east-1"), - ), - AmiValidationResult::new( - "test2-image-id".to_string(), - ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "simple".to_string(), - }, - Ok(Some(ImageDef { - id: "test2-image-id".to_string(), - name: "test2-image".to_string(), - public: true, - launch_permissions: None, - ena_support: true, - sriov_net_support: "not simple".to_string(), - })), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&vec![AmiValidationResultStatus::Missing]); - - assert_eq!(results_filtered, HashSet::new()); - } -} diff --git a/tools/pubsys/src/aws/validate_ssm/mod.rs b/tools/pubsys/src/aws/validate_ssm/mod.rs deleted file mode 100644 index 3dc5f4ee..00000000 --- a/tools/pubsys/src/aws/validate_ssm/mod.rs +++ /dev/null @@ -1,797 +0,0 @@ -//! The validate_ssm module owns the 'validate-ssm' subcommand and controls the process of -//! 
validating SSM parameters and AMIs - -pub mod results; - -use self::results::{SsmValidationResult, SsmValidationResultStatus, SsmValidationResults}; -use super::ssm::ssm::get_parameters_by_prefix; -use super::ssm::{SsmKey, SsmParameters}; -use crate::aws::client::build_client_config; -use crate::Args; -use aws_sdk_ssm::{config::Region, Client as SsmClient}; -use clap::Parser; -use log::{error, info, trace}; -use pubsys_config::InfraConfig; -use snafu::ResultExt; -use std::collections::{HashMap, HashSet}; -use std::fs::File; -use std::path::PathBuf; - -/// Validates SSM parameters and AMIs -#[derive(Debug, Parser)] -pub struct ValidateSsmArgs { - /// File holding the expected parameters - #[arg(long)] - expected_parameters_path: PathBuf, - - /// If this flag is set, check for unexpected parameters in the validation regions. If not, - /// only the parameters present in the expected parameters file will be validated. - #[arg(long)] - check_unexpected: bool, - - /// Optional path where the validation results should be written - #[arg(long)] - write_results_path: Option, - - /// Optional filter to only write validation results with these statuses to the above path - /// Available statuses are: `Correct`, `Incorrect`, `Missing`, `Unexpected` - #[arg(long, requires = "write_results_path")] - write_results_filter: Option>, - - /// If this flag is added, print the results summary table as JSON instead of a - /// plaintext table - #[arg(long)] - json: bool, -} - -/// Performs SSM parameter validation and returns the `SsmValidationResults` object -pub async fn validate( - args: &Args, - validate_ssm_args: &ValidateSsmArgs, -) -> Result { - info!("Parsing Infra.toml file"); - - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(error::ConfigSnafu)?; - - let aws = infra_config.aws.clone().unwrap_or_default(); - - trace!("Parsed infra config: {:#?}", infra_config); - - let ssm_prefix = aws.ssm_prefix.as_deref().unwrap_or(""); - - // Parse the file holding expected parameters - info!("Parsing expected parameters file"); - let expected_parameters = parse_parameters(&validate_ssm_args.expected_parameters_path).await?; - - info!("Parsed expected parameters file"); - - // Create a HashMap of SsmClients, one for each region where validation should happen - let base_region = Region::new(aws.regions[0].clone()); - let mut ssm_clients = HashMap::with_capacity(expected_parameters.len()); - - for region in expected_parameters.keys() { - let client_config = build_client_config(region, &base_region, &aws).await; - let ssm_client = SsmClient::new(&client_config); - ssm_clients.insert(region.clone(), ssm_client); - } - - // Retrieve the SSM parameters using the SsmClients - info!("Retrieving SSM parameters"); - let parameters = get_parameters_by_prefix(&ssm_clients, ssm_prefix) - .await - .into_iter() - .map(|(region, result)| { - ( - region, - result.map_err(|e| { - error!( - "Failed to retrieve images in region {}: {}", - region.to_string(), - e - ); - error::Error::UnreachableRegion { - region: region.to_string(), - } - }), - ) - }) - .collect::>>(); - - // Validate the retrieved SSM parameters per region - info!("Validating SSM parameters"); - let results: HashMap> = parameters - .into_iter() - .map(|(region, region_result)| { - ( - region.clone(), - validate_parameters_in_region( - expected_parameters.get(region).unwrap_or(&HashMap::new()), - ®ion_result, - validate_ssm_args.check_unexpected, - ), - ) - }) - 
.collect::>>(); - - let validation_results = SsmValidationResults::new(results); - - // If a path was given to write the results to, write the results - if let Some(write_results_path) = &validate_ssm_args.write_results_path { - // Filter the results by given status, and if no statuses were given, get all results - info!("Writing results to file"); - let results = if let Some(filter) = &validate_ssm_args.write_results_filter { - validation_results.get_results_for_status(filter) - } else { - validation_results.get_all_results() - }; - - // Write the results as JSON - serde_json::to_writer_pretty( - &File::create(write_results_path).context(error::WriteValidationResultsSnafu { - path: write_results_path, - })?, - &results, - ) - .context(error::SerializeValidationResultsSnafu)?; - } - - Ok(validation_results) -} - -/// Validates SSM parameters in a single region, based on a HashMap (SsmKey, String) of expected -/// parameters and a HashMap (SsmKey, String) of actual retrieved parameters. Returns a HashSet of -/// SsmValidationResult objects. -pub(crate) fn validate_parameters_in_region( - expected_parameters: &HashMap, - actual_parameters: &Result, - check_unexpected: bool, -) -> HashSet { - match actual_parameters { - Ok(actual_parameters) => { - // Clone the HashMap of actual parameters so items can be removed - let mut actual_parameters = actual_parameters.clone(); - let mut results = HashSet::new(); - - // Validate all expected parameters, creating an SsmValidationResult object and - // removing the corresponding parameter from `actual_parameters` if found - for (ssm_key, ssm_value) in expected_parameters { - results.insert(SsmValidationResult::new( - ssm_key.name.to_owned(), - Some(ssm_value.clone()), - Ok(actual_parameters.get(ssm_key).map(|v| v.to_owned())), - ssm_key.region.clone(), - )); - actual_parameters.remove(ssm_key); - } - - if check_unexpected { - // Any remaining parameters in `actual_parameters` were not present in `expected_parameters` - // and therefore get the `Unexpected` status - for (ssm_key, ssm_value) in actual_parameters { - results.insert(SsmValidationResult::new( - ssm_key.name.to_owned(), - None, - Ok(Some(ssm_value)), - ssm_key.region.clone(), - )); - } - } - results - } - Err(_) => expected_parameters - .iter() - .map(|(ssm_key, ssm_value)| { - SsmValidationResult::new( - ssm_key.name.to_owned(), - Some(ssm_value.to_owned()), - Err(error::Error::UnreachableRegion { - region: ssm_key.region.to_string(), - }), - ssm_key.region.clone(), - ) - }) - .collect(), - } -} - -type RegionName = String; -type ParameterName = String; -type ParameterValue = String; - -/// Parse the file holding expected parameters. Return a HashMap of Region mapped to a HashMap -/// of the parameters in that region, with each parameter being a mapping of `SsmKey` to its -/// value as `String`. -pub(crate) async fn parse_parameters( - expected_parameters_file: &PathBuf, -) -> Result>> { - // Parse the JSON file as a HashMap of region_name, mapped to a HashMap of parameter_name and - // parameter_value - let expected_parameters: HashMap> = - serde_json::from_reader(&File::open(expected_parameters_file.clone()).context( - error::ReadExpectedParameterFileSnafu { - path: expected_parameters_file, - }, - )?) 
- .context(error::ParseExpectedParameterFileSnafu)?; - - // Iterate over the parsed HashMap, converting the nested HashMap into a HashMap of Region - // mapped to a HashMap of SsmKey, String - let parameter_map = expected_parameters - .into_iter() - .map(|(region, parameters)| { - ( - Region::new(region.clone()), - parameters - .into_iter() - .map(|(parameter_name, parameter_value)| { - ( - SsmKey::new(Region::new(region.clone()), parameter_name), - parameter_value, - ) - }) - .collect::>(), - ) - }) - .collect(); - - Ok(parameter_map) -} - -/// Common entrypoint from main() -pub(crate) async fn run(args: &Args, validate_ssm_args: &ValidateSsmArgs) -> Result<()> { - let results = validate(args, validate_ssm_args).await?; - - if validate_ssm_args.json { - println!( - "{}", - serde_json::to_string_pretty(&results.get_json_summary()) - .context(error::SerializeResultsSummarySnafu)? - ) - } else { - println!("{}", results) - } - Ok(()) -} - -pub(crate) mod error { - use crate::aws::ssm::ssm; - use snafu::Snafu; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Failed to fetch parameters from SSM: {}", source))] - FetchSsm { source: ssm::error::Error }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Failed to validate SSM parameters: {}", missing))] - ValidateSsm { missing: String }, - - #[snafu(display("Failed to parse expected parameters file: {}", source))] - ParseExpectedParameterFile { source: serde_json::Error }, - - #[snafu(display("Failed to read expected parameters file: {}", path.display()))] - ReadExpectedParameterFile { - source: std::io::Error, - path: PathBuf, - }, - - #[snafu(display("Invalid validation status filter: {}", filter))] - InvalidStatusFilter { filter: String }, - - #[snafu(display("Failed to serialize validation results to json: {}", source))] - SerializeValidationResults { source: serde_json::Error }, - - #[snafu(display("Failed to retrieve SSM parameters from region {}", region))] - UnreachableRegion { region: String }, - - #[snafu(display("Failed to write validation results to {}: {}", path.display(), source))] - WriteValidationResults { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Failed to serialize results summary into JSON: {}", source))] - SerializeResultsSummary { source: serde_json::Error }, - } -} - -pub(crate) use error::Error; -type Result = std::result::Result; - -#[cfg(test)] -mod test { - use crate::aws::{ - ssm::{SsmKey, SsmParameters}, - validate_ssm::{results::SsmValidationResult, validate_parameters_in_region}, - }; - use aws_sdk_ssm::config::Region; - use std::collections::{HashMap, HashSet}; - - // These tests assert that the parameters can be validated correctly. 
- - // Tests validation of parameters where the expected value is equal to the actual value - #[test] - fn validate_parameters_all_correct() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(Some("test3-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the expected value is different from the actual value - #[test] - fn validate_parameters_all_incorrect() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value-wrong".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(Some("test3-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - 
"test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the actual value is missing - #[test] - fn validate_parameters_all_missing() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::new(); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the expected value is missing - #[test] - fn validate_parameters_all_unexpected() { - let expected_parameters: HashMap = HashMap::new(); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - None, - Ok(Some("test3-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - None, - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - None, - Ok(Some("test2-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each status (Correct, Incorrect, Missing, Unexpected) - // happens once - #[test] - fn validate_parameters_mixed() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: 
Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test4-parameter-name".to_string(), - }, - "test4-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), true); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where each reachable status (Correct, Incorrect, Missing, Unexpected) - // happens once and `--check-unexpected` is false - #[test] - fn validate_parameters_mixed_unexpected_false() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let actual_parameters: SsmParameters = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value-wrong".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test4-parameter-name".to_string(), - }, - "test4-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - ]); - let results = - validate_parameters_in_region(&expected_parameters, &Ok(actual_parameters), false); - - assert_eq!(results, expected_results); - } - - // Tests validation of parameters where the 
status is Unreachable - #[test] - fn validate_parameters_unreachable() { - let expected_parameters: HashMap = HashMap::from([ - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test1-parameter-name".to_string(), - }, - "test1-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-west-2"), - name: "test2-parameter-name".to_string(), - }, - "test2-parameter-value".to_string(), - ), - ( - SsmKey { - region: Region::new("us-east-1"), - name: "test3-parameter-name".to_string(), - }, - "test3-parameter-value".to_string(), - ), - ]); - let expected_results = HashSet::from_iter(vec![ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - Region::new("us-west-2"), - ), - ]); - let results = validate_parameters_in_region( - &expected_parameters, - &Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-west-2".to_string(), - }), - false, - ); - - assert_eq!(results, expected_results); - } -} diff --git a/tools/pubsys/src/aws/validate_ssm/results.rs b/tools/pubsys/src/aws/validate_ssm/results.rs deleted file mode 100644 index eadd4290..00000000 --- a/tools/pubsys/src/aws/validate_ssm/results.rs +++ /dev/null @@ -1,615 +0,0 @@ -//! The results module owns the reporting of SSM validation results. 
- -use crate::aws::validate_ssm::Result; -use aws_sdk_ssm::config::Region; -use serde::{Deserialize, Serialize}; -use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; -use std::collections::{HashMap, HashSet}; -use std::fmt::{self, Display}; -use tabled::{Table, Tabled}; - -/// Represent the possible status of an SSM validation -#[derive(Debug, Eq, Hash, PartialEq, Serialize, Deserialize, Clone)] -pub enum SsmValidationResultStatus { - /// The expected value was equal to the actual value - Correct, - - /// The expected value was different from the actual value - Incorrect, - - /// The parameter was expected but not included in the actual parameters - Missing, - - /// The parameter was present in the actual parameters but not expected - Unexpected, - - /// The region containing the parameter is not reachable - Unreachable, -} - -derive_display_from_serialize!(SsmValidationResultStatus); -derive_fromstr_from_deserialize!(SsmValidationResultStatus); - -/// Represents a single SSM validation result -#[derive(Debug, Eq, Hash, PartialEq, Serialize)] -pub struct SsmValidationResult { - /// The name of the parameter - pub(crate) name: String, - - /// The expected value of the parameter - pub(crate) expected_value: Option, - - /// The actual retrieved value of the parameter - pub(crate) actual_value: Option, - - /// The region the parameter resides in - #[serde(serialize_with = "serialize_region")] - pub(crate) region: Region, - - /// The validation status of the parameter - pub(crate) status: SsmValidationResultStatus, -} - -fn serialize_region(region: &Region, serializer: S) -> std::result::Result -where - S: serde::Serializer, -{ - serializer.serialize_str(region.to_string().as_str()) -} - -impl SsmValidationResult { - pub(crate) fn new( - name: String, - expected_value: Option, - actual_value: Result>, - region: Region, - ) -> SsmValidationResult { - // Determine the validation status based on equality, presence, and absence of expected and - // actual parameter values - let status = match (&expected_value, &actual_value) { - (Some(expected_value), Ok(Some(actual_value))) if actual_value.eq(expected_value) => { - SsmValidationResultStatus::Correct - } - (Some(_), Ok(Some(_))) => SsmValidationResultStatus::Incorrect, - (_, Ok(None)) => SsmValidationResultStatus::Missing, - (None, Ok(_)) => SsmValidationResultStatus::Unexpected, - (_, Err(_)) => SsmValidationResultStatus::Unreachable, - }; - SsmValidationResult { - name, - expected_value, - actual_value: actual_value.unwrap_or_default(), - region, - status, - } - } -} - -#[derive(Tabled, Serialize)] -struct SsmValidationRegionSummary { - correct: u64, - incorrect: u64, - missing: u64, - unexpected: u64, - unreachable: u64, -} - -impl From<&HashSet> for SsmValidationRegionSummary { - fn from(results: &HashSet) -> Self { - let mut region_validation = SsmValidationRegionSummary { - correct: 0, - incorrect: 0, - missing: 0, - unexpected: 0, - unreachable: 0, - }; - for validation_result in results { - match validation_result.status { - SsmValidationResultStatus::Correct => region_validation.correct += 1, - SsmValidationResultStatus::Incorrect => region_validation.incorrect += 1, - SsmValidationResultStatus::Missing => region_validation.missing += 1, - SsmValidationResultStatus::Unexpected => region_validation.unexpected += 1, - SsmValidationResultStatus::Unreachable => region_validation.unreachable += 1, - } - } - region_validation - } -} - -/// Represents all SSM validation results -#[derive(Debug)] -pub struct 
SsmValidationResults { - pub(crate) results: HashMap>, -} - -impl Default for SsmValidationResults { - fn default() -> Self { - Self::new(HashMap::new()) - } -} - -impl Display for SsmValidationResults { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Create a summary for each region, counting the number of parameters per status - let region_validations: HashMap = - self.get_results_summary(); - - // Represent the HashMap of summaries as a `Table` - let table = Table::new( - region_validations - .iter() - .map(|(region, results)| (region.to_string(), results)) - .collect::>(), - ) - .to_string(); - write!(f, "{}", table) - } -} - -impl SsmValidationResults { - pub fn new(results: HashMap>) -> Self { - SsmValidationResults { results } - } - - /// Returns a HashSet containing all validation results whose status is present in - /// `requested_status` - pub fn get_results_for_status( - &self, - requested_status: &[SsmValidationResultStatus], - ) -> HashSet<&SsmValidationResult> { - let mut results = HashSet::new(); - for region_results in self.results.values() { - results.extend( - region_results - .iter() - .filter(|result| requested_status.contains(&result.status)) - .collect::>(), - ) - } - results - } - - /// Returns a `HashSet` containing all validation results - pub(crate) fn get_all_results(&self) -> HashSet<&SsmValidationResult> { - let mut results = HashSet::new(); - for region_results in self.results.values() { - results.extend(region_results) - } - results - } - - fn get_results_summary(&self) -> HashMap { - self.results - .iter() - .map(|(region, region_result)| { - ( - region.clone(), - SsmValidationRegionSummary::from(region_result), - ) - }) - .collect() - } - - pub(crate) fn get_json_summary(&self) -> serde_json::Value { - serde_json::json!(self - .get_results_summary() - .into_iter() - .map(|(region, results)| (region.to_string(), results)) - .collect::>()) - } -} - -#[cfg(test)] -mod test { - use std::collections::{HashMap, HashSet}; - - use crate::aws::validate_ssm::results::{ - SsmValidationResult, SsmValidationResultStatus, SsmValidationResults, - }; - use aws_sdk_ssm::config::Region; - - // These tests assert that the `get_results_for_status` function returns the correct values. 
- - // Tests empty SsmValidationResults - #[test] - fn get_results_for_status_empty() { - let results = SsmValidationResults::new(HashMap::from([ - (Region::new("us-west-2"), HashSet::from([])), - (Region::new("us-east-1"), HashSet::from([])), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - SsmValidationResultStatus::Missing, - SsmValidationResultStatus::Unexpected, - ]); - - assert_eq!(results_filtered, HashSet::new()); - } - - // Tests the `Correct` status - #[test] - fn get_results_for_status_correct() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&[SsmValidationResultStatus::Correct]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing the `Correct` and `Incorrect` statuses - #[test] - fn get_results_for_status_correct_incorrect() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - 
SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ) - ]) - ); - } - - // Tests a filter containing all statuses - #[test] - fn get_results_for_status_all() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - 
Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ( - Region::new("us-east-2"), - HashSet::from([SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-east-2".to_string(), - }), - Region::new("us-east-2"), - )]), - ), - ])); - let results_filtered = results.get_results_for_status(&[ - SsmValidationResultStatus::Correct, - SsmValidationResultStatus::Incorrect, - SsmValidationResultStatus::Missing, - SsmValidationResultStatus::Unexpected, - SsmValidationResultStatus::Unreachable, - ]); - - assert_eq!( - results_filtered, - HashSet::from([ - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Ok(None), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - &SsmValidationResult::new( - "test3-parameter-name".to_string(), - Some("test3-parameter-value".to_string()), - Err(crate::aws::validate_ssm::Error::UnreachableRegion { - region: "us-east-2".to_string() - }), - Region::new("us-east-2"), - ), - ]) - ); - } - - // Tests the `Missing` filter when none of the SsmValidationResults have this status - #[test] - fn get_results_for_status_missing_none() { - let results = SsmValidationResults::new(HashMap::from([ - ( - Region::new("us-west-2"), - HashSet::from([ - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-west-2"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-west-2"), - ), - ]), - ), - ( - Region::new("us-east-1"), - HashSet::from([ - SsmValidationResult::new( - "test1-parameter-name".to_string(), - Some("test1-parameter-value".to_string()), - Ok(Some("test1-parameter-value".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test2-parameter-name".to_string(), - Some("test2-parameter-value".to_string()), - 
Ok(Some("test2-parameter-value-wrong".to_string())), - Region::new("us-east-1"), - ), - SsmValidationResult::new( - "test4-parameter-name".to_string(), - None, - Ok(Some("test4-parameter-value".to_string())), - Region::new("us-east-1"), - ), - ]), - ), - ])); - let results_filtered = - results.get_results_for_status(&[SsmValidationResultStatus::Missing]); - - assert_eq!(results_filtered, HashSet::new()); - } -} diff --git a/tools/pubsys/src/main.rs b/tools/pubsys/src/main.rs deleted file mode 100644 index 520ef4e5..00000000 --- a/tools/pubsys/src/main.rs +++ /dev/null @@ -1,265 +0,0 @@ -/*! -`pubsys` simplifies the process of publishing Bottlerocket updates. - -Currently implemented: -* building repos, whether starting from an existing repo or from scratch -* validating repos by loading them and retrieving their targets -* checking for repository metadata expirations within specified number of days -* refreshing and re-signing repos' non-root metadata files -* registering and copying EC2 AMIs -* Marking EC2 AMIs public (or private again) -* setting SSM parameters based on built AMIs -* promoting SSM parameters from versioned entries to named (e.g. 'latest') -* validating SSM parameters by comparing the returned parameters in a region to a given list of parameters - -To be implemented: -* high-level document describing pubsys usage with examples - -Configuration comes from: -* command-line parameters, to specify basic options and paths to the below files -* Infra.toml, for repo and AMI configuration -* Release.toml, for migrations -* Policy files for repo metadata expiration and update wave timing -*/ - -mod aws; -mod repo; -mod vmware; - -use clap::Parser; -use semver::Version; -use simplelog::{CombinedLogger, Config as LogConfig, ConfigBuilder, LevelFilter, SimpleLogger}; -use snafu::ResultExt; -use std::path::PathBuf; -use std::process; -use tokio::runtime::Runtime; - -fn run() -> Result<()> { - // Parse and store the args passed to the program - let args = Args::parse(); - - // SimpleLogger will send errors to stderr and anything less to stdout. - // To reduce verbosity of messages related to the AWS SDK for Rust we need - // to spin up two loggers, setting different levels for each. This allows - // us to retain the mixed logging of stdout/stderr in simplelog. - match args.log_level { - LevelFilter::Info => { - CombinedLogger::init(vec![ - SimpleLogger::new( - LevelFilter::Info, - ConfigBuilder::new() - .add_filter_ignore_str("aws_config") - .add_filter_ignore_str("aws_credential_types") - .add_filter_ignore_str("aws_smithy") - .add_filter_ignore_str("tracing::span") - .build(), - ), - SimpleLogger::new( - LevelFilter::Warn, - ConfigBuilder::new() - .add_filter_allow_str("aws_config") - .add_filter_allow_str("aws_credential_types") - .add_filter_allow_str("aws_smithy") - .add_filter_allow_str("tracing::span") - .build(), - ), - ]) - .context(error::LoggerSnafu)?; - } - _ => { - SimpleLogger::init(args.log_level, LogConfig::default()).context(error::LoggerSnafu)? 
- } - } - - match args.subcommand { - SubCommands::Repo(ref repo_args) => repo::run(&args, repo_args).context(error::RepoSnafu), - SubCommands::ValidateRepo(ref validate_repo_args) => { - repo::validate_repo::run(&args, validate_repo_args).context(error::ValidateRepoSnafu) - } - SubCommands::CheckRepoExpirations(ref check_expirations_args) => { - repo::check_expirations::run(&args, check_expirations_args) - .context(error::CheckExpirationsSnafu) - } - SubCommands::RefreshRepo(ref refresh_repo_args) => { - repo::refresh_repo::run(&args, refresh_repo_args).context(error::RefreshRepoSnafu) - } - SubCommands::Ami(ref ami_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::ami::run(&args, ami_args) - .await - .context(error::AmiSnafu) - }) - } - SubCommands::PublishAmi(ref publish_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::publish_ami::run(&args, publish_args) - .await - .context(error::PublishAmiSnafu) - }) - } - SubCommands::Ssm(ref ssm_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::ssm::run(&args, ssm_args) - .await - .context(error::SsmSnafu) - }) - } - SubCommands::PromoteSsm(ref promote_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::promote_ssm::run(&args, promote_args) - .await - .context(error::PromoteSsmSnafu) - }) - } - SubCommands::ValidateSsm(ref validate_ssm_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::validate_ssm::run(&args, validate_ssm_args) - .await - .context(error::ValidateSsmSnafu) - }) - } - SubCommands::ValidateAmi(ref validate_ami_args) => { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - rt.block_on(async { - aws::validate_ami::run(&args, validate_ami_args) - .await - .context(error::ValidateAmiSnafu) - }) - } - SubCommands::UploadOva(ref upload_args) => { - vmware::upload_ova::run(&args, upload_args).context(error::UploadOvaSnafu) - } - } -} - -fn main() { - if let Err(e) = run() { - eprintln!("{}", e); - process::exit(1); - } -} - -/// Automates publishing of Bottlerocket updates -#[derive(Debug, Parser)] -pub struct Args { - #[arg(global = true, long, default_value = "INFO")] - /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE - log_level: LevelFilter, - - #[arg(long)] - /// Path to Infra.toml (NOTE: must be specified before subcommand) - infra_config_path: PathBuf, - - #[command(subcommand)] - subcommand: SubCommands, -} - -#[derive(Debug, Parser)] -enum SubCommands { - Repo(repo::RepoArgs), - ValidateRepo(repo::validate_repo::ValidateRepoArgs), - CheckRepoExpirations(repo::check_expirations::CheckExpirationsArgs), - RefreshRepo(repo::refresh_repo::RefreshRepoArgs), - - Ami(aws::ami::AmiArgs), - PublishAmi(aws::publish_ami::Who), - ValidateAmi(aws::validate_ami::ValidateAmiArgs), - - Ssm(aws::ssm::SsmArgs), - PromoteSsm(aws::promote_ssm::PromoteArgs), - ValidateSsm(aws::validate_ssm::ValidateSsmArgs), - - UploadOva(vmware::upload_ova::UploadArgs), -} - -/// Parses a SemVer, stripping a leading 'v' if present -pub(crate) fn friendly_version( - mut version_str: &str, -) -> std::result::Result { - if version_str.starts_with('v') { - version_str = &version_str[1..]; - }; - - Version::parse(version_str) -} - -mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(super) enum Error { - #[snafu(display("Failed to build AMI: {}", source))] - 
Ami { source: crate::aws::ami::Error }, - - #[snafu(display("Logger setup error: {}", source))] - Logger { source: log::SetLoggerError }, - - #[snafu(display( - "Error during publish-ami command: {}: {}", - publish_ami_message(source), - source - ))] - PublishAmi { - source: crate::aws::publish_ami::Error, - }, - - #[snafu(display("Failed to promote SSM: {}", source))] - PromoteSsm { - source: crate::aws::promote_ssm::Error, - }, - - #[snafu(display("Failed to build repo: {}", source))] - Repo { source: crate::repo::Error }, - - #[snafu(display("Failed to validate repository: {}", source))] - ValidateRepo { - source: crate::repo::validate_repo::Error, - }, - - #[snafu(display("Check expirations error: {}", source))] - CheckExpirations { - source: crate::repo::check_expirations::Error, - }, - - #[snafu(display("Failed to refresh repository metadata: {}", source))] - RefreshRepo { - source: crate::repo::refresh_repo::Error, - }, - - #[snafu(display("Failed to create async runtime: {}", source))] - Runtime { source: std::io::Error }, - - #[snafu(display("Failed to update SSM: {}", source))] - Ssm { source: crate::aws::ssm::Error }, - - #[snafu(display("Failed to upload OVA: {}", source))] - UploadOva { - source: crate::vmware::upload_ova::Error, - }, - - #[snafu(display("Failed to validate SSM parameters: {}", source))] - ValidateSsm { - source: crate::aws::validate_ssm::Error, - }, - - #[snafu(display("Failed to validate EC2 images: {}", source))] - ValidateAmi { - source: crate::aws::validate_ami::Error, - }, - } - - fn publish_ami_message(error: &crate::aws::publish_ami::Error) -> String { - match error.amis_affected() { - 0 => String::from("No AMI permissions were updated"), - 1 => String::from("Permissions for 1 AMI were updated, the rest failed"), - n => format!("Permissions for {} AMIs were updated, the rest failed", n), - } - } -} -type Result = std::result::Result; diff --git a/tools/pubsys/src/repo.rs b/tools/pubsys/src/repo.rs deleted file mode 100644 index 97b37d84..00000000 --- a/tools/pubsys/src/repo.rs +++ /dev/null @@ -1,808 +0,0 @@ -//! The repo module owns the 'repo' subcommand and controls the process of building a repository. - -pub(crate) mod check_expirations; -pub(crate) mod refresh_repo; -pub(crate) mod validate_repo; - -use crate::{friendly_version, Args}; -use aws_sdk_kms::{config::Region, Client as KmsClient}; -use chrono::{DateTime, Utc}; -use clap::Parser; -use lazy_static::lazy_static; -use log::{debug, info, trace, warn}; -use parse_datetime::parse_datetime; -use pubsys_config::{ - InfraConfig, KMSKeyConfig, RepoConfig, RepoExpirationPolicy, SigningKeyConfig, -}; -use semver::Version; -use snafu::{ensure, OptionExt, ResultExt}; -use std::convert::TryInto; -use std::fs::{self, File}; -use std::num::NonZeroU64; -use std::path::{Path, PathBuf}; -use tempfile::NamedTempFile; -use tokio::runtime::Runtime; -use tough::{ - editor::signed::PathExists, - editor::RepositoryEditor, - key_source::{KeySource, LocalKeySource}, - schema::Target, - RepositoryLoader, TransportErrorKind, -}; -use tough_kms::{KmsKeySource, KmsSigningAlgorithm}; -use tough_ssm::SsmKeySource; -use update_metadata::{Images, Manifest, Release, UpdateWaves}; -use url::Url; - -lazy_static! 
{ - static ref DEFAULT_START_TIME: DateTime = Utc::now(); -} - -/// Builds Bottlerocket repos using latest build artifacts -#[derive(Debug, Parser)] -pub(crate) struct RepoArgs { - // Metadata about the update - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - #[arg(long)] - /// The architecture of the repo and the update being added - arch: String, - #[arg(long, value_parser = friendly_version)] - /// The version of the update being added - version: Version, - #[arg(long)] - /// The variant of the update being added - variant: String, - - // The images to add in this update - #[arg(long)] - /// Path to the image containing the boot partition - boot_image: PathBuf, - #[arg(long)] - /// Path to the image containing the root partition - root_image: PathBuf, - #[arg(long)] - /// Path to the image containing the verity hashes - hash_image: PathBuf, - - // Optionally add other files to the repo - #[arg(long = "link-target")] - /// Optional paths to add as targets and symlink into repo - link_targets: Vec, - #[arg(long = "copy-target")] - /// Optional paths to add as targets and copy into repo - copy_targets: Vec, - - // Policies that pubsys interprets to set repo parameters - #[arg(long)] - /// Path to file that defines when repo metadata should expire - repo_expiration_policy_path: PathBuf, - - // Configuration that pubsys passes on to other tools - #[arg(long)] - /// Path to Release.toml - release_config_path: PathBuf, - #[arg(long)] - /// Path to file that defines when this update will become available - wave_policy_path: PathBuf, - #[arg(long)] - /// Path to root.json for this repo - root_role_path: PathBuf, - #[arg(long)] - /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined - default_key_path: PathBuf, - - #[arg(long, value_parser = parse_datetime)] - /// When the waves and expiration timer will start; RFC3339 date or "in X hours/days/weeks" - release_start_time: Option>, - - #[arg(long)] - /// Where to store the created repo - outdir: PathBuf, -} - -/// Adds update, migrations, and waves to the Manifest -fn update_manifest(repo_args: &RepoArgs, manifest: &mut Manifest) -> Result<()> { - // Add update =^..^= =^..^= =^..^= =^..^= - - let filename = |path: &PathBuf| -> Result { - Ok(path - .file_name() - .context(error::InvalidImagePathSnafu { path })? - .to_str() - .context(error::NonUtf8PathSnafu { path })? 
- .to_string()) - }; - - let images = Images { - boot: filename(&repo_args.boot_image)?, - root: filename(&repo_args.root_image)?, - hash: filename(&repo_args.hash_image)?, - }; - - info!( - "Adding update to manifest for version: {}, arch: {}, variant: {}", - repo_args.version, repo_args.arch, repo_args.variant - ); - manifest - .add_update( - repo_args.version.clone(), - None, - repo_args.arch.clone(), - repo_args.variant.clone(), - images, - ) - .context(error::AddUpdateSnafu)?; - - // Add migrations =^..^= =^..^= =^..^= =^..^= - - info!( - "Using release config from path: {}", - repo_args.release_config_path.display() - ); - let release = Release::from_path(&repo_args.release_config_path).context( - error::UpdateMetadataReadSnafu { - path: &repo_args.release_config_path, - }, - )?; - trace!( - "Adding migrations to manifest for versions: {:#?}", - release - .migrations - .keys() - .map(|(from, to)| format!("({}, {})", from, to)) - .collect::>() - ); - // Replace the manifest 'migrations' section with the new data - manifest.migrations = release.migrations; - - // Add update waves =^..^= =^..^= =^..^= =^..^= - - let wave_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME); - info!( - "Using wave policy from path: {}", - repo_args.wave_policy_path.display() - ); - info!( - "Offsets from that file will be added to the release start time of: {}", - wave_start_time - ); - let waves = UpdateWaves::from_path(&repo_args.wave_policy_path).context( - error::UpdateMetadataReadSnafu { - path: &repo_args.wave_policy_path, - }, - )?; - manifest - .set_waves( - repo_args.variant.clone(), - repo_args.arch.clone(), - repo_args.version.clone(), - wave_start_time, - &waves, - ) - .context(error::SetWavesSnafu { - wave_policy_path: &repo_args.wave_policy_path, - })?; - - Ok(()) -} - -/// Set expirations of all non-root role metadata based on a given `RepoExpirationPolicy` and an -/// expiration start time -fn set_expirations( - editor: &mut RepositoryEditor, - expiration_policy: &RepoExpirationPolicy, - expiration_start_time: DateTime, -) -> Result<()> { - let snapshot_expiration = expiration_start_time + expiration_policy.snapshot_expiration; - let targets_expiration = expiration_start_time + expiration_policy.targets_expiration; - let timestamp_expiration = expiration_start_time + expiration_policy.timestamp_expiration; - info!( - "Setting non-root metadata expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}", - snapshot_expiration, targets_expiration, timestamp_expiration - ); - editor - .snapshot_expires(snapshot_expiration) - .targets_expires(targets_expiration) - .context(error::SetTargetsExpirationSnafu { - expiration: targets_expiration, - })? - .timestamp_expires(timestamp_expiration); - - Ok(()) -} - -/// Set versions of all role metadata; the version will be the UNIX timestamp of the current time. -fn set_versions(editor: &mut RepositoryEditor) -> Result<()> { - let seconds = Utc::now().timestamp(); - let unsigned_seconds = seconds.try_into().expect("System clock before 1970??"); - let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??"); - debug!("Repo version: {}", version); - editor - .snapshot_version(version) - .targets_version(version) - .context(error::SetTargetsVersionSnafu { version })? 
- .timestamp_version(version); - - Ok(()) -} - -/// Adds targets, expirations, and version to the RepositoryEditor -fn update_editor<'a, P>( - repo_args: &'a RepoArgs, - editor: &mut RepositoryEditor, - targets: impl Iterator, - manifest_path: P, -) -> Result<()> -where - P: AsRef, -{ - // Add targets =^..^= =^..^= =^..^= =^..^= - - for target_path in targets { - debug!("Adding target from path: {}", target_path.display()); - editor - .add_target_path(target_path) - .context(error::AddTargetSnafu { path: &target_path })?; - } - - let manifest_target = Target::from_path(&manifest_path).context(error::BuildTargetSnafu { - path: manifest_path.as_ref(), - })?; - debug!("Adding target for manifest.json"); - editor - .add_target("manifest.json", manifest_target) - .context(error::AddTargetSnafu { - path: "manifest.json", - })?; - - // Add expirations =^..^= =^..^= =^..^= =^..^= - - info!( - "Using repo expiration policy from path: {}", - repo_args.repo_expiration_policy_path.display() - ); - let expiration = RepoExpirationPolicy::from_path(&repo_args.repo_expiration_policy_path) - .context(error::ConfigSnafu)?; - - let expiration_start_time = repo_args.release_start_time.unwrap_or(*DEFAULT_START_TIME); - let snapshot_expiration = expiration_start_time + expiration.snapshot_expiration; - let targets_expiration = expiration_start_time + expiration.targets_expiration; - let timestamp_expiration = expiration_start_time + expiration.timestamp_expiration; - info!( - "Repo expiration times:\n\tsnapshot: {}\n\ttargets: {}\n\ttimestamp: {}", - snapshot_expiration, targets_expiration, timestamp_expiration - ); - editor - .snapshot_expires(snapshot_expiration) - .targets_expires(targets_expiration) - .context(error::SetTargetsExpirationSnafu { - expiration: targets_expiration, - })? - .timestamp_expires(timestamp_expiration); - - // Add version =^..^= =^..^= =^..^= =^..^= - - let seconds = Utc::now().timestamp(); - let unsigned_seconds = seconds.try_into().expect("System clock before 1970??"); - let version = NonZeroU64::new(unsigned_seconds).expect("System clock exactly 1970??"); - debug!("Repo version: {}", version); - editor - .snapshot_version(version) - .targets_version(version) - .context(error::SetTargetsVersionSnafu { version })? - .timestamp_version(version); - - Ok(()) -} - -/// If the infra config has a repo section defined for the given repo, and it has metadata base and -/// targets URLs defined, returns those URLs, otherwise None. -fn repo_urls<'a>( - repo_config: &'a RepoConfig, - variant: &str, - arch: &str, -) -> Result> { - // Check if both URLs are set - if let Some(metadata_base_url) = repo_config.metadata_base_url.as_ref() { - if let Some(targets_url) = repo_config.targets_url.as_ref() { - let base_slash = if metadata_base_url.as_str().ends_with('/') { - "" - } else { - "/" - }; - let metadata_url_str = - format!("{}{}{}/{}", metadata_base_url, base_slash, variant, arch); - let metadata_url = Url::parse(&metadata_url_str).context(error::ParseUrlSnafu { - input: &metadata_url_str, - })?; - - debug!("Using metadata url: {}", metadata_url); - return Ok(Some((metadata_url, targets_url))); - } - } - - Ok(None) -} - -/// Builds an editor and manifest; will start from an existing repo if one is specified in the -/// configuration. Returns Err if we fail to read from the repo. Returns Ok(None) if we detect -/// that the repo does not exist. 
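One concrete note on the `repo_urls` helper above before the repo loader: it only joins strings, adding a slash when the configured base URL lacks one and appending the variant and arch. A minimal, self-contained sketch of the same joining logic, with made-up values standing in for Infra.toml settings:

```rust
use url::Url;

fn main() -> Result<(), url::ParseError> {
    // Hypothetical values; pubsys takes these from Infra.toml and CLI args.
    let metadata_base_url = "https://example.com/metadata"; // no trailing slash
    let (variant, arch) = ("aws-k8s-1.24", "x86_64");

    // Same slash handling as repo_urls: only add '/' if the base lacks one.
    let base_slash = if metadata_base_url.ends_with('/') { "" } else { "/" };
    let metadata_url = Url::parse(&format!(
        "{}{}{}/{}",
        metadata_base_url, base_slash, variant, arch
    ))?;

    assert_eq!(
        metadata_url.as_str(),
        "https://example.com/metadata/aws-k8s-1.24/x86_64"
    );
    Ok(())
}
```

With a trailing slash already present on the base URL, the extra separator is skipped, so either form of the configured URL yields the same result.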
-fn load_editor_and_manifest<'a, P>( - root_role_path: P, - metadata_url: &'a Url, - targets_url: &'a Url, -) -> Result> -where - P: AsRef, -{ - let root_role_path = root_role_path.as_ref(); - - // Try to load the repo... - let repo_load_result = RepositoryLoader::new( - File::open(root_role_path).context(error::FileSnafu { - path: root_role_path, - })?, - metadata_url.clone(), - targets_url.clone(), - ) - .load(); - - match repo_load_result { - // If we load it successfully, build an editor and manifest from it. - Ok(repo) => { - let target = "manifest.json"; - let target = target - .try_into() - .context(error::ParseTargetNameSnafu { target })?; - let reader = repo - .read_target(&target) - .context(error::ReadTargetSnafu { - target: target.raw(), - })? - .with_context(|| error::NoManifestSnafu { - metadata_url: metadata_url.clone(), - })?; - let manifest = serde_json::from_reader(reader).context(error::InvalidJsonSnafu { - path: "manifest.json", - })?; - - let editor = RepositoryEditor::from_repo(root_role_path, repo) - .context(error::EditorFromRepoSnafu)?; - - Ok(Some((editor, manifest))) - } - // If we fail to load, but we only failed because the repo doesn't exist yet, then start - // fresh by signalling that there is no known repo. Otherwise, fail hard. - Err(e) => { - if is_file_not_found_error(&e) { - Ok(None) - } else { - Err(e).with_context(|_| error::RepoLoadSnafu { - metadata_base_url: metadata_url.clone(), - }) - } - } - } -} - -/// Inspects the `tough` error to see if it is a `Transport` error, and if so, is it `FileNotFound`. -fn is_file_not_found_error(e: &tough::error::Error) -> bool { - if let tough::error::Error::Transport { source, .. } = e { - matches!(source.kind(), TransportErrorKind::FileNotFound) - } else { - false - } -} - -/// Gets the corresponding `KeySource` according to the signing key config from Infra.toml -fn get_signing_key_source(signing_key_config: &SigningKeyConfig) -> Result> { - match signing_key_config { - SigningKeyConfig::file { path } => Ok(Box::new(LocalKeySource { path: path.clone() })), - SigningKeyConfig::kms { key_id, config, .. 
} => Ok(Box::new(KmsKeySource { - profile: None, - key_id: key_id - .clone() - .context(error::MissingConfigSnafu { missing: "key_id" })?, - client: { - let key_id_val = key_id - .clone() - .context(error::MissingConfigSnafu { missing: "key_id" })?; - match config.as_ref() { - Some(config_val) => get_client(config_val, &key_id_val)?, - None => None, - } - }, - signing_algorithm: KmsSigningAlgorithm::RsassaPssSha256, - })), - SigningKeyConfig::ssm { parameter } => Ok(Box::new(SsmKeySource { - profile: None, - parameter_name: parameter.clone(), - key_id: None, - })), - } -} - -/// Helper function that generates a KmsClient or None given config containing available keys -fn get_client(kmskey_config: &KMSKeyConfig, key_id: &str) -> Result> { - if let Some(region) = kmskey_config.available_keys.get(key_id) { - let rt = Runtime::new().context(error::RuntimeSnafu)?; - Ok(Some(rt.block_on(async { async_get_client(region).await }))) - } else { - Ok(None) - } -} - -/// Helper function that generates a KmsClient given region -async fn async_get_client(region: &str) -> KmsClient { - let client_config = aws_config::from_env() - .region(Region::new(region.to_string())) - .load() - .await; - KmsClient::new(&client_config) -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, repo_args: &RepoArgs) -> Result<()> { - let metadata_out_dir = repo_args - .outdir - .join(&repo_args.variant) - .join(&repo_args.arch); - let targets_out_dir = repo_args.outdir.join("targets"); - - // If the given metadata directory exists, throw an error. We don't want to overwrite a user's - // existing repository. (The targets directory is shared, so it's fine if that exists.) - ensure!( - !Path::exists(&metadata_out_dir), - error::RepoExistsSnafu { - path: metadata_out_dir - } - ); - - // Build repo =^..^= =^..^= =^..^= =^..^= - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::ConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - // If the user has the requested (or "default") repo defined in their Infra.toml, use it, - // otherwise use a default config. - let default_repo_config = RepoConfig::default(); - let repo_config = if let Some(repo_config) = infra_config - .repo - .as_ref() - .and_then(|repo_section| repo_section.get(&repo_args.repo)) - .map(|repo| { - info!("Using repo '{}' from Infra.toml", repo_args.repo); - repo - }) { - repo_config - } else { - info!( - "Didn't find repo '{}' in Infra.toml, using default configuration", - repo_args.repo - ); - &default_repo_config - }; - - // Build a repo editor and manifest, from an existing repo if available, otherwise fresh - let maybe_urls = repo_urls(repo_config, &repo_args.variant, &repo_args.arch)?; - let (mut editor, mut manifest) = if let Some((metadata_url, targets_url)) = maybe_urls.as_ref() - { - info!("Found metadata and target URLs, loading existing repository"); - match load_editor_and_manifest(&repo_args.root_role_path, metadata_url, targets_url)? 
{ - Some((editor, manifest)) => (editor, manifest), - None => { - warn!( - "Did not find repo at '{}', starting a new one", - metadata_url - ); - ( - RepositoryEditor::new(&repo_args.root_role_path) - .context(error::NewEditorSnafu)?, - Manifest::default(), - ) - } - } - } else { - info!("Did not find metadata and target URLs in infra config, creating a new repository"); - ( - RepositoryEditor::new(&repo_args.root_role_path).context(error::NewEditorSnafu)?, - Manifest::default(), - ) - }; - - // Add update information to manifest - update_manifest(repo_args, &mut manifest)?; - // Write manifest to tempfile so it can be copied in as target later - let manifest_path = NamedTempFile::new() - .context(error::TempFileSnafu)? - .into_temp_path(); - update_metadata::write_file(&manifest_path, &manifest).context(error::ManifestWriteSnafu { - path: &manifest_path, - })?; - - // Add manifest and targets to editor - let copy_targets = &repo_args.copy_targets; - let link_targets = repo_args.link_targets.iter().chain(vec![ - &repo_args.boot_image, - &repo_args.root_image, - &repo_args.hash_image, - ]); - let all_targets = copy_targets.iter().chain(link_targets.clone()); - - update_editor(repo_args, &mut editor, all_targets, &manifest_path)?; - - // Sign repo =^..^= =^..^= =^..^= =^..^= - - // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the - // generated local key. - let signing_key_config = repo_config.signing_keys.as_ref(); - - let key_source = if let Some(signing_key_config) = signing_key_config { - get_signing_key_source(signing_key_config)? - } else { - ensure!( - repo_args.default_key_path.exists(), - error::MissingConfigSnafu { - missing: "signing_keys in repo config, and we found no local key", - } - ); - Box::new(LocalKeySource { - path: repo_args.default_key_path.clone(), - }) - }; - - let signed_repo = editor.sign(&[key_source]).context(error::RepoSignSnafu)?; - - // Write repo =^..^= =^..^= =^..^= =^..^= - - // Write targets first so we don't have invalid metadata if targets fail - info!("Writing repo targets to: {}", targets_out_dir.display()); - fs::create_dir_all(&targets_out_dir).context(error::CreateDirSnafu { - path: &targets_out_dir, - })?; - - // Copy manifest with proper name instead of tempfile name - debug!("Copying manifest.json into {}", targets_out_dir.display()); - let target = "manifest.json"; - let target = target - .try_into() - .context(error::ParseTargetNameSnafu { target })?; - signed_repo - .copy_target( - &manifest_path, - &targets_out_dir, - // We should never have matching manifests from different repos - PathExists::Fail, - Some(&target), - ) - .context(error::CopyTargetSnafu { - target: &manifest_path, - path: &targets_out_dir, - })?; - - // Copy / link any other user requested targets - for copy_target in copy_targets { - debug!( - "Copying target '{}' into {}", - copy_target.display(), - targets_out_dir.display() - ); - signed_repo - .copy_target(copy_target, &targets_out_dir, PathExists::Skip, None) - .context(error::CopyTargetSnafu { - target: copy_target, - path: &targets_out_dir, - })?; - } - for link_target in link_targets { - debug!( - "Linking target '{}' into {}", - link_target.display(), - targets_out_dir.display() - ); - signed_repo - .link_target(link_target, &targets_out_dir, PathExists::Skip, None) - .context(error::LinkTargetSnafu { - target: link_target, - path: &targets_out_dir, - })?; - } - - info!("Writing repo metadata to: {}", metadata_out_dir.display()); - 
fs::create_dir_all(&metadata_out_dir).context(error::CreateDirSnafu { - path: &metadata_out_dir, - })?; - signed_repo - .write(&metadata_out_dir) - .context(error::RepoWriteSnafu { - path: &repo_args.outdir, - })?; - - Ok(()) -} - -mod error { - use chrono::{DateTime, Utc}; - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to add new update to manifest: {}", source))] - AddUpdate { - source: update_metadata::error::Error, - }, - - #[snafu(display("Failed to add new target '{}' to repo: {}", path.display(), source))] - AddTarget { - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to build target metadata from path '{}': {}", path.display(), source))] - BuildTarget { - path: PathBuf, - #[snafu(source(from(tough::schema::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to copy target '{}' to '{}': {}", target.display(), path.display(), source))] - CopyTarget { - target: PathBuf, - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Error reading config: {}", source))] - Config { source: pubsys_config::Error }, - - #[snafu(display("Failed to create directory '{}': {}", path.display(), source))] - CreateDir { path: PathBuf, source: io::Error }, - - #[snafu(display("Failed to create repo editor from given repo: {}", source))] - EditorFromRepo { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid path given for image file: '{}'", path.display()))] - InvalidImagePath { path: PathBuf }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidJson { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Failed to symlink target '{}' to '{}': {}", target.display(), path.display(), source))] - LinkTarget { - target: PathBuf, - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to write Manifest to '{}': {}", path.display(), source))] - ManifestWrite { - path: PathBuf, - source: update_metadata::error::Error, - }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Repo URLs not specified for repo '{}'", repo))] - MissingRepoUrls { repo: String }, - - #[snafu(display("Failed to create new repo editor: {}", source))] - NewEditor { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Repo does not have a manifest.json: {}", metadata_url))] - NoManifest { metadata_url: Url }, - - #[snafu(display("Non-UTF8 path '{}' not supported", path.display()))] - NonUtf8Path { path: PathBuf }, - - #[snafu(display("Invalid URL '{}': {}", input, source))] - ParseUrl { - input: String, - source: url::ParseError, - }, - - #[snafu(display("Failed to read target '{}' from repo: {}", target, source))] - ReadTarget { - target: String, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to create async runtime: {}", source))] - Runtime { source: std::io::Error }, - - #[snafu(display("Failed to parse target name from string '{}': {}", target, source))] - ParseTargetName { - target: String, - 
#[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Repo exists at '{}' - remove it and try again", path.display()))] - RepoExists { path: PathBuf }, - - #[snafu(display("Could not fetch repo at '{}': {}", url, msg))] - RepoFetch { url: Url, msg: String }, - - #[snafu(display( - "Failed to load repo from metadata URL '{}': {}", - metadata_base_url, - source - ))] - RepoLoad { - metadata_base_url: Url, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Requested repository does not exist: '{}'", url))] - RepoNotFound { url: Url }, - - #[snafu(display("Failed to sign repository: {}", source))] - RepoSign { - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to write repository to {}: {}", path.display(), source))] - RepoWrite { - path: PathBuf, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set targets expiration to {}: {}", expiration, source))] - SetTargetsExpiration { - expiration: DateTime, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set targets version to {}: {}", version, source))] - SetTargetsVersion { - version: u64, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to set waves from '{}': {}", wave_policy_path.display(), source))] - SetWaves { - wave_policy_path: PathBuf, - source: update_metadata::error::Error, - }, - - #[snafu(display("Failed to create temporary file: {}", source))] - TempFile { source: io::Error }, - - #[snafu(display("Failed to read update metadata '{}': {}", path.display(), source))] - UpdateMetadataRead { - path: PathBuf, - source: update_metadata::error::Error, - }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/pubsys/src/repo/check_expirations/mod.rs b/tools/pubsys/src/repo/check_expirations/mod.rs deleted file mode 100644 index bebbcd25..00000000 --- a/tools/pubsys/src/repo/check_expirations/mod.rs +++ /dev/null @@ -1,184 +0,0 @@ -//! The check_expirations module owns the 'check-repo-expirations' subcommand and provide methods for -//! checking the metadata expirations of a given TUF repository. - -use crate::repo::{error as repo_error, repo_urls}; -use crate::Args; -use chrono::{DateTime, Utc}; -use clap::Parser; -use log::{error, info, trace, warn}; -use parse_datetime::parse_datetime; -use pubsys_config::InfraConfig; -use snafu::{OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs::File; -use std::path::PathBuf; -use tough::{ExpirationEnforcement, Repository, RepositoryLoader}; -use url::Url; - -/// Checks for metadata expirations for a set of TUF repositories -#[derive(Debug, Parser)] -pub(crate) struct CheckExpirationsArgs { - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// The architecture of the repo being checked for expirations - arch: String, - #[arg(long)] - /// The variant of the repo being checked for expirations - variant: String, - - #[arg(long)] - /// Path to root.json for this repo - root_role_path: PathBuf, - - #[arg(long, value_parser = parse_datetime)] - /// Finds metadata files expiring between now and a specified time; RFC3339 date or "in X hours/days/weeks" - expiration_limit: DateTime, -} - -/// Checks for upcoming role expirations, gathering them in a map of role to expiration datetime. 
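The check itself reduces to comparing each role's `expires` timestamp against a cutoff derived from `--expiration-limit`. A minimal, self-contained sketch of that comparison using chrono; the role names and offsets are invented for illustration:

```rust
use chrono::{DateTime, Duration, Utc};

fn main() {
    // Hypothetical cutoff: 30 days from now, standing in for --expiration-limit.
    let cutoff: DateTime<Utc> = Utc::now() + Duration::days(30);

    // Hypothetical role expirations, standing in for repo.<role>().signed.expires.
    let roles = [
        ("snapshot", Utc::now() + Duration::days(7)),
        ("targets", Utc::now() + Duration::days(90)),
    ];

    for (role, expires) in roles {
        if expires <= cutoff {
            // Anything at or before the cutoff would be collected and reported.
            println!("{} expires soon, at {}", role, expires);
        }
    }
}
```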
-fn find_upcoming_metadata_expiration( - repo: &Repository, - end_date: DateTime, -) -> HashMap> { - let mut expirations = HashMap::new(); - info!( - "Looking for metadata expirations happening from now to {}", - end_date - ); - if repo.root().signed.expires <= end_date { - expirations.insert(tough::schema::RoleType::Root, repo.root().signed.expires); - } - if repo.snapshot().signed.expires <= end_date { - expirations.insert( - tough::schema::RoleType::Snapshot, - repo.snapshot().signed.expires, - ); - } - if repo.targets().signed.expires <= end_date { - expirations.insert( - tough::schema::RoleType::Targets, - repo.targets().signed.expires, - ); - } - if repo.timestamp().signed.expires <= end_date { - expirations.insert( - tough::schema::RoleType::Timestamp, - repo.timestamp().signed.expires, - ); - } - - expirations -} - -fn check_expirations( - root_role_path: &PathBuf, - metadata_url: &Url, - targets_url: &Url, - expiration_limit: DateTime, -) -> Result<()> { - // Load the repository - let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::FileSnafu { - path: root_role_path, - })?, - metadata_url.clone(), - targets_url.clone(), - ) - // We're gonna check the expiration ourselves - .expiration_enforcement(ExpirationEnforcement::Unsafe) - .load() - .context(repo_error::RepoLoadSnafu { - metadata_base_url: metadata_url.clone(), - })?; - info!("Loaded TUF repo:\t{}", metadata_url); - - info!("Root expiration:\t{}", repo.root().signed.expires); - info!("Snapshot expiration:\t{}", repo.snapshot().signed.expires); - info!("Targets expiration:\t{}", repo.targets().signed.expires); - info!("Timestamp expiration:\t{}", repo.timestamp().signed.expires); - // Check for upcoming metadata expirations if a timeframe is specified - let upcoming_expirations = find_upcoming_metadata_expiration(&repo, expiration_limit); - if !upcoming_expirations.is_empty() { - let now = Utc::now(); - for (role, expiration_date) in upcoming_expirations { - if expiration_date < now { - error!( - "Repo '{}': '{}' expired on {}", - metadata_url, role, expiration_date - ) - } else { - warn!( - "Repo '{}': '{}' expiring in {} at {}", - metadata_url, - role, - expiration_date - now, - expiration_date - ) - } - } - return Err(Error::RepoExpirations { - metadata_url: metadata_url.clone(), - }); - } - - Ok(()) -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, check_expirations_args: &CheckExpirationsArgs) -> Result<()> { - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::ConfigSnafu)?; - trace!("Parsed infra config: {:?}", infra_config); - let repo_config = infra_config - .repo - .as_ref() - .context(repo_error::MissingConfigSnafu { - missing: "repo section", - })? - .get(&check_expirations_args.repo) - .with_context(|| repo_error::MissingConfigSnafu { - missing: format!("definition for repo {}", &check_expirations_args.repo), - })?; - - let repo_urls = repo_urls( - repo_config, - &check_expirations_args.variant, - &check_expirations_args.arch, - )? 
- .context(repo_error::MissingRepoUrlsSnafu { - repo: &check_expirations_args.repo, - })?; - check_expirations( - &check_expirations_args.root_role_path, - &repo_urls.0, - repo_urls.1, - check_expirations_args.expiration_limit, - )?; - - Ok(()) -} - -mod error { - use snafu::Snafu; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(context(false), display("{}", source))] - Repo { - #[snafu(source(from(crate::repo::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Found expiring/expired metadata in '{}'", metadata_url))] - RepoExpirations { metadata_url: Url }, - } -} -pub(crate) use error::Error; - -type Result = std::result::Result; diff --git a/tools/pubsys/src/repo/refresh_repo/mod.rs b/tools/pubsys/src/repo/refresh_repo/mod.rs deleted file mode 100644 index be707876..00000000 --- a/tools/pubsys/src/repo/refresh_repo/mod.rs +++ /dev/null @@ -1,214 +0,0 @@ -//! The refresh_repo module owns the 'refresh-repo' subcommand and provide methods for -//! refreshing and re-signing the metadata files of a given TUF repository. - -use crate::repo::{ - error as repo_error, get_signing_key_source, repo_urls, set_expirations, set_versions, -}; -use crate::Args; -use chrono::{DateTime, Utc}; -use clap::Parser; -use lazy_static::lazy_static; -use log::{info, trace}; -use pubsys_config::{InfraConfig, RepoExpirationPolicy}; -use snafu::{ensure, OptionExt, ResultExt}; -use std::fs; -use std::fs::File; -use std::path::{Path, PathBuf}; -use tough::editor::RepositoryEditor; -use tough::key_source::{KeySource, LocalKeySource}; -use tough::{ExpirationEnforcement, RepositoryLoader}; -use url::Url; - -lazy_static! { - static ref EXPIRATION_START_TIME: DateTime = Utc::now(); -} - -/// Refreshes and re-sign TUF repositories' non-root metadata files with new expiration dates -#[derive(Debug, Parser)] -pub(crate) struct RefreshRepoArgs { - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// The architecture of the repo being refreshed and re-signed - arch: String, - #[arg(long)] - /// The variant of the repo being refreshed and re-signed - variant: String, - - #[arg(long)] - /// Path to root.json for this repo - root_role_path: PathBuf, - - #[arg(long)] - /// If we generated a local key, we'll find it here; used if Infra.toml has no key defined - default_key_path: PathBuf, - - #[arg(long)] - /// Path to file that defines when repo non-root metadata should expire - repo_expiration_policy_path: PathBuf, - - #[arg(long)] - /// Where to store the refresh/re-signed repository (just the metadata files) - outdir: PathBuf, - - #[arg(long)] - /// If this flag is set, repositories will succeed in loading and be refreshed even if they have - /// expired metadata files. - unsafe_refresh: bool, -} - -fn refresh_repo( - root_role_path: &PathBuf, - metadata_out_dir: &PathBuf, - metadata_url: &Url, - targets_url: &Url, - key_source: Box, - expiration: &RepoExpirationPolicy, - unsafe_refresh: bool, -) -> Result<(), Error> { - // If the given metadata directory exists, throw an error. We don't want to overwrite a user's - // existing repository. 
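A brief aside before the guard below: `EXPIRATION_START_TIME` above is captured once through `lazy_static`, so every non-root role gets an expiration computed from the same instant rather than from slightly different `Utc::now()` calls. A small, self-contained sketch of that pattern, with illustrative offsets:

```rust
use chrono::{DateTime, Utc};
use lazy_static::lazy_static;

lazy_static! {
    // Evaluated once, on first access; every later use sees the same instant.
    static ref START_TIME: DateTime<Utc> = Utc::now();
}

fn main() {
    let snapshot_expires = *START_TIME + chrono::Duration::days(7);
    let targets_expires = *START_TIME + chrono::Duration::days(14);
    println!("snapshot: {}, targets: {}", snapshot_expires, targets_expires);
}
```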
- ensure!( - !Path::exists(metadata_out_dir), - repo_error::RepoExistsSnafu { - path: metadata_out_dir - } - ); - - let expiration_enforcement = if unsafe_refresh { - ExpirationEnforcement::Unsafe - } else { - ExpirationEnforcement::Safe - }; - - // Load the repository and get the repo editor for it - let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::FileSnafu { - path: root_role_path, - })?, - metadata_url.clone(), - targets_url.clone(), - ) - .expiration_enforcement(expiration_enforcement) - .load() - .context(repo_error::RepoLoadSnafu { - metadata_base_url: metadata_url.clone(), - })?; - let mut repo_editor = RepositoryEditor::from_repo(root_role_path, repo) - .context(repo_error::EditorFromRepoSnafu)?; - info!("Loaded TUF repo: {}", metadata_url); - - // Refresh the expiration dates of all non-root metadata files - set_expirations(&mut repo_editor, expiration, *EXPIRATION_START_TIME)?; - - // Refresh the versions of all non-root metadata files - set_versions(&mut repo_editor)?; - - // Sign the repository - let signed_repo = repo_editor - .sign(&[key_source]) - .context(repo_error::RepoSignSnafu)?; - - // Write out the metadata files for the repository - info!("Writing repo metadata to: {}", metadata_out_dir.display()); - fs::create_dir_all(metadata_out_dir).context(repo_error::CreateDirSnafu { - path: &metadata_out_dir, - })?; - signed_repo - .write(metadata_out_dir) - .context(repo_error::RepoWriteSnafu { - path: &metadata_out_dir, - })?; - - Ok(()) -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, refresh_repo_args: &RefreshRepoArgs) -> Result<(), Error> { - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::ConfigSnafu)?; - trace!("Parsed infra config: {:?}", infra_config); - - let repo_config = infra_config - .repo - .as_ref() - .context(repo_error::MissingConfigSnafu { - missing: "repo section", - })? - .get(&refresh_repo_args.repo) - .context(repo_error::MissingConfigSnafu { - missing: format!("definition for repo {}", &refresh_repo_args.repo), - })?; - - // Check if we have a signing key defined in Infra.toml; if not, we'll fall back to the - // generated local key. - let signing_key_config = repo_config.signing_keys.as_ref(); - - let key_source = if let Some(signing_key_config) = signing_key_config { - get_signing_key_source(signing_key_config)? - } else { - ensure!( - refresh_repo_args.default_key_path.exists(), - repo_error::MissingConfigSnafu { - missing: "signing_keys in repo config, and we found no local key", - } - ); - Box::new(LocalKeySource { - path: refresh_repo_args.default_key_path.clone(), - }) - }; - - // Get the expiration policy - info!( - "Using repo expiration policy from path: {}", - refresh_repo_args.repo_expiration_policy_path.display() - ); - let expiration = - RepoExpirationPolicy::from_path(&refresh_repo_args.repo_expiration_policy_path) - .context(repo_error::ConfigSnafu)?; - - let repo_urls = repo_urls( - repo_config, - &refresh_repo_args.variant, - &refresh_repo_args.arch, - )? 
- .context(repo_error::MissingRepoUrlsSnafu { - repo: &refresh_repo_args.repo, - })?; - refresh_repo( - &refresh_repo_args.root_role_path, - &refresh_repo_args - .outdir - .join(&refresh_repo_args.variant) - .join(&refresh_repo_args.arch), - &repo_urls.0, - repo_urls.1, - key_source, - &expiration, - refresh_repo_args.unsafe_refresh, - )?; - - Ok(()) -} - -mod error { - use snafu::Snafu; - use url::Url; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(context(false), display("{}", source))] - Repo { - #[snafu(source(from(crate::repo::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to refresh & re-sign metadata for: {:#?}", list_of_urls))] - RepoRefresh { list_of_urls: Vec }, - } -} -pub(crate) use error::Error; diff --git a/tools/pubsys/src/repo/validate_repo/mod.rs b/tools/pubsys/src/repo/validate_repo/mod.rs deleted file mode 100644 index 1734f6bb..00000000 --- a/tools/pubsys/src/repo/validate_repo/mod.rs +++ /dev/null @@ -1,198 +0,0 @@ -//! The validate_repo module owns the 'validate-repo' subcommand and provides methods for validating -//! a given TUF repository by attempting to load the repository and download its targets. - -use crate::repo::{error as repo_error, repo_urls}; -use crate::Args; -use clap::Parser; -use log::{info, trace}; -use pubsys_config::InfraConfig; -use snafu::{OptionExt, ResultExt}; -use std::cmp::min; -use std::fs::File; -use std::io; -use std::path::PathBuf; -use std::sync::mpsc; -use tough::{Repository, RepositoryLoader, TargetName}; -use url::Url; - -/// Validates a set of TUF repositories -#[derive(Debug, Parser)] -pub(crate) struct ValidateRepoArgs { - #[arg(long)] - /// Use this named repo infrastructure from Infra.toml - repo: String, - - #[arg(long)] - /// The architecture of the repo being validated - arch: String, - #[arg(long)] - /// The variant of the repo being validated - variant: String, - - #[arg(long)] - /// Path to root.json for this repo - root_role_path: PathBuf, - - #[arg(long)] - /// Specifies whether to validate all listed targets by attempting to download them - validate_targets: bool, -} - -/// If we are on a machine with a large number of cores, then we limit the number of simultaneous -/// downloads to this arbitrarily chosen maximum. -const MAX_DOWNLOAD_THREADS: usize = 16; - -/// Retrieves listed targets and attempts to download them for validation purposes. We use a Rayon -/// thread pool instead of tokio for async execution because `reqwest::blocking` creates a tokio -/// runtime (and multiple tokio runtimes are not supported). 
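The shape of that fan-out/fan-in is ordinary Rayon plus an mpsc channel: spawn one task per target, send each result back, then drain the receiver. A minimal, self-contained sketch of the pattern, with a stand-in workload instead of real target downloads:

```rust
use std::sync::mpsc;

fn main() {
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .build()
        .expect("failed to build thread pool");

    let (tx, rx) = mpsc::channel();
    for item in ["a", "b", "c"] {
        let tx = tx.clone();
        pool.spawn(move || {
            // Each worker sends its result back over the channel.
            tx.send(format!("downloaded {}", item)).unwrap();
        });
    }
    drop(tx); // close the original sender so the receiver can finish

    // Blocks until every worker's sender has been dropped.
    for result in rx {
        println!("{}", result);
    }
}
```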
-fn retrieve_targets(repo: &Repository) -> Result<(), Error> { - let targets = &repo.targets().signed.targets; - let thread_pool = rayon::ThreadPoolBuilder::new() - .num_threads(min(num_cpus::get(), MAX_DOWNLOAD_THREADS)) - .build() - .context(error::ThreadPoolSnafu)?; - - // create the channels through which our download results will be passed - let (tx, rx) = mpsc::channel(); - - for target in targets.keys() { - let repo = repo.clone(); - let tx = tx.clone(); - info!("Downloading target: {}", target.raw()); - let target = target.clone(); - thread_pool.spawn(move || { - tx.send(download_targets(&repo, target)) - // inability to send on this channel is unrecoverable - .unwrap(); - }); - } - // close all senders - drop(tx); - - // block and await all downloads - let results: Vec> = rx.into_iter().collect(); - - // check all results and return the first error we see - for result in results { - result?; - } - - // no errors were found, the targets are validated - Ok(()) -} - -fn download_targets(repo: &Repository, target: TargetName) -> Result { - let mut reader = match repo.read_target(&target) { - Ok(Some(reader)) => reader, - Ok(None) => { - return error::TargetMissingSnafu { - target: target.raw(), - } - .fail() - } - Err(e) => { - return Err(e).context(error::TargetReadSnafu { - target: target.raw(), - }) - } - }; - // tough's `Read` implementation validates the target as it's being downloaded - io::copy(&mut reader, &mut io::sink()).context(error::TargetDownloadSnafu { - target: target.raw(), - }) -} - -fn validate_repo( - root_role_path: &PathBuf, - metadata_url: Url, - targets_url: &Url, - validate_targets: bool, -) -> Result<(), Error> { - // Load the repository - let repo = RepositoryLoader::new( - File::open(root_role_path).context(repo_error::FileSnafu { - path: root_role_path, - })?, - metadata_url.clone(), - targets_url.clone(), - ) - .load() - .context(repo_error::RepoLoadSnafu { - metadata_base_url: metadata_url.clone(), - })?; - info!("Loaded TUF repo: {}", metadata_url); - if validate_targets { - // Try retrieving listed targets - retrieve_targets(&repo)?; - } - - Ok(()) -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, validate_repo_args: &ValidateRepoArgs) -> Result<(), Error> { - // If a lock file exists, use that, otherwise use Infra.toml - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, false) - .context(repo_error::ConfigSnafu)?; - trace!("Parsed infra config: {:?}", infra_config); - let repo_config = infra_config - .repo - .as_ref() - .context(repo_error::MissingConfigSnafu { - missing: "repo section", - })? - .get(&validate_repo_args.repo) - .context(repo_error::MissingConfigSnafu { - missing: format!("definition for repo {}", &validate_repo_args.repo), - })?; - - let repo_urls = repo_urls( - repo_config, - &validate_repo_args.variant, - &validate_repo_args.arch, - )? 
- .context(repo_error::MissingRepoUrlsSnafu { - repo: &validate_repo_args.repo, - })?; - validate_repo( - &validate_repo_args.root_role_path, - repo_urls.0, - repo_urls.1, - validate_repo_args.validate_targets, - ) -} - -mod error { - use snafu::Snafu; - use std::io; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Invalid percentage specified: {} is greater than 100", percentage))] - InvalidPercentage { percentage: u8 }, - - #[snafu(context(false), display("{}", source))] - Repo { - #[snafu(source(from(crate::repo::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Failed to download and write target '{}': {}", target, source))] - TargetDownload { target: String, source: io::Error }, - - #[snafu(display("Missing target: {}", target))] - TargetMissing { target: String }, - - #[snafu(display("Failed to read target '{}' from repo: {}", target, source))] - TargetRead { - target: String, - #[snafu(source(from(tough::error::Error, Box::new)))] - source: Box, - }, - - #[snafu(display("Unable to create thread pool: {}", source))] - ThreadPool { source: rayon::ThreadPoolBuildError }, - } -} -pub(crate) use error::Error; diff --git a/tools/pubsys/src/vmware/govc.rs b/tools/pubsys/src/vmware/govc.rs deleted file mode 100644 index b44d6700..00000000 --- a/tools/pubsys/src/vmware/govc.rs +++ /dev/null @@ -1,177 +0,0 @@ -//! The govc module handles the process of building and executing the calls to Docker in order to -//! run specific `govc` commands. -use duct::cmd; -use log::trace; -use pubsys_config::vmware::{Datacenter, DatacenterCreds}; -use snafu::ResultExt; -use std::env; -use std::path::Path; -use std::process::Output; - -pub(crate) struct Govc { - env_config: Vec, -} - -impl Govc { - const GOVC: &'static str = "govc"; - - /// Make a new instance of `Govc`, creating all of the environment variables required to run - /// `govc` as Docker `--env` arguments - pub(crate) fn new(dc: Datacenter, creds: DatacenterCreds) -> Self { - let mut env_config = Vec::new(); - env_config.env_arg("GOVC_USERNAME", creds.username); - env_config.env_arg("GOVC_PASSWORD", creds.password); - env_config.env_arg("GOVC_URL", dc.vsphere_url); - env_config.env_arg("GOVC_DATACENTER", dc.datacenter); - env_config.env_arg("GOVC_DATASTORE", dc.datastore); - env_config.env_arg("GOVC_NETWORK", dc.network); - env_config.env_arg("GOVC_RESOURCE_POOL", dc.resource_pool); - env_config.env_arg("GOVC_FOLDER", dc.folder); - - Self { env_config } - } - - /// Run `govc import.ova` using Docker. - /// - /// Using the given name, OVA path, and import spec path, this function builds the `govc - /// import.ova` command as it will be used in the container. It also builds the necessary bind - /// mount arguments to mount the import spec and OVA into the container. Finally, it calls - /// `govc` via `docker run` invocation using these arguments. 
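For reference, the `env_arg` helper used by `Govc::new` above just accumulates paired `--env KEY=value` strings for the eventual `docker run` invocation. A tiny, self-contained sketch of the list it produces; the credential values are invented:

```rust
fn main() {
    let mut args: Vec<String> = Vec::new();
    // Mirrors the env_arg helper: each call pushes "--env" and "KEY=value".
    for (key, value) in [("GOVC_USERNAME", "admin"), ("GOVC_DATACENTER", "dc1")] {
        args.push("--env".to_string());
        args.push(format!("{}={}", key, value));
    }
    assert_eq!(
        args,
        ["--env", "GOVC_USERNAME=admin", "--env", "GOVC_DATACENTER=dc1"]
    );
}
```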
- pub(crate) fn upload_ova( - self, - name: S, - ova_path: P1, - import_spec_path: P2, - ) -> Result - where - S: AsRef, - P1: AsRef, - P2: AsRef, - { - let name = name.as_ref(); - let ova_host_path = ova_path.as_ref(); - let import_spec_host_path = import_spec_path.as_ref(); - - // Define the paths to the OVA and import spec we will use for the bind mounts into the - // container - let ova_container_path = "/tmp/bottlerocket.ova"; - let import_spec_container_path = "/tmp/import.spec"; - - //--mount type=bind,source="path/to/thing",target=/tmp/thing,readonly - let mount_config = &[ - // Mount the import spec file - "--mount", - &format!( - "type=bind,source={},target={},readonly", - import_spec_host_path.display(), - import_spec_container_path - ), - // Mount the OVA - "--mount", - &format!( - "type=bind,source={},target={},readonly", - ova_host_path.display(), - ova_container_path - ), - ]; - - // govc import.ova -options=/path/to/spec -name bottlerocket_vm_name /path/to/ova - let govc_cmd = &[ - Self::GOVC, - "import.ova", - &format!("-options={}", import_spec_container_path), - "-name", - name, - ova_container_path, - ]; - - let env_config: Vec<&str> = self.env_config.iter().map(|s| s.as_ref()).collect(); - - docker_run(&env_config, Some(mount_config), govc_cmd) - } -} - -/// Execute `docker run` using the SDK container with the specified environment, mount, and command -/// arguments. -/// -/// This builds the entire `docker run` command string using a list of Docker `--env FOO=BAR` -/// strings, an optional list of `--mount` strings, and a list of strings meant to be the command -/// to run in the container. -// The arguments are `&[&str]` in an attempt to be as flexible as possible for the caller -fn docker_run(docker_env: &[&str], mount: Option<&[&str]>, command: &[&str]) -> Result { - let sdk = env::var("BUILDSYS_SDK_IMAGE").context(error::EnvironmentSnafu { - var: "BUILDSYS_SDK_IMAGE", - })?; - trace!("SDK image: {}", sdk); - - let mut args = vec!["run"]; - args.push("--net=host"); - args.extend(docker_env); - - if let Some(mount_cfg) = mount { - args.extend(mount_cfg) - } - - args.push(&sdk); - args.extend(command); - - let output = cmd("docker", args) - .stderr_to_stdout() - .stdout_capture() - .unchecked() - .run() - .context(error::CommandStartSnafu)?; - - let stdout = String::from_utf8_lossy(&output.stdout); - trace!("{}", stdout); - if output.status.success() { - Ok(output) - } else { - error::DockerSnafu { output: stdout }.fail() - } -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -/// Helper trait for constructing Docker `--env` arguments -trait EnvArg { - fn env_arg(&mut self, key: S1, value: S2) - where - S1: AsRef, - S2: AsRef; -} - -impl EnvArg for Vec { - fn env_arg(&mut self, key: S1, value: S2) - where - S1: AsRef, - S2: AsRef, - { - self.push("--env".to_string()); - self.push(format!("{}={}", key.as_ref(), value.as_ref())) - } -} - -// =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -pub(crate) mod error { - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Failed to start command: {}", source))] - CommandStart { source: std::io::Error }, - - #[snafu(display("Docker invocation failed: {}", output))] - Docker { output: String }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - } -} -pub(crate) use error::Error; -type Result = 
std::result::Result; diff --git a/tools/pubsys/src/vmware/mod.rs b/tools/pubsys/src/vmware/mod.rs deleted file mode 100644 index 3eabc7ed..00000000 --- a/tools/pubsys/src/vmware/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub(crate) mod govc; -pub(crate) mod upload_ova; diff --git a/tools/pubsys/src/vmware/upload_ova/mod.rs b/tools/pubsys/src/vmware/upload_ova/mod.rs deleted file mode 100644 index 3df0454d..00000000 --- a/tools/pubsys/src/vmware/upload_ova/mod.rs +++ /dev/null @@ -1,239 +0,0 @@ -//! The upload_ova module owns the 'upload_ova' subcommand and is responsible for collating all of -//! the config necessary to upload an OVA bundle to VMware datacenters. -use crate::vmware::govc::Govc; -use crate::Args; -use clap::Parser; -use log::{debug, info, trace}; -use pubsys_config::vmware::{ - Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig, - VMWARE_CREDS_PATH, -}; -use pubsys_config::InfraConfig; -use serde::Serialize; -use snafu::{ensure, OptionExt, ResultExt}; -use std::fs; -use std::path::PathBuf; -use tempfile::NamedTempFile; -use tinytemplate::TinyTemplate; - -const SPEC_TEMPLATE_NAME: &str = "spec_template"; - -/// Uploads a Bottlerocket OVA to VMware datacenters -#[derive(Debug, Parser)] -pub(crate) struct UploadArgs { - /// Path to the OVA image - #[arg(short = 'o', long)] - ova: PathBuf, - - /// Path to the import spec - #[arg(short = 's', long)] - spec: PathBuf, - - /// The desired VM name - #[arg(short = 'n', long)] - name: String, - - /// Make the uploaded OVA a VM template - #[arg(long)] - mark_as_template: bool, - - /// Datacenters to which you want to upload the OVA - #[arg(long, value_delimiter = ',')] - datacenters: Vec, -} - -/// Common entrypoint from main() -pub(crate) fn run(args: &Args, upload_args: &UploadArgs) -> Result<()> { - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&args.infra_config_path, true) - .context(error::InfraConfigSnafu)?; - trace!("Using infra config: {:?}", infra_config); - - let vmware = infra_config - .vmware - .context(error::MissingConfigSnafu { missing: "vmware" })?; - - // If the user gave an override list of datacenters, use it, otherwise use what's in the config - let upload_datacenters = if !upload_args.datacenters.is_empty() { - &upload_args.datacenters - } else { - &vmware.datacenters - }; - ensure!( - !upload_datacenters.is_empty(), - error::MissingConfigSnafu { - missing: "vmware.datacenters" - } - ); - - // Retrieve credentials from GOVC_ environment variables - let creds_env = DatacenterCredsBuilder::from_env(); - // Retrieve credentials from file. The `home` crate is used to construct the VMWARE_CREDS_PATH, - // and it's possible (however unlikely) that it is unable to determine the user's home folder. - let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH { - if creds_file.exists() { - info!("Using vSphere credentials file at {}", creds_file.display()); - DatacenterCredsConfig::from_path(creds_file).context(error::VmwareConfigSnafu)? 
- } else { - info!("vSphere credentials file not found, will attempt to use environment"); - DatacenterCredsConfig::default() - } - } else { - info!("Unable to determine vSphere credentials file location, will attempt to use environment"); - DatacenterCredsConfig::default() - }; - - // Retrieve datacenter-related GOVC_ environment variables and any common configuration given - // via Infra.toml - let dc_env = DatacenterBuilder::from_env(); - let dc_common = vmware.common.as_ref(); - - // Read the import spec as a template - let import_spec_str = fs::read_to_string(&upload_args.spec).context(error::FileSnafu { - action: "read", - path: &upload_args.spec, - })?; - let mut tt = TinyTemplate::new(); - tt.add_template(SPEC_TEMPLATE_NAME, &import_spec_str) - .context(error::AddTemplateSnafu { - path: &upload_args.spec, - })?; - - info!( - "Uploading to datacenters: {}", - &upload_datacenters.join(", ") - ); - for dc in upload_datacenters { - debug!("Building config for {}", &dc); - // If any specific configuration exists for this datacenter, retrieve it from VMware - // config. Then build out a complete datacenter config with all values necessary to - // interact with VMware. Environment variables trump all others, so start with those, then - // fill in any missing items with datacenter-specific configuration and any common - // configuration. - let dc_config = vmware.datacenter.get(dc); - trace!("{} config: {:?}", &dc, &dc_config); - let datacenter: Datacenter = dc_env - .take_missing_from(dc_config) - .take_missing_from(dc_common) - .build() - .context(error::DatacenterBuildSnafu)?; - - // Use a similar pattern here for credentials; start with environment variables and fill in - // any missing items with the datacenter-specific credentials from file. - let dc_creds = creds_file.datacenter.get(dc); - let creds: DatacenterCreds = creds_env - .take_missing_from(dc_creds) - .build() - .context(error::CredsBuildSnafu)?; - - // Render the import spec with this datacenter's details and write to temp file - let rendered_spec = render_spec(&tt, &datacenter.network, upload_args.mark_as_template)?; - let import_spec = NamedTempFile::new().context(error::TempFileSnafu)?; - fs::write(import_spec.path(), &rendered_spec).context(error::FileSnafu { - action: "write", - path: import_spec.path(), - })?; - trace!("Import spec: {}", &rendered_spec); - - if upload_args.mark_as_template { - info!( - "Uploading OVA to datacenter '{}' as template with name: '{}'", - &dc, &upload_args.name - ); - } else { - info!( - "Uploading OVA to datacenter '{}' with name '{}'", - &dc, &upload_args.name - ); - } - - Govc::new(datacenter, creds) - .upload_ova(&upload_args.name, &upload_args.ova, import_spec) - .context(error::UploadOvaSnafu)?; - } - - Ok(()) -} - -/// Render the import spec template given the current network and template setting. -// This exists primarily to abstract the creation of the Context struct that is required by -// TinyTemplate; it's pretty ugly to do inline with the rest of the code. 
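To make the templating step concrete, here is a minimal standalone sketch of the same TinyTemplate pattern; the spec fragment and values are invented for illustration, real import specs ship with the variant.

    use serde::Serialize;
    use tinytemplate::TinyTemplate;

    #[derive(Serialize)]
    struct Context {
        network: String,
        mark_as_template: bool,
    }

    fn main() -> Result<(), tinytemplate::error::Error> {
        // Hypothetical spec fragment with the two placeholders render_spec() fills in.
        let spec = "Network={network}\nMarkAsTemplate={mark_as_template}\n";
        let mut tt = TinyTemplate::new();
        tt.add_template("spec_template", spec)?;
        let rendered = tt.render(
            "spec_template",
            &Context {
                network: "VM Network".to_string(),
                mark_as_template: false,
            },
        )?;
        println!("{}", rendered);
        Ok(())
    }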
-fn render_spec(tt: &TinyTemplate<'_>, network: S, mark_as_template: bool) -> Result -where - S: AsRef, -{ - #[derive(Debug, Serialize)] - struct Context { - network: String, - mark_as_template: bool, - } - - let context = Context { - network: network.as_ref().to_string(), - mark_as_template, - }; - - tt.render(SPEC_TEMPLATE_NAME, &context) - .context(error::RenderTemplateSnafu) -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub(crate) enum Error { - #[snafu(display("Error building template from '{}': {}", path.display(), source))] - AddTemplate { - path: PathBuf, - source: tinytemplate::error::Error, - }, - - #[snafu(display("Unable to build datacenter credentials: {}", source))] - CredsBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(display("Unable to build datacenter config: {}", source))] - DatacenterBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(display("Missing environment variable '{}'", var))] - Environment { - var: String, - source: std::env::VarError, - }, - - #[snafu(display("Failed to {} '{}': {}", action, path.display(), source))] - File { - action: String, - path: PathBuf, - source: io::Error, - }, - - #[snafu(display("Error reading config: {}", source))] - InfraConfig { source: pubsys_config::Error }, - - #[snafu(display("Infra.toml is missing {}", missing))] - MissingConfig { missing: String }, - - #[snafu(display("Error rendering template: {}", source))] - RenderTemplate { source: tinytemplate::error::Error }, - - #[snafu(display("Failed to create temporary file: {}", source))] - TempFile { source: io::Error }, - - #[snafu(display("Error reading config: {}", source))] - VmwareConfig { - source: pubsys_config::vmware::Error, - }, - - #[snafu(display("Failed to upload OVA: {}", source))] - UploadOva { source: crate::vmware::govc::Error }, - } -} -pub(crate) use error::Error; -type Result = std::result::Result; diff --git a/tools/rpm2img b/tools/rpm2img deleted file mode 100755 index 93e4cb4d..00000000 --- a/tools/rpm2img +++ /dev/null @@ -1,792 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2034 - -set -eu -o pipefail -shopt -qs failglob - -# import the partition helper functions -# shellcheck source=partyplanner -. 
"${0%/*}/partyplanner" - -OUTPUT_FMT="raw" -BUILDER_ARCH="$(uname -m)" -OVF_TEMPLATE="" - -GRUB_SET_PRIVATE_VAR="no" -XFS_DATA_PARTITION="no" -UEFI_SECURE_BOOT="no" - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --package-dir=*) PACKAGE_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - --output-fmt=*) OUTPUT_FMT="${optarg}" ;; - --os-image-size-gib=*) OS_IMAGE_SIZE_GIB="${optarg}" ;; - --data-image-size-gib=*) DATA_IMAGE_SIZE_GIB="${optarg}" ;; - --os-image-publish-size-gib=*) OS_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; - --data-image-publish-size-gib=*) DATA_IMAGE_PUBLISH_SIZE_GIB="${optarg}" ;; - --partition-plan=*) PARTITION_PLAN="${optarg}" ;; - --ovf-template=*) OVF_TEMPLATE="${optarg}" ;; - --with-grub-set-private-var=*) GRUB_SET_PRIVATE_VAR="${optarg}" ;; - --xfs-data-partition=*) XFS_DATA_PARTITION="${optarg}" ;; - --with-uefi-secure-boot=*) UEFI_SECURE_BOOT="${optarg}" ;; - esac -done - -case "${OUTPUT_FMT}" in - raw|qcow2|vmdk) ;; - *) - echo "unexpected image output format '${OUTPUT_FMT}'" >&2 - exit 1 - ;; -esac - -case "${PARTITION_PLAN}" in - split|unified) ;; - *) - echo "unexpected partition plan '${PARTITION_PLAN}'" >&2 - exit 1 - ;; -esac - -# Fail fast if the OVF template doesn't exist, or doesn't match the layout. -if [ "${OUTPUT_FMT}" == "vmdk" ] ; then - if [ ! -s "${OVF_TEMPLATE}" ] ; then - echo "required OVF template not found: ${OVF_TEMPLATE}" >&2 - exit 1 - fi - - if [ "${PARTITION_PLAN}" == "split" ] ; then - if ! grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then - echo "Missing data disk in OVF template, which is required for 'split' layout." >&2 - exit 1 - fi - fi - - if [ "${PARTITION_PLAN}" == "unified" ] ; then - if grep -Fq '{{DATA_DISK}}' "${OVF_TEMPLATE}" ; then - echo "Incorrect data disk in OVF template, which is not supported for 'unified' layout." >&2 - exit 1 - fi - fi - - if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - if ! grep -Fq '{{DB_CERT_DER_HEX}}' "${OVF_TEMPLATE}" ; then - echo "Missing CA certificate field in OVF template, which is required for Secure Boot support." >&2 - exit 1 - fi - fi -fi - -# Store output artifacts in a versioned directory. 
-OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" -mkdir -p "${OUTPUT_DIR}" - -FILENAME_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}" -SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}" -VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-${VERSION_ID}" -FRIENDLY_VERSIONED_SYMLINK_PREFIX="${IMAGE_NAME}-${VARIANT}-${ARCH}-v${VERSION_ID}" - -OS_IMAGE_NAME="${FILENAME_PREFIX}" -OS_IMAGE_SYMLINK="${SYMLINK_PREFIX}" -OS_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}" -OS_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}" - -DATA_IMAGE_NAME="${FILENAME_PREFIX}-data" -DATA_IMAGE_SYMLINK="${SYMLINK_PREFIX}-data" -DATA_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-data" -DATA_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-data" - -BOOT_IMAGE_NAME="${FILENAME_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" -BOOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-boot.ext4.lz4" - -VERITY_IMAGE_NAME="${FILENAME_PREFIX}-root.verity.lz4" -VERITY_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.verity.lz4" -VERITY_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" -VERITY_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.verity.lz4" - -ROOT_IMAGE_NAME="${FILENAME_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_SYMLINK="${SYMLINK_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_VERSIONED_SYMLINK="${VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" -ROOT_IMAGE_FRIENDLY_VERSIONED_SYMLINK="${FRIENDLY_VERSIONED_SYMLINK_PREFIX}-root.ext4.lz4" - -OS_IMAGE="$(mktemp)" -BOOT_IMAGE="$(mktemp)" -VERITY_IMAGE="$(mktemp)" -ROOT_IMAGE="$(mktemp)" -DATA_IMAGE="$(mktemp)" -EFI_IMAGE="$(mktemp)" -PRIVATE_IMAGE="$(mktemp)" -BOTTLEROCKET_DATA="$(mktemp)" - -ROOT_MOUNT="$(mktemp -d)" -BOOT_MOUNT="$(mktemp -d)" -DATA_MOUNT="$(mktemp -d)" -EFI_MOUNT="$(mktemp -d)" -PRIVATE_MOUNT="$(mktemp -d)" - -SBKEYS="${HOME}/sbkeys" - -SELINUX_ROOT="/etc/selinux" -SELINUX_POLICY="fortified" -SELINUX_FILE_CONTEXTS="${ROOT_MOUNT}/${SELINUX_ROOT}/${SELINUX_POLICY}/contexts/files/file_contexts" - -VERITY_VERSION=1 -VERITY_HASH_ALGORITHM=sha256 -VERITY_DATA_BLOCK_SIZE=4096 -VERITY_HASH_BLOCK_SIZE=4096 - -# Bottlerocket has been experimentally shown to boot faster on EBS volumes when striping the root filesystem into 4MiB stripes. -# We use 4kb ext4 blocks. The stride and stripe should both be $STRIPE_SIZE / $EXT4_BLOCK_SIZE -ROOT_STRIDE=1024 -ROOT_STRIPE_WIDTH=1024 - -case "${PARTITION_PLAN}" in - split) - truncate -s "${OS_IMAGE_SIZE_GIB}G" "${OS_IMAGE}" - truncate -s "${DATA_IMAGE_SIZE_GIB}G" "${DATA_IMAGE}" - ;; - unified) - truncate -s "$((OS_IMAGE_SIZE_GIB + DATA_IMAGE_SIZE_GIB))G" "${OS_IMAGE}" - ;; -esac - -declare -A partlabel parttype partguid partsize partoff -set_partition_sizes \ - "${OS_IMAGE_SIZE_GIB}" "${DATA_IMAGE_SIZE_GIB}" "${PARTITION_PLAN}" \ - partsize partoff -set_partition_labels partlabel -set_partition_types parttype -set_partition_uuids partguid "${PARTITION_PLAN}" - -declare -a partargs -for part in \ - BIOS \ - EFI-A BOOT-A ROOT-A HASH-A RESERVED-A \ - EFI-B BOOT-B ROOT-B HASH-B RESERVED-B \ - PRIVATE DATA-A DATA-B ; -do - # We create the DATA-B partition separately if we're using the split layout - if [ "${part}" == "DATA-B" ] ; then - continue - fi - - # Each partition is aligned to a 1 MiB boundary, and extends to the sector - # before the next partition starts. 
Specify the end point in sectors so we - # can subtract a sector to fix the off-by-one error that comes from adding - # start and size together. (1 MiB contains 2048 512-byte sectors.) - part_start="${partoff[${part}]}" - part_end="$((part_start + partsize[${part}]))" - part_end="$((part_end * 2048 - 1))" - - partargs+=(-n "0:${part_start}M:${part_end}") - partargs+=(-c "0:${partlabel[${part}]}") - partargs+=(-t "0:${parttype[${part}]}") - partargs+=(-u "0:${partguid[${part}]:-R}") - - # Boot partition attributes: - # 48 = gptprio priority bit - # 56 = gptprio successful bit - case "${part}" in - BOOT-A) partargs+=(-A 0:"set":48 -A 0:"set":56) ;; - BOOT-B) partargs+=(-A 0:"clear":48 -A 0:"clear":56) ;; - esac -done - -sgdisk --clear "${partargs[@]}" --sort --print "${OS_IMAGE}" - -# Partition the separate data disk, if we're using the split layout. -if [ "${PARTITION_PLAN}" == "split" ] ; then - data_start="${partoff[DATA-B]}" - data_end=$((data_start + partsize[DATA-B])) - data_end=$((data_end * 2048 - 1)) - sgdisk --clear \ - -n "0:${data_start}M:${data_end}" \ - -c "0:${partlabel[DATA-B]}" \ - -t "0:${parttype[DATA-B]}" \ - -u "0:${partguid[DATA-B]}" \ - --sort --print "${DATA_IMAGE}" -fi - -INSTALL_TIME="$(date -u +%Y-%m-%dT%H:%M:%SZ)" -rpm -iv --ignorearch --root "${ROOT_MOUNT}" "${PACKAGE_DIR}"/*.rpm - -# inventory installed packages -INVENTORY_QUERY="\{\"Name\":\"%{NAME}\"\ -,\"Publisher\":\"Bottlerocket\"\ -,\"Version\":\"${VERSION_ID}\"\ -,\"Release\":\"${BUILD_ID}\"\ -,\"InstalledTime\":\"${INSTALL_TIME}\"\ -,\"ApplicationType\":\"%{GROUP}\"\ -,\"Architecture\":\"%{ARCH}\"\ -,\"Url\":\"%{URL}\"\ -,\"Summary\":\"%{Summary}\"\}\n" - -mapfile -t installed_rpms <<< "$(rpm -qa --root "${ROOT_MOUNT}" \ - --queryformat "${INVENTORY_QUERY}")" - -# wrap installed_rpms mapfile into json -INVENTORY_DATA="$(jq --raw-output . <<< "${installed_rpms[@]}")" -# remove the 'bottlerocket-' prefix from package names -INVENTORY_DATA="$(jq --arg PKG_PREFIX "bottlerocket-" \ - '(.Name) |= sub($PKG_PREFIX; "")' <<< "${INVENTORY_DATA}")" -# sort by package name and add 'Content' as top-level -INVENTORY_DATA="$(jq --slurp 'sort_by(.Name)' <<< "${INVENTORY_DATA}" | jq '{"Content": .}')" -printf "%s\n" "${INVENTORY_DATA}" > "${ROOT_MOUNT}/usr/share/bottlerocket/application-inventory.json" - -# install licenses -install -p -m 0644 /host/{COPYRIGHT,LICENSE-APACHE,LICENSE-MIT} "${ROOT_MOUNT}"/usr/share/licenses/ -mksquashfs \ - "${ROOT_MOUNT}"/usr/share/licenses \ - "${ROOT_MOUNT}"/usr/share/bottlerocket/licenses.squashfs \ - -no-exports -all-root -comp zstd -rm -rf "${ROOT_MOUNT}"/var/lib "${ROOT_MOUNT}"/usr/share/licenses/* - -if [[ "${ARCH}" == "x86_64" ]]; then - # MBR and BIOS-BOOT - echo "(hd0) ${OS_IMAGE}" > "${ROOT_MOUNT}/boot/grub/device.map" - "${ROOT_MOUNT}/sbin/grub-bios-setup" \ - --directory="${ROOT_MOUNT}/boot/grub" \ - --device-map="${ROOT_MOUNT}/boot/grub/device.map" \ - --root="hd0" \ - --skip-fs-probe \ - "${OS_IMAGE}" - - rm -vf "${ROOT_MOUNT}"/boot/grub/* "${ROOT_MOUNT}"/sbin/grub* -fi - -# We also need an EFI partition, formatted FAT32 with the -# EFI binary at the correct path, e.g. /efi/boot. The grub -# package has placed the image in /boot/efi/EFI/BOOT. -mv "${ROOT_MOUNT}/boot/efi"/* "${EFI_MOUNT}" - -# Do the setup required for `pesign` and `gpg` signing and -# verification to "just work" later on, regardless of which -# type of signing profile we have. 
-if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - declare -a SHIM_SIGN_KEY - declare -a CODE_SIGN_KEY - - # For an AWS profile, we expect a config file for the PKCS11 - # helper. Otherwise, there should be a local key and cert. - if [ -s "${HOME}/.config/aws-kms-pkcs11/config.json" ] ; then - # Set AWS environment variables from build secrets, if present. - for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN ; do - val="${var,,}" - val="${HOME}/.aws/${val//_/-}.env" - [ -s "${val}" ] || continue - declare -x "${var}=$(cat "${val}")" - done - # Verify that AWS credentials are functional. - aws sts get-caller-identity - # Log all PKCS11 helper activity, to simplify debugging. - export AWS_KMS_PKCS11_DEBUG=1 - SB_KEY_SOURCE="aws" - SHIM_SIGN_KEY=(-c shim-sign-key -t shim-sign-key) - CODE_SIGN_KEY=(-c code-sign-key -t code-sign-key) - else - # Disable the PKCS11 helper. - rm /etc/pkcs11/modules/aws-kms-pkcs11.module - - # Generate the PKCS12 archives for import. - openssl pkcs12 \ - -export \ - -passout pass: \ - -inkey "${SBKEYS}/shim-sign.key" \ - -in "${SBKEYS}/shim-sign.crt" \ - -certfile "${SBKEYS}/db.crt" \ - -out "${SBKEYS}/shim-sign.p12" - - openssl pkcs12 \ - -export \ - -passout pass: \ - -inkey "${SBKEYS}/code-sign.key" \ - -in "${SBKEYS}/code-sign.crt" \ - -certfile "${SBKEYS}/vendor.crt" \ - -out "${SBKEYS}/code-sign.p12" - - # Import certificates and private key archive. - PEDB="/etc/pki/pesign" - - certutil -d "${PEDB}" -A -n db -i "${SBKEYS}/db.crt" -t ",,C" - certutil -d "${PEDB}" -A -n shim-sign-key -i "${SBKEYS}/shim-sign.crt" -t ",,P" - pk12util -d "${PEDB}" -i "${SBKEYS}/shim-sign.p12" -W "" - - certutil -d "${PEDB}" -A -n vendor -i "${SBKEYS}/vendor.crt" -t ",,C" - certutil -d "${PEDB}" -A -n code-sign-key -i "${SBKEYS}/code-sign.crt" -t ",,P" - pk12util -d "${PEDB}" -i "${SBKEYS}/code-sign.p12" -W "" - - certutil -d "${PEDB}" -L - SB_KEY_SOURCE="local" - SHIM_SIGN_KEY=(-c shim-sign-key) - CODE_SIGN_KEY=(-c code-sign-key) - fi - - # Convert certificates from PEM format (ASCII) to DER (binary). This could be - # done when the certificates are created, but the resulting binary files are - # not as nice to store in source control. - for cert in PK KEK db vendor ; do - openssl x509 \ - -inform PEM -in "${SBKEYS}/${cert}.crt" \ - -outform DER -out "${SBKEYS}/${cert}.cer" - done - - # For signing the grub config, we need to embed the GPG public key in binary - # form, which is similarly awkward to store in source control. - gpg --import "${SBKEYS}/config-sign.key" - if [ "${SB_KEY_SOURCE}" == "aws" ] ; then - gpg --card-status - fi - gpg --export > "${SBKEYS}/config-sign.pubkey" - gpg --list-keys -fi - -# shim expects the following data structure in `.vendor_cert`: -# -# struct { -# uint32_t vendor_authorized_size; -# uint32_t vendor_deauthorized_size; -# uint32_t vendor_authorized_offset; -# uint32_t vendor_deauthorized_offset; -# } cert_table; -# -cert_table() { - local input output size offset uint32_t - input="${1:?}" - output="${2:?}" - size="$(stat -c %s "${input}")" - rm -f "${output}" - # The cert payload is offset by four 4-byte uint32_t values in the header. - offset="$((4 * 4))" - for n in "${size}" 0 "${offset}" "$(( size + offset ))" ; do - printf -v uint32_t '\\x%02x\\x%02x\\x%02x\\x%02x' \ - $((n & 255)) $((n >> 8 & 255)) $((n >> 16 & 255)) $((n >> 24 & 255)) - printf "${uint32_t}" >> "${output}" - done - cat "${input}" >> "${output}" - # Zero-pad the output to the expected section size. 
Otherwise a subsequent - # `objcopy` operation on the same section might fail to replace it, if the - # new vendor certificate is larger than this one. - truncate -s 4096 "${output}" -} - -# Helper function to log the object layout before and after changes. -objdumpcopy() { - local obj objdump objcopy - obj="${1:?}" - shift - objdump="${ARCH}-bottlerocket-linux-gnu-objdump" - objcopy="${ARCH}-bottlerocket-linux-gnu-objcopy" - "${objdump}" -h "${obj}" - "${objcopy}" "${@}" "${obj}" - "${objdump}" -h "${obj}" -} - -pushd "${EFI_MOUNT}/EFI/BOOT" >/dev/null -shims=(boot*.efi) -shim="${shims[0]}" -grubs=(grub*.efi) -grub="${grubs[0]}" -mokms=(mm*.efi) -mokm="${mokms[0]}" -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - # Convert the vendor certificate to the expected format. - cert_table "${SBKEYS}/vendor.cer" "${SBKEYS}/vendor.obj" - - # Replace the embedded vendor certificate, then sign shim with the db key. - objdumpcopy "${shim}" \ - --update-section ".vendor_cert=${SBKEYS}/vendor.obj" - pesign -i "${shim}" -o "${shim}.signed" -s "${SHIM_SIGN_KEY[@]}" - mv "${shim}.signed" "${shim}" - pesigcheck -i "${shim}" -n 0 -c "${SBKEYS}/db.cer" - - # Sign the MOK manager as well. - pesign -i "${mokm}" -o "${mokm}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${mokm}.signed" "${mokm}" - pesigcheck -i "${mokm}" -n 0 -c "${SBKEYS}/vendor.cer" - - # Replace the embedded gpg public key, then sign grub with the vendor key. - objdumpcopy "${grub}" \ - --file-alignment 4096 \ - --update-section ".pubkey=${SBKEYS}/config-sign.pubkey" - pesign -i "${grub}" -o "${grub}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${grub}.signed" "${grub}" - pesigcheck -i "${grub}" -n 0 -c "${SBKEYS}/vendor.cer" -else - # Generate a zero-sized certificate in the expected format. - cert_table /dev/null "${SBKEYS}/vendor.obj" - - # Replace the embedded vendor certificate with the zero-sized one, which shim - # will ignore when Secure Boot is disabled. - objdumpcopy "${shim}" \ - --update-section ".vendor_cert=${SBKEYS}/vendor.obj" - - # Remove the embedded gpg public key to disable GRUB's signature checks. - objdumpcopy "${grub}" \ - --file-alignment 4096 \ - --remove-section ".pubkey" -fi -popd >/dev/null - -dd if=/dev/zero of="${EFI_IMAGE}" bs=1M count="${partsize[EFI-A]}" -mkfs.vfat -I -S 512 "${EFI_IMAGE}" $((partsize[EFI-A] * 1024)) -mmd -i "${EFI_IMAGE}" ::/EFI -mmd -i "${EFI_IMAGE}" ::/EFI/BOOT -mcopy -i "${EFI_IMAGE}" "${EFI_MOUNT}/EFI/BOOT"/*.efi ::/EFI/BOOT -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - # Make the signing certificate available on the EFI system partition so it - # can be imported through the firmware setup UI on bare metal systems. - mcopy -i "${EFI_IMAGE}" "${SBKEYS}"/db.{crt,cer} ::/EFI/BOOT -fi -dd if="${EFI_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[EFI-A]}" - -# Ensure that the grub directory exists. 
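For reference, the `.vendor_cert` layout produced by the cert_table() helper above can be sketched as a standalone function; this is an illustration only, not part of the build scripts.

    // Mirrors cert_table(): four little-endian u32 header fields, then the DER
    // certificate, zero-padded to the 4096-byte section size expected by
    // `objcopy --update-section`.
    fn cert_table_bytes(cert_der: &[u8]) -> Vec<u8> {
        let size = cert_der.len() as u32;
        let offset = 4 * 4u32; // payload starts after the four header fields
        let mut out = Vec::new();
        out.extend_from_slice(&size.to_le_bytes());             // vendor_authorized_size
        out.extend_from_slice(&0u32.to_le_bytes());             // vendor_deauthorized_size
        out.extend_from_slice(&offset.to_le_bytes());           // vendor_authorized_offset
        out.extend_from_slice(&(size + offset).to_le_bytes());  // vendor_deauthorized_offset
        out.extend_from_slice(cert_der);
        out.resize(4096, 0); // equivalent of `truncate -s 4096`
        out
    }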
-mkdir -p "${ROOT_MOUNT}/boot/grub" - -# Now that we're done messing with /, move /boot out of it -mv "${ROOT_MOUNT}/boot"/* "${BOOT_MOUNT}" - -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - pushd "${BOOT_MOUNT}" >/dev/null - vmlinuz="vmlinuz" - pesign -i "${vmlinuz}" -o "${vmlinuz}.signed" -s "${CODE_SIGN_KEY[@]}" - mv "${vmlinuz}.signed" "${vmlinuz}" - pesigcheck -i "${vmlinuz}" -n 0 -c "${SBKEYS}/vendor.cer" - popd >/dev/null -fi - -# Set the Bottlerocket variant, version, and build-id -SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" -VERSION="${VERSION_ID} (${VARIANT})" -cat <> "${ROOT_MOUNT}/${SYS_ROOT}/usr/lib/os-release" -VERSION="${VERSION}" -PRETTY_NAME="${PRETTY_NAME} ${VERSION}" -VARIANT_ID=${VARIANT} -VERSION_ID=${VERSION_ID} -BUILD_ID=${BUILD_ID} -HOME_URL="https://github.com/bottlerocket-os/bottlerocket" -SUPPORT_URL="https://github.com/bottlerocket-os/bottlerocket/discussions" -BUG_REPORT_URL="https://github.com/bottlerocket-os/bottlerocket/issues" -DOCUMENTATION_URL="https://bottlerocket.dev" -EOF - -# Set the BOTTLEROCKET-DATA Filesystem for creating/mounting -if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then - printf "%s\n" "DATA_PARTITION_FILESYSTEM=xfs" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" -else - printf "%s\n" "DATA_PARTITION_FILESYSTEM=ext4" >> "${ROOT_MOUNT}/${SYS_ROOT}/usr/share/bottlerocket/image-features.env" -fi - -# BOTTLEROCKET-ROOT-A -mkdir -p "${ROOT_MOUNT}/lost+found" -ROOT_LABELS=$(setfiles -n -d -F -m -r "${ROOT_MOUNT}" \ - "${SELINUX_FILE_CONTEXTS}" "${ROOT_MOUNT}" \ - | awk -v root="${ROOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -E "lazy_itable_init=0,stride=${ROOT_STRIDE},stripe_width=${ROOT_STRIPE_WIDTH}" \ - -O ^has_journal -b "${VERITY_DATA_BLOCK_SIZE}" -d "${ROOT_MOUNT}" "${ROOT_IMAGE}" "${partsize[ROOT-A]}M" -echo "${ROOT_LABELS}" | debugfs -w -f - "${ROOT_IMAGE}" -resize2fs -M "${ROOT_IMAGE}" -dd if="${ROOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[ROOT-A]}" - -# BOTTLEROCKET-VERITY-A -veritypart_mib="${partsize[HASH-A]}" -truncate -s "${veritypart_mib}M" "${VERITY_IMAGE}" -veritysetup_output="$(veritysetup format \ - --format "$VERITY_VERSION" \ - --hash "$VERITY_HASH_ALGORITHM" \ - --data-block-size "$VERITY_DATA_BLOCK_SIZE" \ - --hash-block-size "$VERITY_HASH_BLOCK_SIZE" \ - "${ROOT_IMAGE}" "${VERITY_IMAGE}" \ - | tee /dev/stderr)" -verityimage_size="$(stat -c %s "${VERITY_IMAGE}")" -veritypart_bytes="$((veritypart_mib * 1024 * 1024))" -if [ "${verityimage_size}" -gt "${veritypart_bytes}" ] ; then - echo "verity content is larger than partition (${veritypart_mib}M)" - exit 1 -fi -VERITY_DATA_4K_BLOCKS="$(grep '^Data blocks:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -VERITY_DATA_512B_BLOCKS="$((VERITY_DATA_4K_BLOCKS * 8))" -VERITY_ROOT_HASH="$(grep '^Root hash:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -VERITY_SALT="$(grep '^Salt:' <<<"${veritysetup_output}" | awk '{ print $NF }')" -veritysetup verify "${ROOT_IMAGE}" "${VERITY_IMAGE}" "${VERITY_ROOT_HASH}" -dd if="${VERITY_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[HASH-A]}" - -declare -a DM_VERITY_ROOT -DM_VERITY_ROOT=( - "root,,,ro,0" - "${VERITY_DATA_512B_BLOCKS}" - "verity" - "${VERITY_VERSION}" - "PARTUUID=\$boot_uuid/PARTNROFF=1" - "PARTUUID=\$boot_uuid/PARTNROFF=2" - "${VERITY_DATA_BLOCK_SIZE}" - "${VERITY_HASH_BLOCK_SIZE}" - "${VERITY_DATA_4K_BLOCKS}" - "1" - "${VERITY_HASH_ALGORITHM}" - "${VERITY_ROOT_HASH}" - "${VERITY_SALT}" - "2" - 
"restart_on_corruption" - "ignore_zero_blocks" -) - -# write GRUB config -# If GRUB_SET_PRIVATE_VAR is set, include the parameters that support Boot Config -if [ "${GRUB_SET_PRIVATE_VAR}" == "yes" ] ; then - BOOTCONFIG='bootconfig' - INITRD="initrd (\$private)/bootconfig.data" -else - BOOTCONFIG="" - INITRD="" -fi - -# If UEFI_SECURE_BOOT is set, disable interactive edits. Otherwise the intended -# kernel command line parameters could be changed if the boot fails. Disable -# signature checking as well, since grub.cfg will have already been verified -# before we reach this point. bootconfig.data is generated at runtime and can't -# be signed with a trusted key, so continuing to check signatures would prevent -# it from being read. If boot fails, trigger an automatic reboot, since nothing -# can be changed for troubleshooting purposes. -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - echo 'set superusers=""' > "${BOOT_MOUNT}/grub/grub.cfg" - echo 'set check_signatures="no"' >> "${BOOT_MOUNT}/grub/grub.cfg" - FALLBACK=$' echo "rebooting in 30 seconds..."\n' - FALLBACK+=$' sleep 30\n' - FALLBACK+=$' reboot\n' -else - FALLBACK="" -fi - -cat <> "${BOOT_MOUNT}/grub/grub.cfg" -set default="0" -set timeout="0" -set dm_verity_root="${DM_VERITY_ROOT[@]}" - -menuentry "${PRETTY_NAME} ${VERSION_ID}" --unrestricted { - linux (\$root)/vmlinuz \\ - ${KERNEL_PARAMETERS} \\ - ${BOOTCONFIG} \\ - root=/dev/dm-0 rootwait ro \\ - raid=noautodetect \\ - random.trust_cpu=on \\ - selinux=1 enforcing=1 \\ - dm-mod.create="\$dm_verity_root" \\ - -- \\ - systemd.log_target=journal-or-kmsg \\ - systemd.log_color=0 \\ - systemd.show_status=true - ${INITRD} - boot - ${FALLBACK} -} -EOF - -if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - gpg --detach-sign "${BOOT_MOUNT}/grub/grub.cfg" - gpg --verify "${BOOT_MOUNT}/grub/grub.cfg.sig" -fi - -# BOTTLEROCKET-BOOT-A -mkdir -p "${BOOT_MOUNT}/lost+found" -chmod -R go-rwx "${BOOT_MOUNT}" -BOOT_LABELS=$(setfiles -n -d -F -m -r "${BOOT_MOUNT}" \ - "${SELINUX_FILE_CONTEXTS}" "${BOOT_MOUNT}" \ - | awk -v root="${BOOT_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_set", $1, "security.selinux", $4}') -mkfs.ext4 -O ^has_journal -d "${BOOT_MOUNT}" "${BOOT_IMAGE}" "${partsize[BOOT-A]}M" -echo "${BOOT_LABELS}" | debugfs -w -f - "${BOOT_IMAGE}" -resize2fs -M "${BOOT_IMAGE}" -dd if="${BOOT_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[BOOT-A]}" - -# BOTTLEROCKET-PRIVATE - -# Generate an empty bootconfig file for the image, so grub doesn't pause and -# print an error that the file doesn't exist. -cat < "${PRIVATE_MOUNT}/bootconfig.in" -kernel {} -init {} -EOF -touch "${PRIVATE_MOUNT}/bootconfig.data" -bootconfig -a "${PRIVATE_MOUNT}/bootconfig.in" "${PRIVATE_MOUNT}/bootconfig.data" -rm "${PRIVATE_MOUNT}/bootconfig.in" - -# Targeted toward the current API server implementation. -# Relative to the ext4 defaults, we: -# - adjust the inode ratio since we expect lots of small files -# - retain the inode size to allow most settings to be stored inline -# - retain the block size to handle worse-case alignment for hardware -mkfs.ext4 -b 4096 -i 4096 -I 256 -d "${PRIVATE_MOUNT}" "${PRIVATE_IMAGE}" "${partsize[PRIVATE]}M" -dd if="${PRIVATE_IMAGE}" of="${OS_IMAGE}" conv=notrunc bs=1M seek="${partoff[PRIVATE]}" - -# BOTTLEROCKET-DATA-A and BOTTLEROCKET-DATA-B - -# If we build on a host with SELinux enabled, we could end up with labels that -# do not match our policy. 
Since we allow replacing the data volume at runtime, -# we can't count on these labels being correct in any case, and it's better to -# remove them all. -UNLABELED=$(find "${DATA_MOUNT}" \ - | awk -v root="${DATA_MOUNT}" '{gsub(root"/","/"); gsub(root,"/"); print "ea_rm", $1, "security.selinux"}') - -mkfs_data() { - local target size offset - target="${1:?}" - size="${2:?}" - offset="${3:?}" - # Create an XFS filesystem if requested - if [ "${XFS_DATA_PARTITION}" == "yes" ] ; then - echo "writing blank partition for DATA" - # Create a file to write the filesystem to first - dd if=/dev/zero of="${BOTTLEROCKET_DATA}" bs=1M count=${size%?} - else - # default to ext4 - echo "writing ext4 filesystem for DATA" - mkfs.ext4 -m 0 -d "${DATA_MOUNT}" "${BOTTLEROCKET_DATA}" "${size}" - echo "${UNLABELED}" | debugfs -w -f - "${BOTTLEROCKET_DATA}" - fi - dd if="${BOTTLEROCKET_DATA}" of="${target}" conv=notrunc bs=1M seek="${offset}" -} - -# Decide which data filesystem to create at build time based on layout. -# -# The DATA-A partition will always exist, but for the "split" layout, it will be -# too small to provide the desired filesystem parameters (inode count, etc) when -# it is grown later on. Hence this filesystem is only created for "unified". -# -# The DATA-B partition does not exist on the "unified" layout, which anticipates -# a single storage device. Hence this filesystem is only created for "split". -# -# If the other partition is available at runtime, the filesystem will be created -# during first boot instead, providing flexibility at the cost of a minor delay. -case "${PARTITION_PLAN}" in - unified) - mkfs_data "${OS_IMAGE}" "${partsize["DATA-A"]}M" "${partoff["DATA-A"]}" - ;; - split) - mkfs_data "${DATA_IMAGE}" "${partsize["DATA-B"]}M" "${partoff["DATA-B"]}" - ;; -esac - -sgdisk -v "${OS_IMAGE}" -[ -s "${DATA_IMAGE}" ] && sgdisk -v "${DATA_IMAGE}" - -symlink_image() { - local ext what - ext="${1}" - what="${2}" - ext="${ext:+.$ext}" - target="${what^^}_NAME" - for link in symlink versioned_symlink friendly_versioned_symlink ; do - link="${what^^}_${link^^}" - ln -s "${!target}${ext}" "${OUTPUT_DIR}/${!link}${ext}" - done -} - -if [[ ${OUTPUT_FMT} == "raw" ]]; then - lz4 -vc "${OS_IMAGE}" >"${OUTPUT_DIR}/${OS_IMAGE_NAME}.img.lz4" - symlink_image "img.lz4" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - lz4 -vc "${DATA_IMAGE}" >"${OUTPUT_DIR}/${DATA_IMAGE_NAME}.img.lz4" - symlink_image "img.lz4" "data_image" - fi -elif [[ ${OUTPUT_FMT} == "qcow2" ]]; then - qemu-img convert -f raw -O qcow2 "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.qcow2" - symlink_image "qcow2" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O qcow2 "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.qcow2" - symlink_image "qcow2" "data_image" - fi -elif [[ ${OUTPUT_FMT} == "vmdk" ]]; then - # Stream optimization is required for creating an Open Virtual Appliance (OVA) - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${OS_IMAGE}" "${OUTPUT_DIR}/${OS_IMAGE_NAME}.vmdk" - symlink_image "vmdk" "os_image" - if [ -s "${DATA_IMAGE}" ] ; then - qemu-img convert -f raw -O vmdk -o subformat=streamOptimized "${DATA_IMAGE}" "${OUTPUT_DIR}/${DATA_IMAGE_NAME}.vmdk" - symlink_image "vmdk" "data_image" - fi -fi - -# Now create the OVA if needed. -if [ "${OUTPUT_FMT}" == "vmdk" ] ; then - os_vmdk="${OS_IMAGE_NAME}.vmdk" - data_vmdk="${DATA_IMAGE_NAME}.vmdk" - ovf="${OS_IMAGE_NAME}.ovf" - ova_dir="$(mktemp -d)" - - # The manifest expects disk sizes in bytes. 
- bytes_in_gib="$((1024 * 1024 * 1024))" - os_disk_bytes="$((OS_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" - data_disk_bytes="$((DATA_IMAGE_PUBLISH_SIZE_GIB * bytes_in_gib))" - sed "${OVF_TEMPLATE}" \ - -e "s/{{OS_DISK}}/${os_vmdk}/g" \ - -e "s/{{DATA_DISK}}/${data_vmdk}/g" \ - -e "s/{{OS_DISK_BYTES}}/${os_disk_bytes}/g" \ - -e "s/{{DATA_DISK_BYTES}}/${data_disk_bytes}/g" \ - > "${ova_dir}/${ovf}" - - # The manifest templates for Secure Boot expect the cert data for - # PK, KEK, db, and dbx. - if [ "${UEFI_SECURE_BOOT}" == "yes" ] ; then - pk_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/PK.cer")" - kek_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/KEK.cer")" - db_cert_der_hex="$(hexdump -ve '1/1 "%02x"' "${SBKEYS}/db.cer")" - dbx_empty_hash_hex="$(sha256sum /dev/null | awk '{ print $1 }')" - sed -i \ - -e "s/{{PK_CERT_DER_HEX}}/${pk_cert_der_hex}/g" \ - -e "s/{{KEK_CERT_DER_HEX}}/${kek_cert_der_hex}/g" \ - -e "s/{{DB_CERT_DER_HEX}}/${db_cert_der_hex}/g" \ - -e "s/{{DBX_EMPTY_HASH_HEX}}/${dbx_empty_hash_hex}/g" \ - "${ova_dir}/${ovf}" - fi - - # Make sure we replaced all the '{{...}}' fields with real values. - if grep -F -e '{{' -e '}}' "${ova_dir}/${ovf}" ; then - echo "Failed to fully render the OVF template" >&2 - exit 1 - fi - - # Create the manifest file with the hashes of the VMDKs and the OVF. - manifest="${OS_IMAGE_NAME}.mf" - pushd "${OUTPUT_DIR}" >/dev/null - os_sha256="$(sha256sum ${os_vmdk} | awk '{print $1}')" - echo "SHA256(${os_vmdk})= ${os_sha256}" > "${ova_dir}/${manifest}" - if [ -s "${DATA_IMAGE}" ] ; then - data_sha256="$(sha256sum ${data_vmdk} | awk '{print $1}')" - echo "SHA256(${data_vmdk})= ${data_sha256}" >> "${ova_dir}/${manifest}" - fi - popd >/dev/null - pushd "${ova_dir}" >/dev/null - ovf_sha256="$(sha256sum ${ovf} | awk '{print $1}')" - echo "SHA256(${ovf})= ${ovf_sha256}" >> "${manifest}" - popd >/dev/null - - # According to the OVF spec: - # https://www.dmtf.org/sites/default/files/standards/documents/DSP0243_2.1.1.pdf, - # the OVF must be first in the tar bundle. Manifest is next, and then the - # files must fall in the same order as listed in the References section of the - # OVF file - ova="${OS_IMAGE_NAME}.ova" - tar -cf "${OUTPUT_DIR}/${ova}" -C "${ova_dir}" "${ovf}" "${manifest}" - tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${os_vmdk}" - if [ -s "${DATA_IMAGE}" ] ; then - tar -rf "${OUTPUT_DIR}/${ova}" -C "${OUTPUT_DIR}" "${data_vmdk}" - fi - - symlink_image "ova" "os_image" -fi - -lz4 -9vc "${BOOT_IMAGE}" >"${OUTPUT_DIR}/${BOOT_IMAGE_NAME}" -lz4 -9vc "${VERITY_IMAGE}" >"${OUTPUT_DIR}/${VERITY_IMAGE_NAME}" -lz4 -9vc "${ROOT_IMAGE}" >"${OUTPUT_DIR}/${ROOT_IMAGE_NAME}" - -symlink_image "" "boot_image" -symlink_image "" "verity_image" -symlink_image "" "root_image" - -find "${OUTPUT_DIR}" -type f -print -exec chown 1000:1000 {} \; - -# Clean up temporary files to reduce size of layer. -rm -f "${PACKAGE_DIR}"/*.rpm -rm -rf /tmp/* diff --git a/tools/rpm2kmodkit b/tools/rpm2kmodkit deleted file mode 100755 index 079d98ce..00000000 --- a/tools/rpm2kmodkit +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash -# -# Create an archive of kernel development sources and toolchain. -set -eu -o pipefail - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --archive-dir=*) ARCHIVE_DIR="${optarg}" ;; - --toolchain-dir=*) TOOLCHAIN_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - esac -done - -# Store output artifacts in a versioned directory. 
-OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" - -# Use a friendly name for the top-level directory inside the archive. -KMOD_KIT="${VARIANT}-${ARCH}-kmod-kit-v${VERSION_ID}" - -# Use the build ID within the filename, to align with our build's expectations. -KMOD_KIT_FULL="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-kmod-kit" -KMOD_KIT_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-kmod-kit" -KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-kmod-kit" -KMOD_KIT_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-kmod-kit" - -EXTRACT_DIR="$(mktemp -d)" -KIT_DIR="$(mktemp -d)" -mkdir -p "${OUTPUT_DIR}" "${KIT_DIR}/${KMOD_KIT}" - -# Extract any RPMs and find the kernel development archive. -pushd "${EXTRACT_DIR}" >/dev/null -find "${ARCHIVE_DIR}" -type f -name '*.rpm' \ - -exec rpm2cpio {} \; | cpio -idm --quiet -find -name 'kernel-devel.tar.xz' \ - -exec mv {} "${KIT_DIR}/${KMOD_KIT}" \; -popd >/dev/null - -# Extract it and copy in the toolchain. -pushd "${KIT_DIR}/${KMOD_KIT}" >/dev/null -tar xf kernel-devel.tar.xz -rm kernel-devel.tar.xz -cp -a "${TOOLCHAIN_DIR}" toolchain -popd >/dev/null - -# Merge them together into a unified archive. -pushd "${KIT_DIR}" >/dev/null -tar cf "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" "${KMOD_KIT}" -xz -T0 "${OUTPUT_DIR}/${KMOD_KIT_FULL}.tar" -popd >/dev/null - -# Create friendly symlinks. -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_FRIENDLY_VERSIONED_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_VERSIONED_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT_SYMLINK}.tar.xz" -ln -s "${KMOD_KIT_FULL}.tar.xz" "${OUTPUT_DIR}/${KMOD_KIT}.tar.xz" - -rm -rf "${EXTRACT_DIR}" "${KIT_DIR}" diff --git a/tools/rpm2migrations b/tools/rpm2migrations deleted file mode 100755 index 87d3d87e..00000000 --- a/tools/rpm2migrations +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -# -# Retrieve migrations from the RPM and output an appropriately named tarball -set -eu -o pipefail - -for opt in "$@"; do - optarg="$(expr "${opt}" : '[^=]*=\(.*\)')" - case "${opt}" in - --package-dir=*) PACKAGE_DIR="${optarg}" ;; - --output-dir=*) OUTPUT_DIR="${optarg}" ;; - esac -done - -# Store output artifacts in a versioned directory. -OUTPUT_DIR="${OUTPUT_DIR}/${VERSION_ID}-${BUILD_ID}" -mkdir -p "${OUTPUT_DIR}" - -MIGRATIONS_ARCHIVE="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-${BUILD_ID}-migrations.tar" -MIGRATIONS_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-${VERSION_ID}-migrations.tar" -MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-v${VERSION_ID}-migrations.tar" -MIGRATIONS_SYMLINK="bottlerocket-${VARIANT}-${ARCH}-migrations.tar" -ROOT_TEMP="$(mktemp -d)" -SYS_ROOT="${ARCH}-bottlerocket-linux-gnu/sys-root" -MIGRATIONS_DIR="${ROOT_TEMP}/${SYS_ROOT}/usr/share/migrations" - -# "Install" the migrations (just puts them in $MIGRATIONS_DIR) -rpm -iv --ignorearch --root "${ROOT_TEMP}" "${PACKAGE_DIR}"/*.rpm - -if [ ! -d "${MIGRATIONS_DIR}" ]; then - echo "Migrations directory does not exist: ${MIGRATIONS_DIR}" - rm -rf "${ROOT_TEMP}" - exit 1 -fi - -# lz4 compress each migration -for migration in "${MIGRATIONS_DIR}"/*; do - [ -e "${migration}" ] || continue - lz4 -v "${migration}" "${migration}.lz4" -done - -# Tar up migrations with a .lz4 extension if they exist. 
-# Otherwise create an empty archive -pushd "${MIGRATIONS_DIR}" -if ls *.lz4 &> /dev/null; then - tar -cvf "${OUTPUT_DIR}/${MIGRATIONS_ARCHIVE}" *.lz4 -else - tar -cvf "${OUTPUT_DIR}/${MIGRATIONS_ARCHIVE}" --files-from /dev/null -fi -popd - -# Create friendly symlinks. -ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_FRIENDLY_VERSIONED_SYMLINK}" -ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_VERSIONED_SYMLINK}" -ln -s "${MIGRATIONS_ARCHIVE}" "${OUTPUT_DIR}/${MIGRATIONS_SYMLINK}" - -rm -rf "${ROOT_TEMP}" diff --git a/tools/testsys-config/Cargo.toml b/tools/testsys-config/Cargo.toml deleted file mode 100644 index 064097ff..00000000 --- a/tools/testsys-config/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "testsys-config" -version = "0.1.0" -authors = ["Ethan Pullen "] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } -bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } -handlebars = "4" -log = "0.4" -maplit = "1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } -serde = { version = "1", features = ["derive"] } -serde_plain = "1" -serde_yaml = "0.9" -snafu = "0.7" -toml = "0.5" diff --git a/tools/testsys-config/src/lib.rs b/tools/testsys-config/src/lib.rs deleted file mode 100644 index b43c06a6..00000000 --- a/tools/testsys-config/src/lib.rs +++ /dev/null @@ -1,554 +0,0 @@ -use bottlerocket_types::agent_config::KarpenterDeviceMapping; -use bottlerocket_variant::Variant; -pub use error::Error; -use handlebars::Handlebars; -use log::{debug, trace, warn}; -use maplit::btreemap; -use serde::{Deserialize, Serialize}; -use snafu::ResultExt; -use std::collections::{BTreeMap, HashMap}; -use std::fs; -use std::path::Path; -use testsys_model::constants::TESTSYS_VERSION; -use testsys_model::{DestructionPolicy, SecretName}; -pub type Result = std::result::Result; -use serde_plain::derive_fromstr_from_deserialize; - -/// Configuration needed to run tests -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -#[serde(rename_all = "kebab-case")] -pub struct TestConfig { - /// High level configuration for TestSys - pub test: Option, - - #[serde(flatten, serialize_with = "toml::ser::tables_last")] - /// Configuration for testing variants - pub configs: HashMap, -} - -impl TestConfig { - /// Deserializes a TestConfig from a given path - pub fn from_path
<P>
(path: P) -> Result - where - P: AsRef, - { - let path = path.as_ref(); - let test_config_str = fs::read_to_string(path).context(error::FileSnafu { path })?; - let mut config: Self = - toml::from_str(&test_config_str).context(error::InvalidTomlSnafu { path })?; - // Copy the GenericConfig from `test` to `configs`. - config.test.as_ref().and_then(|test| { - config - .configs - .insert("test".to_string(), test.config.clone()) - }); - - Ok(config) - } - - /// Deserializes a TestConfig from a given path, if it exists, otherwise builds a default - /// config - pub fn from_path_or_default
<P>
(path: P) -> Result - where - P: AsRef, - { - if path.as_ref().exists() { - Self::from_path(path) - } else { - warn!( - "No test config was found at '{}'. Using the default config.", - path.as_ref().display() - ); - Ok(Self::default()) - } - } - - /// Create a single config for the `variant` and `arch` from this test configuration by - /// determining a list of tables that contain information relevant to the arch, variant - /// combination. Then, the tables are reduced to a single config by selecting values from the - /// table based on the order of precedence. If `starting_config` is provided it will be used as - /// the config with the highest precedence. - pub fn reduced_config( - &self, - variant: &Variant, - arch: S, - starting_config: Option, - test_type: &str, - ) -> (GenericVariantConfig, String) - where - S: Into, - { - let arch = arch.into(); - // Starting with a list of keys ordered by precedence, return a single config with values - // selected by the order of the list. - let (test_type, configs) = config_keys(variant) - // Convert the vec of keys in to an iterator of keys. - .into_iter() - // Convert the iterator of keys to and iterator of Configs. If the key does not have a - // configuration in the config file, remove it from the iterator. - .filter_map(|key| self.configs.get(&key).cloned()) - // Reverse the iterator - .rev() - .fold( - (test_type.to_string(), Vec::new()), - |(test_type, mut configs), config| { - let (ordered_configs, test_type) = config.test_configs(test_type); - configs.push(ordered_configs); - (test_type, configs) - }, - ); - debug!("Resolved test-type '{}'", test_type); - ( - configs - .into_iter() - .rev() - .flatten() - // Take the iterator of configurations and extract the arch specific config and the - // non-arch specific config for each config. Then, convert them into a single iterator. - .flat_map(|config| vec![config.for_arch(&arch), config.config]) - // Take the iterator of configurations and merge them into a single config by populating - // each field with the first value that is not `None` while following the list of - // precedence. - .fold( - starting_config.unwrap_or_default(), - GenericVariantConfig::merge, - ), - test_type, - ) - } -} - -/// High level configurations for a test -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields, rename_all = "kebab-case")] -pub struct Test { - /// The name of the repo in `Infra.toml` that should be used for testing - pub repo: Option, - - /// The name of the vSphere data center in `Infra.toml` that should be used for testing - /// If no data center is provided, the first one in `vmware.datacenters` will be used - pub datacenter: Option, - - #[serde(flatten)] - /// The URI of TestSys images - pub testsys_images: TestsysImages, - - /// A registry containing all TestSys images - pub testsys_image_registry: Option, - - /// The tag that should be used for TestSys images - pub testsys_image_tag: Option, - - #[serde(flatten)] - /// Configuration values for all Bottlerocket variants - pub config: GenericConfig, -} - -/// Create a vec of relevant keys for this variant ordered from most specific to least specific. 
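As a worked example of this ordering, the sketch below lists the lookup keys config_keys() below would produce for a hypothetical variant named "aws-k8s-1.24-nvidia"; it is illustrative only.

    fn main() {
        // Most specific first, ending at the global [test] table.
        let keys = vec![
            "aws-k8s-1.24-nvidia", // full variant
            "aws-k8s-nvidia",      // family + flavor
            "aws-k8s",             // family
            "aws-nvidia",          // platform + flavor
            "aws",                 // platform
            "test",                // global defaults
        ];
        for key in keys {
            println!("{key}");
        }
    }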
-fn config_keys(variant: &Variant) -> Vec { - let (family_flavor, platform_flavor) = variant - .variant_flavor() - .map(|flavor| { - ( - format!("{}-{}", variant.family(), flavor), - format!("{}-{}", variant.platform(), flavor), - ) - }) - .unwrap_or_default(); - - // The keys used to describe configuration (most specific -> least specific) - vec![ - variant.to_string(), - family_flavor, - variant.family().to_string(), - platform_flavor, - variant.platform().to_string(), - "test".to_string(), - ] -} - -/// All configurations for a specific config level, i.e `-` -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -pub struct GenericConfig { - #[serde(default)] - aarch64: GenericVariantConfig, - #[serde(default)] - x86_64: GenericVariantConfig, - #[serde(default, flatten)] - config: GenericVariantConfig, - #[serde(default)] - configuration: HashMap, - #[serde(rename = "test-type")] - test_type: Option, -} - -impl GenericConfig { - /// Get the configuration for a specific arch. - pub fn for_arch(&self, arch: S) -> GenericVariantConfig - where - S: Into, - { - match arch.into().as_str() { - "x86_64" => self.x86_64.clone(), - "aarch64" => self.aarch64.clone(), - _ => Default::default(), - } - } - - /// Get the configuration for a specific test type. - pub fn test(&self, test_type: S) -> GenericConfig - where - S: AsRef, - { - self.configuration - .get(test_type.as_ref()) - .cloned() - .unwrap_or_default() - } - - /// Get a set of `GenericConfig`s following test types (test_type -> generic config). - fn test_configs(&self, test_type: S) -> (Vec, String) - where - S: AsRef, - { - // A vec containing all relevant test configs for this `GenericConfig` starting with - // `test_type` and ending with the `GenericConfig` itself. - let mut configs = Vec::new(); - // Track the last test_type that we added to `configs` - let mut cur_test_type = test_type.as_ref().to_string(); - loop { - // Add the config for the current test type (if the config doesn't exist, an empty - // config is added) - let test_config = self.test(&cur_test_type); - configs.push(test_config.clone()); - // If the current test config specifies another test type, that test type needs to be - // added to the configurations. - if let Some(test_type) = test_config.test_type.to_owned() { - trace!("Test-type '{}' resolves to '{}'", cur_test_type, test_type); - cur_test_type = test_type; - } else { - break; - } - } - - // Add the `self` config - configs.push(self.clone()); - (configs, cur_test_type) - } -} - -/// The configuration for a specific config level (-). This may or may not be arch -/// specific depending on it's location in `GenericConfig`. -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -#[serde(rename_all = "kebab-case")] -pub struct GenericVariantConfig { - /// The names of all clusters this variant should be tested over. This is particularly useful - /// for testing Bottlerocket on ipv4 and ipv6 clusters. 
- #[serde(default)] - pub cluster_names: Vec, - /// The instance type that instances should be launched with - pub instance_type: Option, - /// Specify how Bottlerocket instances should be launched (ec2, karpenter) - pub resource_agent_type: Option, - /// Launch instances with the following Block Device Mapping - #[serde(default)] - pub block_device_mapping: Vec, - /// The secrets needed by the agents - #[serde(default)] - pub secrets: BTreeMap, - /// The role that should be assumed for this particular variant - pub agent_role: Option, - /// The location of the sonobuoy testing image - pub sonobuoy_image: Option, - /// The custom images used for conformance testing - pub conformance_image: Option, - /// The custom registry used for conformance testing - pub conformance_registry: Option, - /// The endpoint IP to reserve for the vSphere control plane VMs when creating a K8s cluster - pub control_plane_endpoint: Option, - /// The path to userdata that should be used for Bottlerocket launch - pub userdata: Option, - /// The directory containing Bottlerocket images. For metal, this is the directory containing - /// gzipped images. - pub os_image_dir: Option, - /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the - /// hardware csv that is passed to EKS Anywhere. - pub hardware_csv: Option, - /// The workload tests that should be run - #[serde(default)] - pub workloads: BTreeMap, - #[serde(default)] - pub dev: DeveloperConfig, -} - -impl GenericVariantConfig { - /// Overwrite the unset values of `self` with the set values of `other` - fn merge(self, other: Self) -> Self { - let cluster_names = if self.cluster_names.is_empty() { - other.cluster_names - } else { - self.cluster_names - }; - - let secrets = if self.secrets.is_empty() { - other.secrets - } else { - self.secrets - }; - - let workloads = if self.workloads.is_empty() { - other.workloads - } else { - self.workloads - }; - - let block_device_mapping = if self.block_device_mapping.is_empty() { - other.block_device_mapping - } else { - self.block_device_mapping - }; - - Self { - cluster_names, - instance_type: self.instance_type.or(other.instance_type), - resource_agent_type: self.resource_agent_type.or(other.resource_agent_type), - block_device_mapping, - secrets, - agent_role: self.agent_role.or(other.agent_role), - sonobuoy_image: self.sonobuoy_image.or(other.sonobuoy_image), - conformance_image: self.conformance_image.or(other.conformance_image), - conformance_registry: self.conformance_registry.or(other.conformance_registry), - control_plane_endpoint: self.control_plane_endpoint.or(other.control_plane_endpoint), - userdata: self.userdata.or(other.userdata), - os_image_dir: self.os_image_dir.or(other.os_image_dir), - hardware_csv: self.hardware_csv.or(other.hardware_csv), - workloads, - dev: self.dev.merge(other.dev), - } - } -} - -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(rename_all = "kebab-case")] -pub enum ResourceAgentType { - Karpenter, - Ec2, -} - -impl Default for ResourceAgentType { - fn default() -> Self { - Self::Ec2 - } -} - -derive_fromstr_from_deserialize!(ResourceAgentType); - -/// The configuration for a specific config level (-). This may or may not be arch -/// specific depending on it's location in `GenericConfig`. -/// The configurable fields here add refined control to TestSys objects. 
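DeveloperConfig below follows the same merge convention as GenericVariantConfig above. As a minimal standalone sketch of that convention, using two of the fields from above for illustration only:

    #[derive(Debug, Default)]
    struct Cfg {
        instance_type: Option<String>,
        cluster_names: Vec<String>,
    }

    impl Cfg {
        // Scalars fall back with Option::or; collections are taken wholesale from
        // `other` only when `self`'s are empty, so a more specific table fully
        // overrides a list rather than appending to it.
        fn merge(self, other: Self) -> Self {
            Cfg {
                instance_type: self.instance_type.or(other.instance_type),
                cluster_names: if self.cluster_names.is_empty() {
                    other.cluster_names
                } else {
                    self.cluster_names
                },
            }
        }
    }

    fn main() {
        let specific = Cfg { instance_type: Some("m5.large".to_string()), ..Default::default() };
        let general = Cfg {
            instance_type: Some("m5.xlarge".to_string()),
            cluster_names: vec!["default".to_string()],
        };
        let merged = specific.merge(general);
        // instance_type comes from the specific table, cluster_names from the general one.
        println!("{merged:?}");
    }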
-#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -#[serde(rename_all = "kebab-case")] -pub struct DeveloperConfig { - /// Control the destruction behavior of cluster CRDs - pub cluster_destruction_policy: Option, - /// Control the destruction behavior of Bottlerocket CRDs - pub bottlerocket_destruction_policy: Option, - /// Keep test pods running on completion - pub keep_tests_running: Option, - /// Use an alternate account for image lookup - pub image_account_id: Option, - /// Overrides the EKS service endpoint for TestSys agents gathering EKS cluster metadata - /// (only for pre-existing EKS clusters, does not apply to new EKS cluster creation) - pub eks_service_endpoint: Option, - /// A manifest containing the EKS Anywhere binary that should be used for cluster provisioning - pub eks_a_release_manifest_url: Option, -} - -impl DeveloperConfig { - /// Overwrite the unset values of `self` with the set values of `other` - fn merge(self, other: Self) -> Self { - Self { - cluster_destruction_policy: self - .cluster_destruction_policy - .or(other.cluster_destruction_policy), - bottlerocket_destruction_policy: self - .bottlerocket_destruction_policy - .or(other.bottlerocket_destruction_policy), - keep_tests_running: self.keep_tests_running.or(other.keep_tests_running), - image_account_id: self.image_account_id.or(other.image_account_id), - eks_service_endpoint: self.eks_service_endpoint.or(other.eks_service_endpoint), - eks_a_release_manifest_url: self - .eks_a_release_manifest_url - .or(other.eks_a_release_manifest_url), - } - } -} - -/// Fill in the templated cluster name with `arch` and `variant`. -pub fn rendered_cluster_name(cluster_name: String, arch: S1, variant: S2) -> Result -where - S1: Into, - S2: Into, -{ - let mut cluster_template = Handlebars::new(); - cluster_template.register_template_string("cluster_name", cluster_name)?; - Ok(cluster_template.render( - "cluster_name", - &btreemap! {"arch".to_string() => arch.into(), "variant".to_string() => variant.into()}, - )?) -} - -#[derive(Debug, Default, Deserialize, Serialize, PartialEq, Eq, Clone)] -#[serde(deny_unknown_fields)] -#[serde(rename_all = "kebab-case")] -pub struct TestsysImages { - pub eks_resource_agent_image: Option, - pub ecs_resource_agent_image: Option, - pub vsphere_k8s_cluster_resource_agent_image: Option, - pub metal_k8s_cluster_resource_agent_image: Option, - pub ec2_resource_agent_image: Option, - pub ec2_karpenter_resource_agent_image: Option, - pub vsphere_vm_resource_agent_image: Option, - pub sonobuoy_test_agent_image: Option, - pub ecs_test_agent_image: Option, - pub migration_test_agent_image: Option, - pub k8s_workload_agent_image: Option, - pub ecs_workload_agent_image: Option, - pub controller_image: Option, - pub testsys_agent_pull_secret: Option, -} - -impl TestsysImages { - /// Create an images config for a specific registry. 
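To make the resulting URIs concrete, a usage sketch of the constructor below, assuming TESTSYS_VERSION resolves to "0.0.9" (matching the bottlerocket-test-system tag pinned in Cargo.toml); this is not part of the module's code.

    // Same call as public_images() below.
    let images = TestsysImages::new("public.ecr.aws/bottlerocket-test-system", None);
    // images.eks_resource_agent_image
    //     == Some("public.ecr.aws/bottlerocket-test-system/eks-resource-agent:v0.0.9".to_string())
    // images.controller_image
    //     == Some("public.ecr.aws/bottlerocket-test-system/controller:v0.0.9".to_string())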
- pub fn new(registry: S, tag: Option) -> Self - where - S: Into, - { - let registry = registry.into(); - let tag = tag.unwrap_or_else(|| format!("v{}", TESTSYS_VERSION)); - Self { - eks_resource_agent_image: Some(format!("{}/eks-resource-agent:{tag}", registry)), - ecs_resource_agent_image: Some(format!("{}/ecs-resource-agent:{tag}", registry)), - vsphere_k8s_cluster_resource_agent_image: Some(format!( - "{}/vsphere-k8s-cluster-resource-agent:{tag}", - registry - )), - metal_k8s_cluster_resource_agent_image: Some(format!( - "{}/metal-k8s-cluster-resource-agent:{tag}", - registry - )), - ec2_resource_agent_image: Some(format!("{}/ec2-resource-agent:{tag}", registry)), - ec2_karpenter_resource_agent_image: Some(format!( - "{}/ec2-karpenter-resource-agent:{tag}", - registry - )), - vsphere_vm_resource_agent_image: Some(format!( - "{}/vsphere-vm-resource-agent:{tag}", - registry - )), - sonobuoy_test_agent_image: Some(format!("{}/sonobuoy-test-agent:{tag}", registry)), - ecs_test_agent_image: Some(format!("{}/ecs-test-agent:{tag}", registry)), - migration_test_agent_image: Some(format!("{}/migration-test-agent:{tag}", registry)), - k8s_workload_agent_image: Some(format!("{}/k8s-workload-agent:{tag}", registry)), - ecs_workload_agent_image: Some(format!("{}/ecs-workload-agent:{tag}", registry)), - controller_image: Some(format!("{}/controller:{tag}", registry)), - testsys_agent_pull_secret: None, - } - } - - pub fn merge(self, other: Self) -> Self { - Self { - eks_resource_agent_image: self - .eks_resource_agent_image - .or(other.eks_resource_agent_image), - ecs_resource_agent_image: self - .ecs_resource_agent_image - .or(other.ecs_resource_agent_image), - vsphere_k8s_cluster_resource_agent_image: self - .vsphere_k8s_cluster_resource_agent_image - .or(other.vsphere_k8s_cluster_resource_agent_image), - metal_k8s_cluster_resource_agent_image: self - .metal_k8s_cluster_resource_agent_image - .or(other.metal_k8s_cluster_resource_agent_image), - vsphere_vm_resource_agent_image: self - .vsphere_vm_resource_agent_image - .or(other.vsphere_vm_resource_agent_image), - ec2_resource_agent_image: self - .ec2_resource_agent_image - .or(other.ec2_resource_agent_image), - ec2_karpenter_resource_agent_image: self - .ec2_karpenter_resource_agent_image - .or(other.ec2_karpenter_resource_agent_image), - sonobuoy_test_agent_image: self - .sonobuoy_test_agent_image - .or(other.sonobuoy_test_agent_image), - ecs_test_agent_image: self.ecs_test_agent_image.or(other.ecs_test_agent_image), - migration_test_agent_image: self - .migration_test_agent_image - .or(other.migration_test_agent_image), - k8s_workload_agent_image: self - .k8s_workload_agent_image - .or(other.k8s_workload_agent_image), - ecs_workload_agent_image: self - .ecs_workload_agent_image - .or(other.ecs_workload_agent_image), - controller_image: self.controller_image.or(other.controller_image), - testsys_agent_pull_secret: self - .testsys_agent_pull_secret - .or(other.testsys_agent_pull_secret), - } - } - - pub fn public_images() -> Self { - Self::new("public.ecr.aws/bottlerocket-test-system", None) - } -} - -mod error { - use snafu::Snafu; - use std::io; - use std::path::PathBuf; - - #[derive(Debug, Snafu)] - #[snafu(visibility(pub(super)))] - pub enum Error { - #[snafu(display("Failed to read '{}': {}", path.display(), source))] - File { path: PathBuf, source: io::Error }, - - #[snafu(display("Invalid config file at '{}': {}", path.display(), source))] - InvalidToml { - path: PathBuf, - source: toml::de::Error, - }, - - #[snafu(display("Invalid lock 
file at '{}': {}", path.display(), source))] - InvalidLock { - path: PathBuf, - source: serde_yaml::Error, - }, - - #[snafu(display("Missing config: {}", what))] - MissingConfig { what: String }, - - #[snafu(display("Failed to get parent of path: {}", path.display()))] - Parent { path: PathBuf }, - - #[snafu( - context(false), - display("Failed to create template for cluster name: {}", source) - )] - TemplateError { - #[snafu(source(from(handlebars::TemplateError, Box::new)))] - source: Box, - }, - - #[snafu( - context(false), - display("Failed to render templated cluster name: {}", source) - )] - RenderError { source: handlebars::RenderError }, - } -} diff --git a/tools/testsys/Cargo.toml b/tools/testsys/Cargo.toml deleted file mode 100644 index ff7ccce2..00000000 --- a/tools/testsys/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "testsys" -version = "0.1.0" -authors = [ - "Ethan Pullen ", - "Matt Briggs ", -] -license = "Apache-2.0 OR MIT" -edition = "2021" -publish = false - -[dependencies] -async-trait = "0.1" -aws-config = "0.55" -aws-sdk-ec2 = "0.28" -base64 = "0.20" -bottlerocket-types = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } -bottlerocket-variant = { version = "0.1", path = "../../sources/bottlerocket-variant" } -clap = { version = "4", features = ["derive", "env"] } -env_logger = "0.10" -futures = "0.3" -handlebars = "4" -log = "0.4" -maplit = "1" -testsys-model = { git = "https://github.com/bottlerocket-os/bottlerocket-test-system", version = "0.0.9", tag = "v0.0.9" } -pubsys-config = { path = "../pubsys-config/", version = "0.1.0" } -fastrand = "1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -serde_plain = "1" -serde_yaml = "0.9" -snafu = "0.7" -term_size = "0.3" -testsys-config = { path = "../testsys-config/", version = "0.1" } -tokio = { version = "1", features = ["macros", "rt-multi-thread", "fs"] } -unescape = "0.1" -url = "2" diff --git a/tools/testsys/Test.toml.example b/tools/testsys/Test.toml.example deleted file mode 100644 index 415df1ee..00000000 --- a/tools/testsys/Test.toml.example +++ /dev/null @@ -1,125 +0,0 @@ -# This is an example testing configuration for TestSys, the tool that is used to validate -# Bottlerocket builds. - -# This section contains configuration details for all testing -[test] - -# The repo from `Infra.toml` that should be used for Bottlerocket update images. It may be useful to -# create a repo in `Infra.toml` that contains the infrastructure needed for testing -repo = "default" - -# The registry containing alternate TestSys agent images -testsys-image-registry = "public.ecr.aws/bottlerocket-test-system" - -# The tag that should be used with `testsys-images-registry` for image pulls -testsys-images-registry = "latest" - -# The URI for the EKS resource agent that should be used. An individual agent's provided URI will be -# used even if `testsys-image-registry` is present. -eks-resource-agent-image = "public.ecr.aws/bottlerocket-test-system/eks_resource_agent:v0.0.2" - -# Test Configurations -# -# Testing requirements tend to differ by variant and architecture. This configuration file provides -# the ability to set values that apply generally to a broad group of similar variants, and to -# override those values at a more granular level. For example, you can set a value for all `aws-k8s` -# variants, then override that value for 'aws-k8s-nvidia' variants, and further override the value -# for 'aws-k8s-nvidia'.aarch64 builds. 
-#
-# The mechanism for resolving configuration values has the following order of precedence:
-#
-# '<variant>'.ARCH
-# '<variant>'
-# '<family>-<flavor>'.ARCH
-# '<family>-<flavor>'
-# '<family>'.ARCH
-# '<family>'
-# '<platform>-<flavor>'.ARCH
-# '<platform>-<flavor>'
-# '<platform>'.ARCH
-# '<platform>'
-#
-# For a concrete example, given a variant such as `aws-k8s-1.23-nvidia` with the architecture
-# `x86_64`, configurations will have the following order of precedence:
-# ['aws-k8s-1.23-nvidia'.x86_64]
-# ['aws-k8s-1.23-nvidia']
-# ['aws-k8s-nvidia'.x86_64]
-# ['aws-k8s-nvidia']
-# ['aws-k8s'.x86_64]
-# ['aws-k8s']
-# ['aws-nvidia'.x86_64]
-# ['aws-nvidia']
-# ['aws'.x86_64]
-# ['aws']
-#
-# Configurable values:
-#
-# cluster-names:
-# All clusters the variant should be tested over. Cluster naming supports templated strings, and
-# both `arch` and `variant` are provided as variables (`{{arch}}-{{variant}}`).
-#
-# instance-type:
-# The instance type that should be used for testing.
-#
-# secrets:
-# A map containing the names of all kubernetes secrets needed for resource creation and testing.
-#
-# agent-role:
-# The role that should be assumed by each test and resource agent.
-#
-# conformance-image: (K8s only)
-# Specify a custom image for conformance testing. For `aws-k8s` variants this will be used as a
-# custom Kubernetes conformance image for Sonobuoy.
-#
-# conformance-registry: (K8s only)
-# Specify a custom registry for conformance testing images.
-# For `aws-k8s` variants this will be used as the Sonobuoy e2e registry.
-#
-# Note: values passed by command line argument will take precedence over those passed by environment
-# variable, and both take precedence over values set by `Test.toml`.
-
-# Additional fields are configurable with the `dev` table.
-# See `DeveloperConfig` for individual fields.
-
-# Example Configurations
-
-# Configuration for all variants with the `aws` platform.
-[aws]
-agent-role = ""
-
-# Configuration for all nvidia AWS variants on x86_64 (platform-flavor level configuration)
-[aws-nvidia.x86_64]
-instance-type = "p3.2xlarge"
-
-# Configuration for all nvidia AWS variants on aarch64 (platform-flavor level configuration)
-[aws-nvidia.aarch64]
-instance-type = "g5g.2xlarge"
-
-# Configuration for all `aws-k8s` variants testing (family level configuration).
-[aws-k8s]
-# A single role can be assumed by agents to test all `aws-k8s` variants in a separate
-# testing account.
-agent-role = "arn:aws:iam:::role/"
-
-# The cluster name templating can be defined for all `aws-k8s` variants. To test on ipv4 and ipv6
-# clusters, the following templates could be used. Note: TestSys does not currently support creating
-# ipv6 clusters, so the ipv6 cluster must already exist.
-cluster-names = ["{{arch}}-{{variant}}", "{{arch}}-{{variant}}-ipv6"]
-
-# A custom conformance registry may be needed for testing if image pull reliability is a concern.
-conformance-registry = ".dkr.ecr.cn-north-1.amazonaws.com.cn"
-
-# If testing using a kind cluster, AWS credentials need to be passed as a K8s secret.
-secrets = {"awsCreds" = "myAwsCredentials"}
-
-# Configuration for all nvidia AWS variants on x86_64 (family-flavor level configuration)
-[aws-ecs-nvidia.x86_64]
-instance-type = "p3.2xlarge"
-
-# Configuration for all nvidia AWS variants on aarch64 (family-flavor level configuration)
-[aws-ecs-nvidia.aarch64]
-instance-type = "g5g.2xlarge"
-
-# Configuration for only the `aws-k8s-1.24` variant (variant level configuration).
-["aws-k8s-1.24".aarch64] -conformance-image = "" diff --git a/tools/testsys/src/aws_ecs.rs b/tools/testsys/src/aws_ecs.rs deleted file mode 100644 index f021528d..00000000 --- a/tools/testsys/src/aws_ecs.rs +++ /dev/null @@ -1,281 +0,0 @@ -use crate::aws_resources::{ami, ami_name, ec2_crd, get_ami_id}; -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use bottlerocket_types::agent_config::{ - ClusterType, EcsClusterConfig, EcsTestConfig, EcsWorkloadTestConfig, WorkloadTest, -}; -use log::debug; -use maplit::btreemap; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use testsys_model::{Crd, DestructionPolicy, Test}; - -/// A `CrdCreator` responsible for creating crd related to `aws-ecs` variants. -pub(crate) struct AwsEcsCreator { - pub(crate) region: String, - pub(crate) ami_input: String, - pub(crate) migrate_starting_commit: Option, -} - -#[async_trait::async_trait] -impl CrdCreator for AwsEcsCreator { - /// Determine the AMI from `amis.json`. - async fn image_id(&self, _: &CrdInput) -> Result { - ami(&self.ami_input, &self.region) - } - - /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. - async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version - .as_ref() - .context(error::InvalidSnafu{ - what: "The starting version must be provided for migration testing" - })?, self.migrate_starting_commit - .as_ref() - .context(error::InvalidSnafu{ - what: "The commit for the starting version must be provided if the starting image id is not" - })?) - , &crd_input.arch, - & self.region, - crd_input.config.dev.image_account_id.as_deref(), - ) - .await - } - - /// Create an ECS cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - debug!("Creating ECS cluster CRD"); - // Create labels that will be used for identifying existing CRDs for an ECS cluster. - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(), - "testsys/region".to_string() => self.region.clone() - }); - - // Check if the cluster already has a CRD in the TestSys cluster. - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await? - .pop() - { - // Return the name of the existing CRD for the cluster. - debug!("ECS cluster CRD already exists with name '{}'", cluster_crd); - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - // Create the CRD for ECS cluster creation. 
- let ecs_crd = EcsClusterConfig::builder() - .cluster_name(cluster_input.cluster_name) - .region(Some(self.region.to_owned())) - .assume_role(cluster_input.crd_input.config.agent_role.clone()) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ) - .image( - cluster_input - .crd_input - .images - .ecs_resource_agent_image - .as_ref() - .expect("The default ecs resource provider image uri is missing."), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .set_labels(Some(labels)) - .set_secrets(Some(cluster_input.crd_input.config.secrets.clone())) - .build(cluster_input.cluster_name) - .context(error::BuildSnafu { - what: "ECS cluster CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(ecs_crd)))) - } - - /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by - /// `cluster_crd`. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - ec2_crd(bottlerocket_input, ClusterType::Ecs, &self.region).await?, - )))) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - None, - "ids", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - - // Create labels that are used to help filter status. - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - - let test_crd = EcsTestConfig::builder() - .cluster_name_template(cluster_resource_name, "clusterName") - .region(Some(self.region.to_owned())) - .task_count(1) - .assume_role(test_input.crd_input.config.agent_role.to_owned()) - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .ecs_test_agent_image - .to_owned() - .expect("The default ECS testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running( - test_input - .crd_input - .config - .dev - .keep_tests_running - .unwrap_or(false), - ) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}-{}", - cluster_resource_name, - test_input - .name_suffix - .unwrap_or(test_input.crd_input.test_flavor.as_str()) - )) - .context(error::BuildSnafu { - what: "ECS test CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(test_crd)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - &self.region, - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} - -/// Create a workload CRD for K8s testing. 
-pub(crate) fn workload_crd(region: &str, test_input: TestInput) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for ECS workload tests"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A bottlerocket resource name is required for ECS workload tests"); - - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - let gpu = test_input.crd_input.variant.variant_flavor() == Some("nvidia"); - let plugins: Vec<_> = test_input - .crd_input - .config - .workloads - .iter() - .map(|(name, image)| WorkloadTest { - name: name.to_string(), - image: image.to_string(), - gpu, - }) - .collect(); - if plugins.is_empty() { - return Err(error::Error::Invalid { - what: "There were no plugins specified in the workload test. - Workloads can be specified in `Test.toml` or via the command line." - .to_string(), - }); - } - - EcsWorkloadTestConfig::builder() - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .ecs_workload_agent_image - .to_owned() - .expect("The default K8s workload testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running(true) - .region(region.to_string()) - .cluster_name_template(cluster_resource_name, "clusterName") - .assume_role(test_input.crd_input.config.agent_role.to_owned()) - .tests(plugins) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}{}", - cluster_resource_name, - test_input.name_suffix.unwrap_or("-test") - )) - .context(error::BuildSnafu { - what: "Workload CRD", - }) -} diff --git a/tools/testsys/src/aws_k8s.rs b/tools/testsys/src/aws_k8s.rs deleted file mode 100644 index 2fc063fa..00000000 --- a/tools/testsys/src/aws_k8s.rs +++ /dev/null @@ -1,238 +0,0 @@ -use crate::aws_resources::{ami, ami_name, ec2_crd, ec2_karpenter_crd, get_ami_id}; -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use crate::sonobuoy::{sonobuoy_crd, workload_crd}; -use bottlerocket_types::agent_config::{ - ClusterType, CreationPolicy, EksClusterConfig, EksctlConfig, K8sVersion, -}; -use maplit::btreemap; -use serde_yaml::Value; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use std::str::FromStr; -use testsys_config::ResourceAgentType; -use testsys_model::{Crd, DestructionPolicy}; - -/// A `CrdCreator` responsible for creating crd related to `aws-k8s` variants. -pub(crate) struct AwsK8sCreator { - pub(crate) region: String, - pub(crate) ami_input: String, - pub(crate) migrate_starting_commit: Option, -} - -#[async_trait::async_trait] -impl CrdCreator for AwsK8sCreator { - /// Determine the AMI from `amis.json`. - async fn image_id(&self, _: &CrdInput) -> Result { - ami(&self.ami_input, &self.region) - } - - /// Determine the starting image from EC2 using standard Bottlerocket naming conventions. 
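The ECS workload CRD above turns the `workloads` table from `Test.toml` (test name mapped to a container image) into a list of test definitions, flagging each for GPU when the variant flavor is `nvidia`. A rough sketch of that mapping, using a local stand-in type instead of the real `WorkloadTest` and an illustrative image URI:

    use std::collections::BTreeMap;

    // Local stand-in for the agent's workload test description.
    #[derive(Debug)]
    struct WorkloadSpec {
        name: String,
        image: String,
        gpu: bool,
    }

    fn workload_specs(workloads: &BTreeMap<String, String>, flavor: Option<&str>) -> Vec<WorkloadSpec> {
        let gpu = flavor == Some("nvidia");
        workloads
            .iter()
            .map(|(name, image)| WorkloadSpec {
                name: name.clone(),
                image: image.clone(),
                gpu,
            })
            .collect()
    }

    fn main() {
        let mut workloads = BTreeMap::new();
        workloads.insert(
            "nvidia-smoke".to_string(),
            "example.com/nvidia-workload:latest".to_string(),
        );
        let specs = workload_specs(&workloads, Some("nvidia"));
        // Every workload on an nvidia-flavored variant gets the GPU flag.
        assert!(specs.iter().all(|spec| spec.gpu));
        println!("{:?}", specs);
    }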
- async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - get_ami_id(ami_name(&crd_input.arch,&crd_input.variant,crd_input.starting_version - .as_ref() - .context(error::InvalidSnafu{ - what: "The starting version must be provided for migration testing" - })?, self.migrate_starting_commit - .as_ref() - .context(error::InvalidSnafu{ - what: "The commit for the starting version must be provided if the starting image id is not" - })?) - , &crd_input.arch, - & self.region, - crd_input.config.dev.image_account_id.as_deref(), - ) - .await - } - - /// Create an EKS cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - let cluster_version = - K8sVersion::from_str(cluster_input.crd_input.variant.version().context( - error::MissingSnafu { - item: "K8s version".to_string(), - what: "aws-k8s variant".to_string(), - }, - )?) - .map_err(|_| error::Error::K8sVersion { - version: cluster_input.crd_input.variant.to_string(), - })?; - - let (cluster_name, region, config) = match cluster_input.cluster_config { - Some(config) => { - let (cluster_name, region) = cluster_config_data(config)?; - ( - cluster_name, - region, - EksctlConfig::File { - encoded_config: base64::encode(config), - }, - ) - } - None => ( - cluster_input.cluster_name.to_string(), - self.region.clone(), - EksctlConfig::Args { - cluster_name: cluster_input.cluster_name.to_string(), - region: Some(self.region.clone()), - zones: None, - version: Some(cluster_version), - }, - ), - }; - - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.clone() - }); - - // Check if the cluster already has a crd - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await? - .pop() - { - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - let eks_crd = EksClusterConfig::builder() - .creation_policy(CreationPolicy::IfNotExists) - .eks_service_endpoint( - cluster_input - .crd_input - .config - .dev - .eks_service_endpoint - .clone(), - ) - .assume_role(cluster_input.crd_input.config.agent_role.clone()) - .config(config) - .image( - cluster_input - .crd_input - .images - .eks_resource_agent_image - .to_owned() - .expect("Missing default image for EKS resource agent"), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_secrets(Some(cluster_input.crd_input.config.secrets.clone())) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::Never), - ) - .build(cluster_name) - .context(error::BuildSnafu { - what: "EKS cluster CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource(eks_crd)))) - } - - /// Create an EC2 provider CRD to launch Bottlerocket instances on the cluster created by - /// `cluster_crd`. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - match bottlerocket_input - .crd_input - .config - .resource_agent_type - .to_owned() - .unwrap_or_default() - { - ResourceAgentType::Ec2 => { - ec2_crd(bottlerocket_input, ClusterType::Eks, &self.region).await? 
- } - ResourceAgentType::Karpenter => { - ec2_karpenter_crd(bottlerocket_input, &self.region).await? - } - }, - )))) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - None, - "ids", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( - test_input, - )?)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} - -/// Converts a eksctl cluster config to a `serde_yaml::Value` and extracts the cluster name and -/// region from it. -fn cluster_config_data(cluster_config: &str) -> Result<(String, String)> { - let config: Value = serde_yaml::from_str(cluster_config).context(error::SerdeYamlSnafu { - what: "Unable to deserialize cluster config", - })?; - - let (cluster_name, region) = config - .get("metadata") - .map(|metadata| { - ( - metadata.get("name").and_then(|name| name.as_str()), - metadata.get("region").and_then(|region| region.as_str()), - ) - }) - .context(error::MissingSnafu { - item: "metadata", - what: "eksctl config", - })?; - Ok(( - cluster_name - .context(error::MissingSnafu { - item: "name", - what: "eksctl config metadata", - })? - .to_string(), - region - .context(error::MissingSnafu { - item: "region", - what: "eksctl config metadata", - })? - .to_string(), - )) -} diff --git a/tools/testsys/src/aws_resources.rs b/tools/testsys/src/aws_resources.rs deleted file mode 100644 index aa97a3fb..00000000 --- a/tools/testsys/src/aws_resources.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::crds::BottlerocketInput; -use crate::error::{self, Result}; -use aws_sdk_ec2::config::Region; -use aws_sdk_ec2::types::{Filter, Image}; -use bottlerocket_types::agent_config::{ - ClusterType, CustomUserData, Ec2Config, Ec2KarpenterConfig, KarpenterDeviceMapping, -}; -use maplit::btreemap; -use serde::Deserialize; -use snafu::{ensure, OptionExt, ResultExt}; -use std::collections::HashMap; -use std::fs::File; -use std::iter::repeat_with; -use testsys_model::{DestructionPolicy, Resource}; - -/// Get the AMI for the given `region` from the `ami_input` file. -pub(crate) fn ami(ami_input: &str, region: &str) -> Result { - let file = File::open(ami_input).context(error::IOSnafu { - what: "Unable to open amis.json", - })?; - // Convert the `ami_input` file to a `HashMap` that maps regions to AMI id. - let amis: HashMap = - serde_json::from_reader(file).context(error::SerdeJsonSnafu { - what: format!("Unable to deserialize '{}'", ami_input), - })?; - // Make sure there are some AMIs present in the `ami_input` file. - ensure!( - !amis.is_empty(), - error::InvalidSnafu { - what: format!("{} is empty", ami_input) - } - ); - Ok(amis - .get(region) - .context(error::InvalidSnafu { - what: format!("AMI not found for region '{}'", region), - })? - .id - .clone()) -} - -/// Queries EC2 for the given AMI name. If found, returns Ok(Some(id)), if not returns Ok(None). -pub(crate) async fn get_ami_id( - name: S1, - arch: S2, - region: S3, - account: Option<&str>, -) -> Result -where - S1: Into, - S2: Into, - S3: Into, -{ - // Create the `aws_config` that will be used to search EC2 for AMIs. 
- // TODO: Follow chain of assumed roles for creating config like pubsys uses. - let config = aws_config::from_env() - .region(Region::new(region.into())) - .load() - .await; - let ec2_client = aws_sdk_ec2::Client::new(&config); - // Find all images named `name` on `arch` in the `region`. - let describe_images = ec2_client - .describe_images() - .owners(account.unwrap_or("self")) - .filters(Filter::builder().name("name").values(name).build()) - .filters( - Filter::builder() - .name("image-type") - .values("machine") - .build(), - ) - .filters(Filter::builder().name("architecture").values(arch).build()) - .filters( - Filter::builder() - .name("virtualization-type") - .values("hvm") - .build(), - ) - .send() - .await? - .images; - let images: Vec<&Image> = describe_images.iter().flatten().collect(); - // Make sure there is exactly 1 image that matches the parameters. - if images.len() > 1 { - return Err(error::Error::Invalid { - what: "Unable to determine AMI. Multiple images were found".to_string(), - }); - }; - if let Some(image) = images.last().as_ref() { - Ok(image - .image_id() - .context(error::InvalidSnafu { - what: "No image id for AMI", - })? - .to_string()) - } else { - Err(error::Error::Invalid { - what: "Unable to determine AMI. No images were found".to_string(), - }) - } -} - -/// Get the standard Bottlerocket AMI name. -pub(crate) fn ami_name(arch: &str, variant: &str, version: &str, commit_id: &str) -> String { - format!( - "bottlerocket-{}-{}-{}-{}", - variant, arch, version, commit_id - ) -} - -#[derive(Clone, Debug, Deserialize)] -pub(crate) struct AmiImage { - pub(crate) id: String, -} - -/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. -pub(crate) async fn ec2_crd<'a>( - bottlerocket_input: BottlerocketInput<'a>, - cluster_type: ClusterType, - region: &str, -) -> Result { - if !bottlerocket_input - .crd_input - .config - .block_device_mapping - .is_empty() - { - return Err(error::Error::Invalid { - what: "Custom block mappings are not supported for ec2 instance launch".to_string(), - }); - } - - let cluster_name = bottlerocket_input - .cluster_crd_name - .as_ref() - .expect("A cluster provider is required"); - - // Create the labels for this EC2 provider. - let labels = bottlerocket_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "instances".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.to_string() - }); - - // Find all resources using the same cluster. - let conflicting_resources = bottlerocket_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await?; - - let mut ec2_builder = Ec2Config::builder(); - ec2_builder - .node_ami(bottlerocket_input.image_id) - .instance_count(2) - .instance_types::>( - bottlerocket_input - .crd_input - .config - .instance_type - .iter() - .cloned() - .collect(), - ) - .custom_user_data( - bottlerocket_input - .crd_input - .encoded_userdata()? 
- .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), - ) - .cluster_name_template(cluster_name, "clusterName") - .region_template(cluster_name, "region") - .instance_profile_arn_template(cluster_name, "iamInstanceProfileArn") - .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) - .cluster_type(cluster_type.clone()) - .depends_on(cluster_name) - .image( - bottlerocket_input - .crd_input - .images - .ec2_resource_agent_image - .as_ref() - .expect("Missing default image for EC2 resource agent"), - ) - .set_image_pull_secret( - bottlerocket_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_conflicts_with(conflicting_resources.into()) - .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) - .destruction_policy( - bottlerocket_input - .crd_input - .config - .dev - .bottlerocket_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ); - - // Add in the EKS specific configuration. - if cluster_type == ClusterType::Eks { - ec2_builder - .subnet_ids_template(cluster_name, "publicSubnetIds") - .endpoint_template(cluster_name, "endpoint") - .certificate_template(cluster_name, "certificate") - .cluster_dns_ip_template(cluster_name, "clusterDnsIp") - .security_groups_template(cluster_name, "securityGroups"); - } else { - // The default VPC doesn't attach private subnets to an ECS cluster, so public subnet ids - // are used instead. - ec2_builder - .subnet_ids_template(cluster_name, "publicSubnetIds") - // TODO If this is not set, the crd cannot be serialized since it is a `Vec` not - // `Option`. - .security_groups(Vec::new()); - } - - let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); - ec2_builder - .build(format!("{}-instances-{}", cluster_name, suffix)) - .context(error::BuildSnafu { - what: "EC2 instance provider CRD", - }) -} - -/// Create a CRD to launch Bottlerocket instances on an EKS or ECS cluster. -pub(crate) async fn ec2_karpenter_crd<'a>( - bottlerocket_input: BottlerocketInput<'a>, - region: &str, -) -> Result { - let cluster_name = bottlerocket_input - .cluster_crd_name - .as_ref() - .expect("A cluster provider is required"); - - // Create the labels for this EC2 provider. - let labels = bottlerocket_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "instances".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - "testsys/region".to_string() => region.to_string() - }); - - // Find all resources using the same cluster. 
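The EC2 provider above avoids name collisions between runs by appending four random lowercase characters to the cluster name when building the resource name. A minimal sketch of that suffix generation, assuming the `fastrand` 1.x crate and an illustrative cluster name:

    use std::iter::repeat_with;

    fn main() {
        // Four random lowercase ASCII characters, e.g. "qzrw".
        let suffix: String = repeat_with(fastrand::lowercase).take(4).collect();
        let resource_name = format!("{}-instances-{}", "x86-64-aws-k8s-124", suffix);
        assert_eq!(resource_name.len(), "x86-64-aws-k8s-124-instances-".len() + 4);
        println!("{}", resource_name);
    }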
- let conflicting_resources = bottlerocket_input - .crd_input - .existing_crds( - &labels, - &["testsys/cluster", "testsys/type", "testsys/region"], - ) - .await?; - - // If no mappings were provided use a standard mapping as a default - let device_mappings = if bottlerocket_input - .crd_input - .config - .block_device_mapping - .is_empty() - { - vec![ - KarpenterDeviceMapping { - name: "/dev/xvda".to_string(), - volume_type: "gp3".to_string(), - volume_size: 4, - delete_on_termination: true, - }, - KarpenterDeviceMapping { - name: "/dev/xvdb".to_string(), - volume_type: "gp3".to_string(), - volume_size: 20, - delete_on_termination: true, - }, - ] - } else { - bottlerocket_input - .crd_input - .config - .block_device_mapping - .clone() - }; - - let mut ec2_builder = Ec2KarpenterConfig::builder(); - ec2_builder - .node_ami(bottlerocket_input.image_id) - .instance_types::>( - bottlerocket_input - .crd_input - .config - .instance_type - .iter() - .cloned() - .collect(), - ) - .custom_user_data( - bottlerocket_input - .crd_input - .encoded_userdata()? - .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), - ) - .cluster_name_template(cluster_name, "clusterName") - .region_template(cluster_name, "region") - .subnet_ids_template(cluster_name, "publicSubnetIds") - .endpoint_template(cluster_name, "endpoint") - .cluster_sg_template(cluster_name, "clustersharedSg") - .device_mappings(device_mappings) - .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) - .depends_on(cluster_name) - .image( - bottlerocket_input - .crd_input - .images - .ec2_karpenter_resource_agent_image - .as_ref() - .expect("Missing default image for EC2 resource agent"), - ) - .set_image_pull_secret( - bottlerocket_input - .crd_input - .images - .testsys_agent_pull_secret - .clone(), - ) - .set_labels(Some(labels)) - .set_conflicts_with(conflicting_resources.into()) - .set_secrets(Some(bottlerocket_input.crd_input.config.secrets.clone())) - .destruction_policy( - bottlerocket_input - .crd_input - .config - .dev - .bottlerocket_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ); - - let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); - ec2_builder - .build(format!("{}-karpenter-{}", cluster_name, suffix)) - .context(error::BuildSnafu { - what: "EC2 instance provider CRD", - }) -} diff --git a/tools/testsys/src/crds.rs b/tools/testsys/src/crds.rs deleted file mode 100644 index 43875d93..00000000 --- a/tools/testsys/src/crds.rs +++ /dev/null @@ -1,787 +0,0 @@ -use crate::error::{self, Result}; -use crate::run::{KnownTestType, TestType}; -use bottlerocket_types::agent_config::TufRepoConfig; -use bottlerocket_variant::Variant; -use handlebars::Handlebars; -use log::{debug, info, warn}; -use maplit::btreemap; -use pubsys_config::RepoConfig; -use serde::Deserialize; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use std::fs; -use std::path::PathBuf; -use testsys_config::{rendered_cluster_name, GenericVariantConfig, TestsysImages}; -use testsys_model::constants::{API_VERSION, NAMESPACE}; -use testsys_model::test_manager::{SelectionParams, TestManager}; -use testsys_model::Crd; - -/// A type that is used for the creation of all CRDs. 
-pub struct CrdInput<'a> { - pub client: &'a TestManager, - pub arch: String, - pub variant: Variant, - pub config: GenericVariantConfig, - pub repo_config: RepoConfig, - pub test_flavor: String, - pub starting_version: Option, - pub migrate_to_version: Option, - pub build_id: Option, - /// `CrdCreator::starting_image_id` function should be used instead of using this field, so - /// it is not externally visible. - pub(crate) starting_image_id: Option, - pub(crate) test_type: TestType, - pub(crate) tests_directory: PathBuf, - pub images: TestsysImages, -} - -impl<'a> CrdInput<'a> { - /// Retrieve the TUF repo information from `Infra.toml` - pub fn tuf_repo_config(&self) -> Option { - if let (Some(metadata_base_url), Some(targets_url)) = ( - &self.repo_config.metadata_base_url, - &self.repo_config.targets_url, - ) { - debug!( - "Using TUF metadata from Infra.toml, metadata: '{}', targets: '{}'", - metadata_base_url, targets_url - ); - Some(TufRepoConfig { - metadata_url: format!("{}{}/{}/", metadata_base_url, &self.variant, &self.arch), - targets_url: targets_url.to_string(), - }) - } else { - warn!("No TUF metadata was found in Infra.toml using the default TUF repos"); - None - } - } - - /// Create a set of labels for the CRD by adding `additional_labels` to the standard labels. - pub fn labels(&self, additional_labels: BTreeMap) -> BTreeMap { - let mut labels = btreemap! { - "testsys/arch".to_string() => self.arch.to_string(), - "testsys/variant".to_string() => self.variant.to_string(), - "testsys/build-id".to_string() => self.build_id.to_owned().unwrap_or_default(), - "testsys/test-type".to_string() => self.test_type.to_string(), - }; - let mut add_labels = additional_labels; - labels.append(&mut add_labels); - labels - } - - /// Determine all CRDs that have the same value for each `id_labels` as `labels`. - pub async fn existing_crds( - &self, - labels: &BTreeMap, - id_labels: &[&str], - ) -> Result> { - // Create a single string containing all `label=value` pairs. - let checks = id_labels - .iter() - .map(|label| { - labels - .get(&label.to_string()) - .map(|value| format!("{}={}", label, value)) - .context(error::InvalidSnafu { - what: format!("The label '{}' was missing", label), - }) - }) - .collect::>>()? - .join(","); - - // Create a list of all CRD names that match all of the specified labels. - Ok(self - .client - .list(&SelectionParams { - labels: Some(checks), - ..Default::default() - }) - .await? - .iter() - .filter_map(Crd::name) - .collect()) - } - - /// Use the provided userdata path to create the encoded userdata. - pub fn encoded_userdata(&self) -> Result> { - let userdata_path = match self.config.userdata.as_ref() { - Some(userdata) => self.custom_userdata_file_path(userdata)?, - None => return Ok(None), - }; - - info!("Using userdata at '{}'", userdata_path.display()); - - let userdata = std::fs::read_to_string(&userdata_path).context(error::FileSnafu { - path: userdata_path, - })?; - - Ok(Some(base64::encode(userdata))) - } - - /// Find the userdata file for the test type - fn custom_userdata_file_path(&self, userdata: &str) -> Result { - let test_type = &self.test_type.to_string(); - - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. 
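`existing_crds` above reuses CRDs by matching on a subset of labels: it turns the chosen keys into a single `key=value,key=value` selector string and asks the TestSys client for anything that matches. A standalone sketch of how such a selector can be assembled, with hypothetical label values:

    use std::collections::BTreeMap;

    fn selector(labels: &BTreeMap<String, String>, keys: &[&str]) -> Option<String> {
        // Every requested key must be present; otherwise no selector can be built.
        let pairs: Option<Vec<String>> = keys
            .iter()
            .map(|key| labels.get(*key).map(|value| format!("{}={}", key, value)))
            .collect();
        pairs.map(|pairs| pairs.join(","))
    }

    fn main() {
        let mut labels = BTreeMap::new();
        labels.insert("testsys/type".to_string(), "cluster".to_string());
        labels.insert("testsys/cluster".to_string(), "x86-64-aws-k8s-124".to_string());
        let selector = selector(&labels, &["testsys/cluster", "testsys/type"]);
        assert_eq!(
            selector.as_deref(),
            Some("testsys/cluster=x86-64-aws-k8s-124,testsys/type=cluster")
        );
    }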
- let acceptable_paths = vec![ - // Check the absolute path - userdata.into(), - // Check for // - self.tests_directory.join(test_type).join(userdata), - // Check for //.toml - self.tests_directory - .join(test_type) - .join(userdata) - .with_extension("toml"), - // Check for /shared/ - self.tests_directory.join("shared").join(userdata), - // Check for /shared/.toml - self.tests_directory - .join("shared") - .join(userdata) - .with_extension("toml"), - // Check for /shared/userdata/ - self.tests_directory - .join("shared") - .join("userdata") - .join(userdata), - // Check for /shared/userdata/.toml - self.tests_directory - .join("shared") - .join("userdata") - .join(userdata) - .with_extension("toml"), - // Check for the path in the top level directory - PathBuf::new().join(userdata), - ]; - - // Find the first acceptable path that exists and return that. - acceptable_paths - .into_iter() - .find(|path| path.exists()) - .context(error::InvalidSnafu { - what: format!( - "Could not find userdata '{}' for test type '{}'", - userdata, test_type - ), - }) - } - - /// Fill in the templated cluster name with `arch` and `variant`. - fn rendered_cluster_name(&self, raw_cluster_name: String) -> Result { - Ok(rendered_cluster_name( - raw_cluster_name, - self.kube_arch(), - self.kube_variant(), - )?) - } - - /// Get the k8s safe architecture name - fn kube_arch(&self) -> String { - self.arch.replace('_', "-") - } - - /// Get the k8s safe variant name - fn kube_variant(&self) -> String { - self.variant.to_string().replace('.', "") - } - - /// Bottlerocket cluster naming convention. - fn default_cluster_name(&self) -> String { - format!("{}-{}", self.kube_arch(), self.kube_variant()) - } - - /// Get a list of cluster_names for this variant. If there are no cluster names, the default - /// cluster name will be used. - fn cluster_names(&self) -> Result> { - Ok(if self.config.cluster_names.is_empty() { - vec![self.default_cluster_name()] - } else { - self.config - .cluster_names - .iter() - .map(String::to_string) - // Fill the template fields in the clusters name before using it. - .map(|cluster_name| self.rendered_cluster_name(cluster_name)) - .collect::>>()? - }) - } - - /// Creates a `BTreeMap` of all configurable fields from this input - fn config_fields(&self, cluster_name: &str) -> BTreeMap { - btreemap! { - "arch".to_string() => self.arch.clone(), - "variant".to_string() => self.variant.to_string(), - "kube-arch".to_string() => self.kube_arch(), - "kube-variant".to_string() => self.kube_variant(), - "flavor".to_string() => some_or_null(&self.variant.variant_flavor().map(str::to_string)), - "version".to_string() => some_or_null(&self.variant.version().map(str::to_string)), - "cluster-name".to_string() => cluster_name.to_string(), - "instance-type".to_string() => some_or_null(&self.config.instance_type), - "agent-role".to_string() => some_or_null(&self.config.agent_role), - "conformance-image".to_string() => some_or_null(&self.config.conformance_image), - "conformance-registry".to_string() => some_or_null(&self.config.conformance_registry), - "control-plane-endpoint".to_string() => some_or_null(&self.config.control_plane_endpoint), - } - } - - /// Find the crd template file for the given test type - fn custom_crd_template_file_path(&self) -> Option { - let test_type = &self.test_type.to_string(); - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. 
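The helpers above derive Kubernetes-safe names from the build: underscores in the architecture become hyphens, dots are dropped from the variant, and the default cluster name is simply the sanitized `arch-variant` pair. A quick worked example of those transformations:

    fn kube_arch(arch: &str) -> String {
        arch.replace('_', "-")
    }

    fn kube_variant(variant: &str) -> String {
        variant.replace('.', "")
    }

    fn main() {
        let arch = "x86_64";
        let variant = "aws-k8s-1.24";
        // Default cluster name used when Test.toml does not list any clusters.
        let default_cluster_name = format!("{}-{}", kube_arch(arch), kube_variant(variant));
        assert_eq!(default_cluster_name, "x86-64-aws-k8s-124");
    }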
- let acceptable_paths = vec![ - // Check for .yaml in the top level directory - PathBuf::new().join(test_type).with_extension("yaml"), - // Check for //.yaml - self.tests_directory - .join(test_type) - .join(test_type) - .with_extension("yaml"), - // Check for //crd.yaml - self.tests_directory.join(test_type).join("crd.yaml"), - // Check for /shared/.yaml - self.tests_directory - .join("shared") - .join(test_type) - .with_extension("yaml"), - // Check for /shared/tests/.yaml - self.tests_directory - .join("shared") - .join("tests") - .join(test_type) - .with_extension("yaml"), - ]; - - // Find the first acceptable path that exists and return that. - acceptable_paths.into_iter().find(|path| path.exists()) - } - - /// Find the cluster config file for the given cluster name and test type. - fn cluster_config_file_path(&self, cluster_name: &str) -> Option { - let test_type = &self.test_type.to_string(); - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. - let acceptable_paths = vec![ - // Check for //.yaml - self.tests_directory - .join(test_type) - .join(cluster_name) - .with_extension("yaml"), - // Check for /shared/.yaml - self.tests_directory - .join("shared") - .join(cluster_name) - .with_extension("yaml"), - // Check for /shared/cluster-config/.yaml - self.tests_directory - .join("shared") - .join("cluster-config") - .join(cluster_name) - .with_extension("yaml"), - // Check for /shared/clusters/.yaml - self.tests_directory - .join("shared") - .join("clusters") - .join(cluster_name) - .with_extension("yaml"), - // Check for /shared/clusters//cluster.yaml - self.tests_directory - .join("shared") - .join("clusters") - .join(cluster_name) - .join("cluster") - .with_extension("yaml"), - ]; - - // Find the first acceptable path that exists and return that. - acceptable_paths.into_iter().find(|path| path.exists()) - } - - /// Find the resolved cluster config file for the given cluster name and test type if it exists. - fn resolved_cluster_config( - &self, - cluster_name: &str, - additional_fields: &mut BTreeMap, - ) -> Result> { - let path = match self.cluster_config_file_path(cluster_name) { - None => return Ok(None), - Some(path) => path, - }; - info!("Using cluster config at {}", path.display()); - let config = fs::read_to_string(&path).context(error::FileSnafu { path })?; - - let mut fields = self.config_fields(cluster_name); - fields.insert("api-version".to_string(), API_VERSION.to_string()); - fields.insert("namespace".to_string(), NAMESPACE.to_string()); - fields.append(additional_fields); - - let mut handlebars = Handlebars::new(); - handlebars.set_strict_mode(true); - let rendered_config = handlebars.render_template(&config, &fields)?; - - Ok(Some(rendered_config)) - } - - /// Find the hardware csv file for the given hardware csv name and test type. - fn hardware_csv_file_path(&self, hardware_csv: &str) -> Option { - let test_type = &self.test_type.to_string(); - // List all acceptable paths to the custom crd to allow users some freedom in the way - // `tests` is organized. 
- let acceptable_paths = vec![ - // Check for //.csv - self.tests_directory - .join(test_type) - .join(hardware_csv) - .with_extension("csv"), - // Check for /shared/.csv - self.tests_directory - .join("shared") - .join(hardware_csv) - .with_extension("csv"), - // Check for /shared/cluster-config/.csv - self.tests_directory - .join("shared") - .join("cluster-config") - .join(hardware_csv) - .with_extension("csv"), - // Check for /shared/clusters/.csv - self.tests_directory - .join("shared") - .join("clusters") - .join(hardware_csv) - .with_extension("csv"), - ]; - - // Find the first acceptable path that exists and return that. - acceptable_paths.into_iter().find(|path| path.exists()) - } - - /// Find the resolved cluster config file for the given cluster name and test type if it exists. - fn resolved_hardware_csv(&self) -> Result> { - let hardware_csv = match &self.config.hardware_csv { - Some(hardware_csv) => hardware_csv, - None => return Ok(None), - }; - - // If the hardware csv is csv like, it probably is a csv; otherwise, it is a path to the - // hardware csv. - if hardware_csv.contains(',') { - return Ok(Some(hardware_csv.to_string())); - } - - let path = match self.hardware_csv_file_path(hardware_csv) { - None => return Ok(None), - Some(path) => path, - }; - - info!("Using hardware csv at {}", path.display()); - - let config = fs::read_to_string(&path).context(error::FileSnafu { path })?; - Ok(Some(config)) - } - - fn hardware_for_cluster(&self, cluster_name: &str) -> Result> { - // Check for /shared/clusters//hardware.csv - let path = self - .tests_directory - .join("shared") - .join("clusters") - .join(cluster_name) - .join("hardware") - .with_extension("csv"); - - if !path.exists() { - return Ok(None); - } - - info!("Using hardware csv at {}", path.display()); - - let config = fs::read_to_string(&path).context(error::FileSnafu { path })?; - Ok(Some(config)) - } -} - -/// Take the value of the `Option` or `"null"` if the `Option` was `None` -fn some_or_null(field: &Option) -> String { - field.to_owned().unwrap_or_else(|| "null".to_string()) -} - -/// The `CrdCreator` trait is used to create CRDs. Each variant family should have a `CrdCreator` -/// that is responsible for creating the CRDs needed for testing. -#[async_trait::async_trait] -pub(crate) trait CrdCreator: Sync { - /// Return the image id that should be used for normal testing. - async fn image_id(&self, crd_input: &CrdInput) -> Result; - - /// Return the image id that should be used as the starting point for migration testing. - async fn starting_image_id(&self, crd_input: &CrdInput) -> Result; - - /// Create a CRD for the cluster needed to launch Bottlerocket. If no cluster CRD is - /// needed, `CreateCrdOutput::None` can be returned. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result; - - /// Create a CRD to launch Bottlerocket. `CreateCrdOutput::None` can be returned if this CRD is - /// not needed. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result; - - /// Create a CRD that migrates Bottlerocket from one version to another. - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result; - - /// Create a testing CRD for this variant of Bottlerocket. - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result; - - /// Create a workload testing CRD for this variant of Bottlerocket. 
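`resolved_hardware_csv` above accepts the hardware CSV setting either as literal CSV content or as a file name, and it distinguishes the two with a simple heuristic: a value containing a comma is treated as inline CSV, anything else is resolved to a file on disk. A small sketch of that decision, with a hypothetical search directory standing in for the tests directory lookup:

    use std::fs;
    use std::io;
    use std::path::Path;

    fn resolve_hardware_csv(value: &str, search_dir: &Path) -> io::Result<String> {
        if value.contains(',') {
            // A comma is present: assume the value is the CSV content itself.
            return Ok(value.to_string());
        }
        // Otherwise treat the value as a file name under the search directory.
        fs::read_to_string(search_dir.join(value).with_extension("csv"))
    }

    fn main() -> io::Result<()> {
        // Illustrative CSV header; real content depends on the EKS Anywhere hardware format.
        let inline = resolve_hardware_csv("hostname,mac,bmc-ip", Path::new("tests/shared"))?;
        assert!(inline.starts_with("hostname"));
        Ok(())
    }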
- async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result; - - /// Create a set of additional fields that may be used by an externally defined agent on top of - /// the ones in `CrdInput` - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - Default::default() - } - - /// Creates a set of CRDs for the specified variant and test type that can be added to a TestSys - /// cluster. - async fn create_crds( - &self, - test_type: &KnownTestType, - crd_input: &CrdInput, - ) -> Result> { - let mut crds = Vec::new(); - let image_id = match &test_type { - KnownTestType::Migration => { - if let Some(image_id) = &crd_input.starting_image_id { - debug!( - "Using the provided starting image id for migration testing '{}'", - image_id - ); - image_id.to_string() - } else { - let image_id = self.starting_image_id(crd_input).await?; - debug!( - "A starting image id was not provided, '{}' will be used instead.", - image_id - ); - image_id - } - } - _ => self.image_id(crd_input).await?, - }; - for cluster_name in &crd_input.cluster_names()? { - let cluster_output = self - .cluster_crd(ClusterInput { - cluster_name, - image_id: &image_id, - crd_input, - cluster_config: &crd_input.resolved_cluster_config( - cluster_name, - &mut self - .additional_fields(&test_type.to_string()) - .into_iter() - // Add the image id in case it is needed for cluster creation - .chain(Some(("image-id".to_string(), image_id.clone()))) - .collect::>(), - )?, - hardware_csv: &crd_input - .resolved_hardware_csv() - .transpose() - .or_else(|| crd_input.hardware_for_cluster(cluster_name).transpose()) - .transpose()?, - }) - .await?; - let cluster_crd_name = cluster_output.crd_name(); - if let Some(crd) = cluster_output.crd() { - debug!("Cluster crd was created for '{}'", cluster_name); - crds.push(crd) - } - let bottlerocket_output = self - .bottlerocket_crd(BottlerocketInput { - cluster_crd_name: &cluster_crd_name, - image_id: image_id.clone(), - test_type, - crd_input, - }) - .await?; - let bottlerocket_crd_name = bottlerocket_output.crd_name(); - match &test_type { - KnownTestType::Conformance | KnownTestType::Quick => { - if let Some(crd) = bottlerocket_output.crd() { - debug!("Bottlerocket crd was created for '{}'", cluster_name); - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: Default::default(), - name_suffix: None, - }) - .await?; - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - } - KnownTestType::Workload => { - if let Some(crd) = bottlerocket_output.crd() { - debug!("Bottlerocket crd was created for '{}'", cluster_name); - crds.push(crd) - } - let test_output = self - .workload_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: Default::default(), - name_suffix: None, - }) - .await?; - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - } - KnownTestType::Migration => { - if let Some(crd) = bottlerocket_output.crd() { - debug!("Bottlerocket crd was created for '{}'", cluster_name); - crds.push(crd) - } - let mut tests = Vec::new(); - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests.clone(), - name_suffix: "1-initial".into(), - }) - .await?; - if let Some(name) = test_output.crd_name() { - tests.push(name) - } - if 
let Some(crd) = test_output.crd() { - crds.push(crd) - } - let migration_output = self - .migration_crd(MigrationInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - crd_input, - prev_tests: tests.clone(), - name_suffix: "2-migrate".into(), - migration_direction: MigrationDirection::Upgrade, - }) - .await?; - if let Some(name) = migration_output.crd_name() { - tests.push(name) - } - if let Some(crd) = migration_output.crd() { - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests.clone(), - name_suffix: "3-migrated".into(), - }) - .await?; - if let Some(name) = test_output.crd_name() { - tests.push(name) - } - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - let migration_output = self - .migration_crd(MigrationInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - crd_input, - prev_tests: tests.clone(), - name_suffix: "4-migrate".into(), - migration_direction: MigrationDirection::Downgrade, - }) - .await?; - if let Some(name) = migration_output.crd_name() { - tests.push(name) - } - if let Some(crd) = migration_output.crd() { - crds.push(crd) - } - let test_output = self - .test_crd(TestInput { - cluster_crd_name: &cluster_crd_name, - bottlerocket_crd_name: &bottlerocket_crd_name, - test_type, - crd_input, - prev_tests: tests, - name_suffix: "5-final".into(), - }) - .await?; - if let Some(crd) = test_output.crd() { - crds.push(crd) - } - } - } - } - - Ok(crds) - } - - /// Creates a set of CRDs for the specified variant and test type that can be added to a TestSys - /// cluster. - async fn create_custom_crds( - &self, - test_type: &str, - crd_input: &CrdInput, - override_crd_template: Option, - ) -> Result> { - debug!("Creating custom CRDs for '{}' test", test_type); - let crd_template_file_path = &override_crd_template - .or_else(|| crd_input.custom_crd_template_file_path()) - .context(error::InvalidSnafu { - what: format!( - "A custom yaml file could not be found for test type '{}'", - test_type - ), - })?; - info!( - "Creating custom crd from '{}'", - crd_template_file_path.display() - ); - let mut crds = Vec::new(); - for cluster_name in &crd_input.cluster_names()? { - let mut fields = crd_input.config_fields(cluster_name); - fields.insert("api-version".to_string(), API_VERSION.to_string()); - fields.insert("namespace".to_string(), NAMESPACE.to_string()); - fields.insert("image-id".to_string(), self.image_id(crd_input).await?); - fields.append(&mut self.additional_fields(test_type)); - - let mut handlebars = Handlebars::new(); - handlebars.set_strict_mode(true); - let rendered_manifest = handlebars.render_template( - &std::fs::read_to_string(crd_template_file_path).context(error::FileSnafu { - path: crd_template_file_path, - })?, - &fields, - )?; - - for crd_doc in serde_yaml::Deserializer::from_str(&rendered_manifest) { - let value = - serde_yaml::Value::deserialize(crd_doc).context(error::SerdeYamlSnafu { - what: "Unable to deserialize rendered manifest", - })?; - let mut crd: Crd = - serde_yaml::from_value(value).context(error::SerdeYamlSnafu { - what: "The manifest did not match a `CRD`", - })?; - // Add in the secrets from the config manually. 
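For migration testing, `create_crds` above builds a five-step chain in which each test lists the earlier steps as dependencies, so the controller runs them strictly in order: a test on the starting version, an upgrade, a test on the migrated version, a downgrade, and a final test. A compact sketch of how that dependency list grows, using plain strings in place of real CRD names:

    fn main() {
        let cluster = "x86-64-aws-k8s-124";
        let steps = ["1-initial", "2-migrate", "3-migrated", "4-migrate", "5-final"];

        // Each step depends on every step created before it.
        let mut prev_tests: Vec<String> = Vec::new();
        for step in steps {
            let name = format!("{}-{}", cluster, step);
            println!("{} depends on {:?}", name, prev_tests);
            prev_tests.push(name);
        }
    }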
- match &mut crd { - Crd::Test(test) => { - test.spec.agent.secrets = Some(crd_input.config.secrets.clone()) - } - Crd::Resource(resource) => { - resource.spec.agent.secrets = Some(crd_input.config.secrets.clone()) - } - } - crds.push(crd); - } - } - Ok(crds) - } -} - -/// The input used for cluster crd creation -pub struct ClusterInput<'a> { - pub cluster_name: &'a String, - pub image_id: &'a String, - pub crd_input: &'a CrdInput<'a>, - pub cluster_config: &'a Option, - pub hardware_csv: &'a Option, -} - -/// The input used for bottlerocket crd creation -pub struct BottlerocketInput<'a> { - pub cluster_crd_name: &'a Option, - /// The image id that should be used by this CRD - pub image_id: String, - pub test_type: &'a KnownTestType, - pub crd_input: &'a CrdInput<'a>, -} - -/// The input used for test crd creation -pub struct TestInput<'a> { - pub cluster_crd_name: &'a Option, - pub bottlerocket_crd_name: &'a Option, - pub test_type: &'a KnownTestType, - pub crd_input: &'a CrdInput<'a>, - /// The set of tests that have already been created that are related to this test - pub prev_tests: Vec, - /// The suffix that should be appended to the end of the test name to prevent naming conflicts - pub name_suffix: Option<&'a str>, -} - -/// The input used for migration crd creation -pub struct MigrationInput<'a> { - pub cluster_crd_name: &'a Option, - pub bottlerocket_crd_name: &'a Option, - pub crd_input: &'a CrdInput<'a>, - /// The set of tests that have already been created that are related to this test - pub prev_tests: Vec, - /// The suffix that should be appended to the end of the test name to prevent naming conflicts - pub name_suffix: Option<&'a str>, - pub migration_direction: MigrationDirection, -} - -pub enum MigrationDirection { - Upgrade, - Downgrade, -} - -pub enum CreateCrdOutput { - /// A new CRD was created and needs to be applied to the cluster. - NewCrd(Box), - /// An existing CRD is already representing this object. - ExistingCrd(String), - /// There is no CRD to create for this step of this family. - None, -} - -impl Default for CreateCrdOutput { - fn default() -> Self { - Self::None - } -} - -impl CreateCrdOutput { - /// Get the name of the CRD that was created or already existed - pub(crate) fn crd_name(&self) -> Option { - match self { - CreateCrdOutput::NewCrd(crd) => { - Some(crd.name().expect("A CRD is missing the name field.")) - } - CreateCrdOutput::ExistingCrd(name) => Some(name.to_string()), - CreateCrdOutput::None => None, - } - } - - /// Get the CRD if it was created - pub(crate) fn crd(self) -> Option { - match self { - CreateCrdOutput::NewCrd(crd) => Some(*crd), - CreateCrdOutput::ExistingCrd(_) => None, - CreateCrdOutput::None => None, - } - } -} diff --git a/tools/testsys/src/delete.rs b/tools/testsys/src/delete.rs deleted file mode 100644 index a08ec9f7..00000000 --- a/tools/testsys/src/delete.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::error::Result; -use clap::Parser; -use futures::TryStreamExt; -use log::info; -use testsys_model::test_manager::{CrdState, CrdType, DeleteEvent, SelectionParams, TestManager}; - -/// Delete all tests and resources from a testsys cluster. 
-#[derive(Debug, Parser)] -pub(crate) struct Delete { - /// Only delete tests - #[clap(long)] - test: bool, - - /// Focus status on a particular arch - #[clap(long)] - arch: Option, - - /// Focus status on a particular variant - #[clap(long)] - variant: Option, - - /// Only delete passed tests - #[clap(long, conflicts_with_all=&["failed", "running"])] - passed: bool, - - /// Only delete failed tests - #[clap(long, conflicts_with_all=&["passed", "running"])] - failed: bool, - - /// Only CRD's that haven't finished - #[clap(long, conflicts_with_all=&["passed", "failed"])] - running: bool, -} - -impl Delete { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - let state = if self.running { - info!("Deleting all running tests and resources"); - Some(CrdState::NotFinished) - } else if self.passed { - info!("Deleting all passed tests"); - Some(CrdState::Passed) - } else if self.failed { - info!("Deleting all failed tests"); - Some(CrdState::Failed) - } else { - info!("Deleting all tests and resources"); - None - }; - let crd_type = self.test.then_some(CrdType::Test); - let mut labels = Vec::new(); - if let Some(arch) = self.arch { - labels.push(format!("testsys/arch={}", arch)) - }; - if let Some(variant) = self.variant { - labels.push(format!("testsys/variant={}", variant)) - }; - let mut stream = client - .delete( - &SelectionParams { - labels: Some(labels.join(",")), - state, - crd_type, - ..Default::default() - }, - false, - ) - .await?; - - while let Some(delete) = stream.try_next().await? { - match delete { - DeleteEvent::Starting(crd) => println!("Starting delete for {}", crd.name()), - DeleteEvent::Deleted(crd) => println!("Delete finished for {}", crd.name()), - DeleteEvent::Failed(crd) => println!("Delete failed for {}", crd.name()), - } - } - info!("Delete finished"); - Ok(()) - } -} diff --git a/tools/testsys/src/error.rs b/tools/testsys/src/error.rs deleted file mode 100644 index 06f73950..00000000 --- a/tools/testsys/src/error.rs +++ /dev/null @@ -1,121 +0,0 @@ -use aws_sdk_ec2::error::SdkError; -use aws_sdk_ec2::operation::describe_images::DescribeImagesError; -use snafu::Snafu; -use std::path::PathBuf; - -pub type Result = std::result::Result; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub enum Error { - // `error` must be used instead of `source` because the build function returns - // `std::error::Error` but not `std::error::Error + Sync + Send`. 
- #[snafu(display("Unable to build '{}': {}", what, source))] - Build { - what: String, - source: Box, - }, - - #[snafu(display("Unable to build datacenter credentials: {}", source))] - CredsBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(display("Unable to build data center config: {}", source))] - DatacenterBuild { - source: pubsys_config::vmware::Error, - }, - - #[snafu(context(false), display("{}", source))] - DescribeImages { - source: SdkError, - }, - - #[snafu(display("Unable to read file '{}': {}", path.display(), source))] - File { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(context(false), display("Unable render templated yaml: {}", source))] - HandlebarsRender { source: handlebars::RenderError }, - - #[snafu( - context(false), - display("Unable create template from yaml: {}", source) - )] - HandlebarsTemplate { - #[snafu(source(from(handlebars::TemplateError, Box::new)))] - source: Box, - }, - - #[snafu(display("Unable to create map from {}: {}", what, source))] - IntoMap { - what: String, - source: testsys_model::Error, - }, - - #[snafu(display("{}", what))] - Invalid { what: String }, - - #[snafu(display("{}: {}", what, source))] - IO { - what: String, - source: std::io::Error, - }, - - #[snafu(display("Unable to parse K8s version '{}'", version))] - K8sVersion { version: String }, - - #[snafu(display("{} was missing from {}", item, what))] - Missing { item: String, what: String }, - - #[snafu(context(false), display("{}", source))] - PubsysConfig { source: pubsys_config::Error }, - - #[snafu(display("Unable to create secret name for '{}': {}", secret_name, source))] - SecretName { - secret_name: String, - source: testsys_model::Error, - }, - - #[snafu(display("{}: {}", what, source))] - SerdeJson { - what: String, - source: serde_json::Error, - }, - - #[snafu(display("{}: {}", what, source))] - SerdeYaml { - what: String, - source: serde_yaml::Error, - }, - - #[snafu(context(false), display("{}", source))] - TestManager { - source: testsys_model::test_manager::Error, - }, - - #[snafu(context(false), display("{}", source))] - TestsysConfig { source: testsys_config::Error }, - - #[snafu(display("{} is not supported.", what))] - Unsupported { what: String }, - - #[snafu(display("Unable to parse url from '{}': {}", url, source))] - UrlParse { - url: String, - source: url::ParseError, - }, - - #[snafu(display("Unable to create `Variant` from `{}`: {}", variant, source))] - Variant { - variant: String, - source: bottlerocket_variant::error::Error, - }, - - #[snafu(display("Error reading config: {}", source))] - VmwareConfig { - source: pubsys_config::vmware::Error, - }, -} diff --git a/tools/testsys/src/install.rs b/tools/testsys/src/install.rs deleted file mode 100644 index 17991869..00000000 --- a/tools/testsys/src/install.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::error::Result; -use crate::run::TestsysImages; -use clap::Parser; -use log::{info, trace}; -use std::path::PathBuf; -use testsys_config::TestConfig; -use testsys_model::test_manager::{ImageConfig, TestManager}; - -/// The install subcommand is responsible for putting all of the necessary components for testsys in -/// a k8s cluster. 
-#[derive(Debug, Parser)] -pub(crate) struct Install { - /// The path to `Test.toml` - #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")] - test_config_path: PathBuf, - - #[command(flatten)] - agent_images: TestsysImages, -} - -impl Install { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - // Use Test.toml or default - let test_config = TestConfig::from_path_or_default(&self.test_config_path)?; - - let test_opts = test_config.test.to_owned().unwrap_or_default(); - - let images = vec![ - Some(self.agent_images.into()), - Some(test_opts.testsys_images), - test_opts.testsys_image_registry.map(|registry| { - testsys_config::TestsysImages::new(registry, test_opts.testsys_image_tag) - }), - Some(testsys_config::TestsysImages::public_images()), - ] - .into_iter() - .flatten() - .fold(Default::default(), testsys_config::TestsysImages::merge); - - let controller_uri = images - .controller_image - .expect("The default controller image is missing."); - - trace!( - "Installing testsys using controller image '{}'", - controller_uri - ); - - let controller_image = match images.testsys_agent_pull_secret { - Some(secret) => ImageConfig::WithCreds { - secret, - image: controller_uri, - }, - None => ImageConfig::Image(controller_uri), - }; - client.install(controller_image).await?; - - info!("testsys components were successfully installed."); - - Ok(()) - } -} diff --git a/tools/testsys/src/logs.rs b/tools/testsys/src/logs.rs deleted file mode 100644 index d63dd81b..00000000 --- a/tools/testsys/src/logs.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::error::{self, Result}; -use clap::Parser; -use futures::TryStreamExt; -use snafu::OptionExt; -use testsys_model::test_manager::{ResourceState, TestManager}; -use unescape::unescape; - -/// Stream the logs of an object from a testsys cluster. -#[derive(Debug, Parser)] -pub(crate) struct Logs { - /// The name of the test we want logs from. - #[clap(long, conflicts_with = "resource")] - test: Option, - - /// The name of the resource we want logs from. - #[clap(long, conflicts_with = "test", requires = "state")] - resource: Option, - - /// The resource state we want logs for (Creation, Destruction). - #[clap(long = "state", conflicts_with = "test")] - resource_state: Option, - - /// Follow logs - #[clap(long, short)] - follow: bool, -} - -impl Logs { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - match (self.test, self.resource, self.resource_state) { - (Some(test), None, None) => { - let mut logs = client.test_logs(test, self.follow).await?; - while let Some(line) = logs.try_next().await? { - println!("{}", unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu{what: "Unable to unescape log string"})?); - } - } - (None, Some(resource), Some(state)) => { - let mut logs = client.resource_logs(resource, state, self.follow).await?; - while let Some(line) = logs.try_next().await? { - println!("{}", unescape(&String::from_utf8_lossy(&line)).context(error::InvalidSnafu{what: "Unable to unescape log string"})?); - } - } - _ => return Err(error::Error::Invalid{what: "Invalid arguments were provided. 
Exactly one of `--test` or `--resource` must be given.".to_string()}), - }; - Ok(()) - } -} diff --git a/tools/testsys/src/main.rs b/tools/testsys/src/main.rs deleted file mode 100644 index 26a97d4b..00000000 --- a/tools/testsys/src/main.rs +++ /dev/null @@ -1,112 +0,0 @@ -use clap::{Parser, Subcommand}; -use delete::Delete; -use env_logger::Builder; -use error::Result; -use install::Install; -use log::{debug, error, LevelFilter}; -use logs::Logs; -use restart_test::RestartTest; -use run::Run; -use secret::Add; -use status::Status; -use std::path::PathBuf; -use testsys_model::test_manager::TestManager; -use uninstall::Uninstall; - -mod aws_ecs; -mod aws_k8s; -mod aws_resources; -mod crds; -mod delete; -mod error; -mod install; -mod logs; -mod metal_k8s; -mod migration; -mod restart_test; -mod run; -mod secret; -mod sonobuoy; -mod status; -mod uninstall; -mod vmware_k8s; - -/// A program for running and controlling Bottlerocket tests in a Kubernetes cluster using -/// bottlerocket-test-system -#[derive(Parser, Debug)] -#[clap(about, long_about = None)] -struct TestsysArgs { - #[arg(global = true, long, default_value = "INFO")] - /// How much detail to log; from least to most: ERROR, WARN, INFO, DEBUG, TRACE - log_level: LevelFilter, - - /// Path to the kubeconfig file for the testsys cluster. Can also be passed with the KUBECONFIG - /// environment variable. - #[arg(long)] - kubeconfig: Option, - - #[command(subcommand)] - command: Command, -} - -impl TestsysArgs { - async fn run(self) -> Result<()> { - let client = match self.kubeconfig { - Some(path) => TestManager::new_from_kubeconfig_path(&path).await?, - None => TestManager::new().await?, - }; - match self.command { - Command::Run(run) => run.run(client).await?, - Command::Install(install) => install.run(client).await?, - Command::Delete(delete) => delete.run(client).await?, - Command::Status(status) => status.run(client).await?, - Command::Logs(logs) => logs.run(client).await?, - Command::RestartTest(restart_test) => restart_test.run(client).await?, - Command::Add(add) => add.run(client).await?, - Command::Uninstall(uninstall) => uninstall.run(client).await?, - }; - Ok(()) - } -} - -#[derive(Subcommand, Debug)] -enum Command { - // We need to box some commands because they require significantly more arguments than the other commands. - Install(Box), - Run(Box), - Delete(Delete), - Status(Status), - Logs(Logs), - RestartTest(RestartTest), - Add(Add), - Uninstall(Uninstall), -} - -#[tokio::main] -async fn main() { - let args = TestsysArgs::parse(); - init_logger(args.log_level); - debug!("{:?}", args); - if let Err(e) = args.run().await { - error!("{}", e); - std::process::exit(1); - } -} - -/// Initialize the logger with the value passed by `--log-level` (or its default) when the -/// `RUST_LOG` environment variable is not present. If present, the `RUST_LOG` environment variable -/// overrides `--log-level`/`level`. -fn init_logger(level: LevelFilter) { - match std::env::var(env_logger::DEFAULT_FILTER_ENV).ok() { - Some(_) => { - // RUST_LOG exists; env_logger will use it. - Builder::from_default_env().init(); - } - None => { - // RUST_LOG does not exist; use default log level for this crate only. 
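// For illustration (hypothetical invocations, assuming the binary is run as
// `testsys`): `RUST_LOG=debug testsys status` takes the branch above and ignores
// `--log-level`, while `testsys --log-level TRACE status` reaches this branch and
// enables TRACE output for this crate alone.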
- Builder::new() - .filter(Some(env!("CARGO_CRATE_NAME")), level) - .init(); - } - } -} diff --git a/tools/testsys/src/metal_k8s.rs b/tools/testsys/src/metal_k8s.rs deleted file mode 100644 index 3cef00a1..00000000 --- a/tools/testsys/src/metal_k8s.rs +++ /dev/null @@ -1,261 +0,0 @@ -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use crate::sonobuoy::{sonobuoy_crd, workload_crd}; -use bottlerocket_types::agent_config::MetalK8sClusterConfig; -use maplit::btreemap; -use serde::Deserialize; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use testsys_model::{Crd, DestructionPolicy}; -use url::Url; - -/// A `CrdCreator` responsible for creating crd related to `metal-k8s` variants. -pub(crate) struct MetalK8sCreator { - pub(crate) region: String, - pub(crate) encoded_mgmt_cluster_kubeconfig: String, - pub(crate) image_name: String, -} - -#[async_trait::async_trait] -impl CrdCreator for MetalK8sCreator { - /// Use the provided image name with the `os_image_dir` from `Test.toml` for the image id. - async fn image_id(&self, crd_input: &CrdInput) -> Result { - image_url( - crd_input - .config - .os_image_dir - .as_ref() - .context(error::InvalidSnafu { - what: "An os image directory is required for metal testing", - })?, - &self.image_name, - ) - } - - /// Use standard naming conventions to predict the starting image name. - async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - let filename = format!( - "bottlerocket-{}-{}-{}.img.gz", - crd_input.variant, - crd_input.arch, - crd_input - .starting_version - .as_ref() - .context(error::InvalidSnafu { - what: "The starting version must be provided for migration testing" - })? - ); - image_url(crd_input.config.os_image_dir.as_ref().context(error::InvalidSnafu { - what: "An os image directory is required for metal testing if a starting image id not used", - })?, &filename) - } - - /// Creates a metal K8s cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - let (cluster_name, control_plane_endpoint_ip, k8s_version) = cluster_data( - cluster_input - .cluster_config - .as_ref() - .context(error::InvalidSnafu { - what: "A cluster config is required for Bare Metal cluster provisioning.", - })?, - )?; - - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_name.clone(), - "testsys/controlPlaneEndpoint".to_string() => control_plane_endpoint_ip, - "testsys/k8sVersion".to_string() => k8s_version - }); - - // Check if the cluster already has a CRD - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &[ - "testsys/cluster", - "testsys/type", - "testsys/controlPlaneEndpoint", - "testsys/k8sVersion", - ], - ) - .await? 
- .pop() - { - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - // Check if an existing cluster is using this endpoint - let existing_clusters = cluster_input - .crd_input - .existing_crds(&labels, &["testsys/type", "testsys/controlPlaneEndpoint"]) - .await?; - - let metal_k8s_crd = MetalK8sClusterConfig::builder() - .set_labels(Some(labels)) - .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig) - .hardware_csv_base64(base64::encode( - cluster_input - .hardware_csv - .as_ref() - .context(error::InvalidSnafu { - what: "A hardware CSV is required for Bare Metal testing", - })?, - )) - .cluster_config_base64(base64::encode( - cluster_input - .cluster_config - .as_ref() - .context(error::InvalidSnafu { - what: "A cluster config is required for Bare Metal testing", - })?, - )) - .eks_a_release_manifest_url( - cluster_input - .crd_input - .config - .dev - .eks_a_release_manifest_url - .clone(), - ) - .set_conflicts_with(Some(existing_clusters)) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ) - .image( - cluster_input - .crd_input - .images - .metal_k8s_cluster_resource_agent_image - .as_ref() - .expect( - "The default metal K8s cluster resource provider image URI is missing.", - ), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .privileged(true) - .build(cluster_name) - .context(error::BuildSnafu { - what: "metal K8s cluster CRD", - })?; - - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - metal_k8s_crd, - )))) - } - - /// Machines are provisioned during cluster creation, so there is nothing to do here. - async fn bottlerocket_crd<'a>( - &self, - _bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::None) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - Some("us-west-2".to_string()), - "instanceIds", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( - test_input, - )?)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} - -/// Determine the (cluster name, control plane endpoint ip, K8s version) from an EKS Anywhere cluster manifest -fn cluster_data(config: &str) -> Result<(String, String, String)> { - let cluster_manifest = serde_yaml::Deserializer::from_str(config) - .map(|config| { - serde_yaml::Value::deserialize(config).context(error::SerdeYamlSnafu { - what: "Unable to deserialize cluster config", - }) - }) - // Make sure all of the configs were deserializable - .collect::>>()? - .into_iter() - // Find the `Cluster` config - .find(|config| { - config.get("kind") == Some(&serde_yaml::Value::String("Cluster".to_string())) - }); - let cluster_name = cluster_manifest - .as_ref() - // Get the name from the metadata field in the `Cluster` config - .and_then(|config| config.get("metadata")) - .and_then(|config| config.get("name")) - .and_then(|name| name.as_str()) - .context(error::MissingSnafu { - item: "name", - what: "EKS Anywhere config metadata", - })? 
- .to_string(); - - let control_plane_endpoint_ip = cluster_manifest - .as_ref() - // Get the name from the metadata field in the `Cluster` config - .and_then(|config| config.get("spec")) - .and_then(|config| config.get("controlPlaneConfiguration")) - .and_then(|config| config.get("endpoint")) - .and_then(|config| config.get("host")) - .and_then(|name| name.as_str()) - .context(error::MissingSnafu { - item: "control plane endpoint", - what: "EKS Anywhere config metadata", - })? - .to_string(); - - let k8s_version = cluster_manifest - .as_ref() - // Get the name from the metadata field in the `Cluster` config - .and_then(|config| config.get("spec")) - .and_then(|config| config.get("kubernetesVersion")) - .and_then(|name| name.as_str()) - .context(error::MissingSnafu { - item: "control plane endpoint", - what: "EKS Anywhere config metadata", - })? - .to_string(); - - Ok((cluster_name, control_plane_endpoint_ip, k8s_version)) -} - -fn image_url(image_dir: &str, filename: &str) -> Result { - let image_url = Url::parse(image_dir) - .and_then(|base_url| base_url.join(filename)) - .context(error::UrlParseSnafu { url: image_dir })?; - Ok(image_url.to_string()) -} diff --git a/tools/testsys/src/migration.rs b/tools/testsys/src/migration.rs deleted file mode 100644 index aeb3f7a1..00000000 --- a/tools/testsys/src/migration.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::crds::{MigrationDirection, MigrationInput}; -use crate::error::{self, Result}; -use bottlerocket_types::agent_config::MigrationConfig; -use maplit::btreemap; -use snafu::{OptionExt, ResultExt}; -use testsys_model::Test; - -/// Create a CRD for migrating Bottlerocket instances using SSM commands. -/// `aws_region_override` allows the region that's normally derived from the cluster resource CRD to be overridden -/// `instance_id_field_name` specifies the VM/Instance resource CRD field name for retrieving the instances IDs of the created instances -pub(crate) fn migration_crd( - migration_input: MigrationInput, - aws_region_override: Option, - instance_id_field_name: &str, -) -> Result { - let cluster_resource_name = migration_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - - let labels = migration_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "migration".to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - - // Determine which version should be migrated to from `migration_input`. - let migration_version = match migration_input.migration_direction { - MigrationDirection::Upgrade => migration_input - .crd_input - .migrate_to_version - .as_ref() - .context(error::InvalidSnafu { - what: "The target migration version is required", - }), - MigrationDirection::Downgrade => migration_input - .crd_input - .starting_version - .as_ref() - .context(error::InvalidSnafu { - what: "The starting migration version is required", - }), - }?; - - // Construct the migration CRD. - let mut migration_config = MigrationConfig::builder(); - - // Use the specified AWS region for the migration test. - // If no region is specified, derive the appropriate region based on the region of the - // cluster resource CRD (assuming it's an ECS or EKS cluster). 
- if let Some(aws_region) = aws_region_override { - migration_config.aws_region(aws_region) - } else { - migration_config.aws_region_template(cluster_resource_name, "region") - }; - - migration_config - .instance_ids_template( - migration_input - .bottlerocket_crd_name - .as_ref() - .unwrap_or(cluster_resource_name), - instance_id_field_name, - ) - .migrate_to_version(migration_version) - .tuf_repo(migration_input.crd_input.tuf_repo_config()) - .assume_role(migration_input.crd_input.config.agent_role.clone()) - .set_resources(Some( - vec![cluster_resource_name.to_owned()] - .into_iter() - .chain(migration_input.bottlerocket_crd_name.iter().cloned()) - .collect(), - )) - .set_depends_on(Some(migration_input.prev_tests)) - .image( - migration_input - .crd_input - .images - .migration_test_agent_image - .as_ref() - .expect("Missing default image for migration test agent"), - ) - .set_image_pull_secret( - migration_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running( - migration_input - .crd_input - .config - .dev - .keep_tests_running - .unwrap_or(false), - ) - .set_secrets(Some(migration_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}-{}", - cluster_resource_name, - migration_input - .name_suffix - .unwrap_or(migration_input.crd_input.test_flavor.as_str()) - )) - .context(error::BuildSnafu { - what: "migration CRD", - }) -} diff --git a/tools/testsys/src/restart_test.rs b/tools/testsys/src/restart_test.rs deleted file mode 100644 index 85f4fbac..00000000 --- a/tools/testsys/src/restart_test.rs +++ /dev/null @@ -1,18 +0,0 @@ -use crate::error::Result; -use clap::Parser; -use testsys_model::test_manager::TestManager; - -/// Restart a test. This will delete the test object from the testsys cluster and replace it with -/// a new, identical test object with a clean state. -#[derive(Debug, Parser)] -pub(crate) struct RestartTest { - /// The name of the test to be restarted. - #[clap()] - test_name: String, -} - -impl RestartTest { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - Ok(client.restart_test(&self.test_name).await?) - } -} diff --git a/tools/testsys/src/run.rs b/tools/testsys/src/run.rs deleted file mode 100644 index eb03de0a..00000000 --- a/tools/testsys/src/run.rs +++ /dev/null @@ -1,619 +0,0 @@ -use crate::aws_ecs::AwsEcsCreator; -use crate::aws_k8s::AwsK8sCreator; -use crate::crds::{CrdCreator, CrdInput}; -use crate::error; -use crate::error::Result; -use crate::metal_k8s::MetalK8sCreator; -use crate::vmware_k8s::VmwareK8sCreator; -use bottlerocket_variant::Variant; -use clap::Parser; -use log::{debug, info}; -use pubsys_config::vmware::{ - Datacenter, DatacenterBuilder, DatacenterCreds, DatacenterCredsBuilder, DatacenterCredsConfig, - VMWARE_CREDS_PATH, -}; -use pubsys_config::InfraConfig; -use serde::{Deserialize, Serialize}; -use serde_plain::{derive_display_from_serialize, derive_fromstr_from_deserialize}; -use snafu::{OptionExt, ResultExt}; -use std::fs::read_to_string; -use std::path::PathBuf; -use std::str::FromStr; -use testsys_config::{GenericVariantConfig, ResourceAgentType, TestConfig}; -use testsys_model::test_manager::TestManager; -use testsys_model::SecretName; - -/// Run a set of tests for a given arch and variant -#[derive(Debug, Parser)] -pub(crate) struct Run { - /// The type of test to run. Options are `quick` and `conformance`. - test_flavor: TestType, - - /// The architecture to test. Either x86_64 or aarch64. 
- #[arg(long, env = "BUILDSYS_ARCH")] - arch: String, - - /// The variant to test - #[arg(long, env = "BUILDSYS_VARIANT")] - variant: String, - - /// The path to `Infra.toml` - #[arg(long, env = "PUBLISH_INFRA_CONFIG_PATH")] - infra_config_path: PathBuf, - - /// The path to `Test.toml` - #[arg(long, env = "TESTSYS_TEST_CONFIG_PATH")] - test_config_path: PathBuf, - - /// The path to the `tests` directory - #[arg(long, env = "TESTSYS_TESTS_DIR")] - tests_directory: PathBuf, - - /// The path to the EKS-A management cluster kubeconfig for vSphere or metal K8s cluster creation - #[arg(long, env = "TESTSYS_MGMT_CLUSTER_KUBECONFIG")] - mgmt_cluster_kubeconfig: Option, - - /// Use this named repo infrastructure from Infra.toml for upgrade/downgrade testing. - #[arg(long, env = "PUBLISH_REPO")] - repo: Option, - - /// The name of the vSphere data center in `Infra.toml` that should be used for testing - /// If no data center is provided, the first one in `vmware.datacenters` will be used - #[arg(long, env = "TESTSYS_DATACENTER")] - datacenter: Option, - - /// The name of the VMware OVA that should be used for testing - #[arg(long, env = "BUILDSYS_OVA")] - ova_name: Option, - - /// The name of the image that should be used for Bare Metal testing - #[arg(long, env = "BUILDSYS_NAME_FULL")] - image_name: Option, - - /// The path to `amis.json` - #[arg(long, env = "AMI_INPUT")] - ami_input: Option, - - /// Override for the region the tests should be run in. If none is provided the first region in - /// Infra.toml will be used. This is the region that the aws client is created with for testing - /// and resource agents. - #[arg(long, env = "TESTSYS_TARGET_REGION")] - target_region: Option, - - #[arg(long, env = "BUILDSYS_VERSION_BUILD")] - build_id: Option, - - #[command(flatten)] - agent_images: TestsysImages, - - #[command(flatten)] - config: CliConfig, - - // Migrations - /// Override the starting image used for migrations. The image will be pulled from available - /// amis in the users account if no override is provided. - #[arg(long, env = "TESTSYS_STARTING_IMAGE_ID")] - starting_image_id: Option, - - /// The starting version for migrations. This is required for all migrations tests. - /// This is the version that will be created and migrated to `migration-target-version`. - #[arg(long, env = "TESTSYS_STARTING_VERSION")] - migration_starting_version: Option, - - /// The commit id of the starting version for migrations. This is required for all migrations - /// tests unless `starting-image-id` is provided. This is the version that will be created and - /// migrated to `migration-target-version`. - #[arg(long, env = "TESTSYS_STARTING_COMMIT")] - migration_starting_commit: Option, - - /// The target version for migrations. This is required for all migration tests. This is the - /// version that will be migrated to. - #[arg(long, env = "BUILDSYS_VERSION_IMAGE")] - migration_target_version: Option, - - /// The template file that should be used for custom testing. - #[arg(long = "template-file", short = 'f')] - custom_crd_template: Option, -} - -/// This is a CLI parsable version of `testsys_config::GenericVariantConfig`. -#[derive(Debug, Parser)] -struct CliConfig { - /// The repo containing images necessary for conformance testing. It may be omitted to use the - /// default conformance image registry. - #[arg(long, env = "TESTSYS_CONFORMANCE_REGISTRY")] - conformance_registry: Option, - - /// The name of the cluster for resource agents (EKS resource agent, ECS resource agent). 
Note: - /// This is not the name of the `testsys cluster` this is the name of the cluster that tests - /// should be run on. If no cluster name is provided, the bottlerocket cluster - /// naming convention `{{arch}}-{{variant}}` will be used. - #[arg(long, env = "TESTSYS_TARGET_CLUSTER_NAME")] - target_cluster_name: Option, - - /// The sonobuoy image that should be used for conformance testing. It may be omitted to use the default - /// sonobuoy image. - #[arg(long, env = "TESTSYS_SONOBUOY_IMAGE")] - sonobuoy_image: Option, - - /// The image that should be used for conformance testing. It may be omitted to use the default - /// testing image. - #[arg(long, env = "TESTSYS_CONFORMANCE_IMAGE")] - conformance_image: Option, - - /// The role that should be assumed by the agents - #[arg(long, env = "TESTSYS_ASSUME_ROLE")] - assume_role: Option, - - /// Specify the instance type that should be used. This is only applicable for aws-* variants. - /// It can be omitted for non-aws variants and can be omitted to use default instance types. - #[arg(long, env = "TESTSYS_INSTANCE_TYPE")] - instance_type: Option, - - /// Add secrets to the testsys agents (`--secret awsCredentials=my-secret`) - #[arg(long, short, value_parser = parse_key_val, number_of_values = 1)] - secret: Vec<(String, SecretName)>, - - /// The endpoint IP to reserve for the vSphere control plane VMs when creating a K8s cluster - #[arg(long, env = "TESTSYS_CONTROL_PLANE_ENDPOINT")] - pub control_plane_endpoint: Option, - - /// Specify the path to the userdata that should be added for Bottlerocket launch - #[arg(long, env = "TESTSYS_USERDATA")] - pub userdata: Option, - - /// Specify the method that should be used to launch instances - #[arg(long, env = "TESTSYS_RESOURCE_AGENT")] - pub resource_agent_type: Option, - - /// A set of workloads that should be run for a workload test (--workload my-workload=) - #[arg(long = "workload", value_parser = parse_workloads, number_of_values = 1)] - pub workloads: Vec<(String, String)>, - - /// The directory containing Bottlerocket images. For metal, this is the directory containing - /// gzipped images. - #[arg(long)] - pub os_image_dir: Option, - - /// The hardware that should be used for provisioning Bottlerocket. For metal, this is the - /// hardware csv that is passed to EKS Anywhere. 
- #[arg(long)] - pub hardware_csv: Option, -} - -impl From for GenericVariantConfig { - fn from(val: CliConfig) -> Self { - GenericVariantConfig { - cluster_names: val.target_cluster_name.into_iter().collect(), - instance_type: val.instance_type, - resource_agent_type: val.resource_agent_type, - block_device_mapping: Default::default(), - secrets: val.secret.into_iter().collect(), - agent_role: val.assume_role, - sonobuoy_image: val.sonobuoy_image, - conformance_image: val.conformance_image, - conformance_registry: val.conformance_registry, - control_plane_endpoint: val.control_plane_endpoint, - userdata: val.userdata, - os_image_dir: val.os_image_dir, - hardware_csv: val.hardware_csv, - dev: Default::default(), - workloads: val.workloads.into_iter().collect(), - } - } -} - -impl Run { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - // agent config (eventually with configuration) - let variant = Variant::new(&self.variant).context(error::VariantSnafu { - variant: self.variant, - })?; - debug!("Using variant '{}'", variant); - - // Use Test.toml or default - let test_config = TestConfig::from_path_or_default(&self.test_config_path)?; - - let test_opts = test_config.test.to_owned().unwrap_or_default(); - - let (variant_config, test_type) = test_config.reduced_config( - &variant, - &self.arch, - Some(self.config.into()), - &self.test_flavor.to_string(), - ); - let resolved_test_type = TestType::from_str(&test_type) - .expect("All unrecognized test type become `TestType::Custom`"); - - // If a lock file exists, use that, otherwise use Infra.toml or default - let infra_config = InfraConfig::from_path_or_lock(&self.infra_config_path, true)?; - - let repo_config = infra_config - .repo - .unwrap_or_default() - .remove( - &self - .repo - .or(test_opts.repo) - .unwrap_or_else(|| "default".to_string()), - ) - .unwrap_or_default(); - - let images = vec![ - Some(self.agent_images.into()), - Some(test_opts.testsys_images), - test_opts.testsys_image_registry.map(|registry| { - testsys_config::TestsysImages::new(registry, test_opts.testsys_image_tag) - }), - Some(testsys_config::TestsysImages::public_images()), - ] - .into_iter() - .flatten() - .fold(Default::default(), testsys_config::TestsysImages::merge); - - // The `CrdCreator` is responsible for creating crds for the given architecture and variant. - let crd_creator: Box = match variant.family() { - "aws-k8s" => { - debug!("Using family 'aws-k8s'"); - let aws_config = infra_config.aws.unwrap_or_default(); - let region = aws_config - .regions - .front() - .map(String::to_string) - .unwrap_or_else(|| "us-west-2".to_string()); - Box::new(AwsK8sCreator { - region, - ami_input: self.ami_input.context(error::InvalidSnafu { - what: "amis.json is required. You may need to run `cargo make ami`", - })?, - migrate_starting_commit: self.migration_starting_commit, - }) - } - "aws-ecs" => { - debug!("Using family 'aws-ecs'"); - let aws_config = infra_config.aws.unwrap_or_default(); - let region = aws_config - .regions - .front() - .map(String::to_string) - .unwrap_or_else(|| "us-west-2".to_string()); - Box::new(AwsEcsCreator { - region, - ami_input: self.ami_input.context(error::InvalidSnafu { - what: "amis.json is required. 
You may need to run `cargo make ami`", - })?, - migrate_starting_commit: self.migration_starting_commit, - }) - } - "vmware-k8s" => { - debug!("Using family 'vmware-k8s'"); - let aws_config = infra_config.aws.unwrap_or_default(); - let region = aws_config - .regions - .front() - .map(String::to_string) - .unwrap_or_else(|| "us-west-2".to_string()); - let vmware_config = infra_config.vmware.unwrap_or_default(); - let dc_env = DatacenterBuilder::from_env(); - let dc_common = vmware_config.common.as_ref(); - let dc_config = self - .datacenter - .as_ref() - .or_else(|| vmware_config.datacenters.first()) - .and_then(|datacenter| vmware_config.datacenter.get(datacenter)); - - let datacenter: Datacenter = dc_env - .take_missing_from(dc_config) - .take_missing_from(dc_common) - .build() - .context(error::DatacenterBuildSnafu)?; - - let vsphere_secret = if !variant_config.secrets.contains_key("vsphereCredentials") { - info!("Creating vSphere secret, 'vspherecreds'"); - let creds_env = DatacenterCredsBuilder::from_env(); - let creds_file = if let Some(ref creds_file) = *VMWARE_CREDS_PATH { - if creds_file.exists() { - info!("Using vSphere credentials file at {}", creds_file.display()); - DatacenterCredsConfig::from_path(creds_file) - .context(error::VmwareConfigSnafu)? - } else { - info!( - "vSphere credentials file not found, will attempt to use environment" - ); - DatacenterCredsConfig::default() - } - } else { - info!("Unable to determine vSphere credentials file location, will attempt to use environment"); - DatacenterCredsConfig::default() - }; - let dc_creds = creds_file.datacenter.get(&datacenter.datacenter); - let creds: DatacenterCreds = creds_env - .take_missing_from(dc_creds) - .build() - .context(error::CredsBuildSnafu)?; - - let secret_name = - SecretName::new("vspherecreds").context(error::SecretNameSnafu { - secret_name: "vspherecreds", - })?; - client - .create_secret( - &secret_name, - vec![ - ("username".to_string(), creds.username), - ("password".to_string(), creds.password), - ], - ) - .await?; - Some(("vsphereCredentials".to_string(), secret_name)) - } else { - None - }; - - let mgmt_cluster_kubeconfig = - self.mgmt_cluster_kubeconfig.context(error::InvalidSnafu { - what: "A management cluster kubeconfig is required for VMware testing", - })?; - let encoded_kubeconfig = base64::encode( - read_to_string(&mgmt_cluster_kubeconfig).context(error::FileSnafu { - path: mgmt_cluster_kubeconfig, - })?, - ); - - Box::new(VmwareK8sCreator { - region, - ova_name: self.ova_name.context(error::InvalidSnafu { - what: "An OVA name is required for VMware testing.", - })?, - datacenter, - encoded_mgmt_cluster_kubeconfig: encoded_kubeconfig, - creds: vsphere_secret, - }) - } - "metal-k8s" => { - debug!("Using family 'metal-k8s'"); - let aws_config = infra_config.aws.unwrap_or_default(); - let region = aws_config - .regions - .front() - .map(String::to_string) - .unwrap_or_else(|| "us-west-2".to_string()); - - let mgmt_cluster_kubeconfig = - self.mgmt_cluster_kubeconfig.context(error::InvalidSnafu { - what: "A management cluster kubeconfig is required for metal testing", - })?; - let encoded_kubeconfig = base64::encode( - read_to_string(&mgmt_cluster_kubeconfig).context(error::FileSnafu { - path: mgmt_cluster_kubeconfig, - })?, - ); - Box::new(MetalK8sCreator { - region, - encoded_mgmt_cluster_kubeconfig: encoded_kubeconfig, - image_name: self.image_name.context(error::InvalidSnafu{what: "The image name is required for Bare Metal testing. This can be set with `BUILDSYS_NAME_FULL`."})? 
- }) - } - unsupported => { - return Err(error::Error::Unsupported { - what: unsupported.to_string(), - }) - } - }; - - let crd_input = CrdInput { - client: &client, - arch: self.arch, - variant, - build_id: self.build_id, - config: variant_config, - repo_config, - starting_version: self.migration_starting_version, - migrate_to_version: self.migration_target_version, - starting_image_id: self.starting_image_id, - test_type: resolved_test_type.clone(), - test_flavor: self.test_flavor.to_string(), - images, - tests_directory: self.tests_directory, - }; - - let crds = match &resolved_test_type { - TestType::Known(resolved_test_type) => { - crd_creator - .create_crds(resolved_test_type, &crd_input) - .await? - } - TestType::Custom(resolved_test_type) => { - crd_creator - .create_custom_crds( - resolved_test_type, - &crd_input, - self.custom_crd_template.to_owned(), - ) - .await? - } - }; - - debug!("Adding crds to testsys cluster"); - for crd in crds { - let crd = client.create_object(crd).await?; - info!("Successfully added '{}'", crd.name().unwrap()); - } - - Ok(()) - } -} - -fn parse_key_val(s: &str) -> Result<(String, SecretName)> { - let mut iter = s.splitn(2, '='); - let key = iter.next().context(error::InvalidSnafu { - what: "Key is missing", - })?; - let value = iter.next().context(error::InvalidSnafu { - what: "Value is missing", - })?; - Ok(( - key.to_string(), - SecretName::new(value).context(error::SecretNameSnafu { secret_name: value })?, - )) -} - -fn parse_workloads(s: &str) -> Result<(String, String)> { - let mut iter = s.splitn(2, '='); - let key = iter.next().context(error::InvalidSnafu { - what: "Key is missing", - })?; - let value = iter.next().context(error::InvalidSnafu { - what: "Value is missing", - })?; - Ok((key.to_string(), value.to_string())) -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "lowercase")] -pub enum KnownTestType { - /// Conformance testing is a full integration test that asserts that Bottlerocket is working for - /// customer workloads. For k8s variants, for example, this will run the full suite of sonobuoy - /// conformance tests. - Conformance, - /// Run a quick test that ensures a basic workload can run on Bottlerocket. For example, on k8s - /// variance this will run sonobuoy in "quick" mode. For ECS variants, this will run a simple - /// ECS task. - Quick, - /// Migration testing ensures that all bottlerocket migrations work as expected. Instances will - /// be created at the starting version, migrated to the target version and back to the starting - /// version with validation testing. - Migration, - /// Workload testing is used to test specific workloads on a set of Bottlerocket nodes. - Workload, -} - -/// If a test type is one that is supported by TestSys it will be created as `Known(KnownTestType)`. -/// All other test types will be stored as `Custom()`. -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(untagged)] -pub(crate) enum TestType { - Known(KnownTestType), - Custom(String), -} - -derive_fromstr_from_deserialize!(TestType); -derive_display_from_serialize!(TestType); -derive_display_from_serialize!(KnownTestType); - -/// This is a CLI parsable version of `testsys_config::TestsysImages` -#[derive(Debug, Parser)] -pub(crate) struct TestsysImages { - /// EKS resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "eks-resource-agent-image", - env = "TESTSYS_EKS_RESOURCE_AGENT_IMAGE" - )] - pub(crate) eks_resource: Option, - - /// ECS resource agent URI. 
If not provided the latest released resource agent will be used. - #[arg( - long = "ecs-resource-agent-image", - env = "TESTSYS_ECS_RESOURCE_AGENT_IMAGE" - )] - pub(crate) ecs_resource: Option, - - /// vSphere cluster resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "vsphere-k8s-cluster-resource-agent-image", - env = "TESTSYS_VSPHERE_K8S_CLUSTER_RESOURCE_AGENT_IMAGE" - )] - pub(crate) vsphere_k8s_cluster_resource: Option, - - /// Bare Metal cluster resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "metal-k8s-cluster-resource-agent-image", - env = "TESTSYS_METAL_K8S_CLUSTER_RESOURCE_AGENT_IMAGE" - )] - pub(crate) metal_k8s_cluster_resource: Option, - - /// EC2 resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "ec2-resource-agent-image", - env = "TESTSYS_EC2_RESOURCE_AGENT_IMAGE" - )] - pub(crate) ec2_resource: Option, - - /// EC2 Karpenter resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "ec2-resource-agent-image", - env = "TESTSYS_EC2_KARPENTER_RESOURCE_AGENT_IMAGE" - )] - pub(crate) ec2_karpenter_resource: Option, - - /// vSphere VM resource agent URI. If not provided the latest released resource agent will be used. - #[arg( - long = "vsphere-vm-resource-agent-image", - env = "TESTSYS_VSPHERE_VM_RESOURCE_AGENT_IMAGE" - )] - pub(crate) vsphere_vm_resource: Option, - - /// Sonobuoy test agent URI. If not provided the latest released test agent will be used. - #[arg( - long = "sonobuoy-test-agent-image", - env = "TESTSYS_SONOBUOY_TEST_AGENT_IMAGE" - )] - pub(crate) sonobuoy_test: Option, - - /// ECS test agent URI. If not provided the latest released test agent will be used. - #[arg(long = "ecs-test-agent-image", env = "TESTSYS_ECS_TEST_AGENT_IMAGE")] - pub(crate) ecs_test: Option, - - /// Migration test agent URI. If not provided the latest released test agent will be used. - #[arg( - long = "migration-test-agent-image", - env = "TESTSYS_MIGRATION_TEST_AGENT_IMAGE" - )] - pub(crate) migration_test: Option, - - /// K8s workload agent URI. If not provided the latest released test agent will be used. - #[arg( - long = "k8s-workload-agent-image", - env = "TESTSYS_K8S_WORKLOAD_AGENT_IMAGE" - )] - pub(crate) k8s_workload: Option, - - /// ECS workload agent URI. If not provided the latest released test agent will be used. - #[arg( - long = "ecs-workload-agent-image", - env = "TESTSYS_ECS_WORKLOAD_AGENT_IMAGE" - )] - pub(crate) ecs_workload: Option, - - /// TestSys controller URI. If not provided the latest released controller will be used. - #[arg(long = "controller-image", env = "TESTSYS_CONTROLLER_IMAGE")] - pub(crate) controller_uri: Option, - - /// Images pull secret. This is the name of a Kubernetes secret that will be used to - /// pull the container image from a private registry. For example, if you created a pull secret - /// with `kubectl create secret docker-registry regcred` then you would pass - /// `--images-pull-secret regcred`. 
- #[arg(long = "images-pull-secret", env = "TESTSYS_IMAGES_PULL_SECRET")] - pub(crate) secret: Option, -} - -impl From for testsys_config::TestsysImages { - fn from(val: TestsysImages) -> Self { - testsys_config::TestsysImages { - eks_resource_agent_image: val.eks_resource, - ecs_resource_agent_image: val.ecs_resource, - vsphere_k8s_cluster_resource_agent_image: val.vsphere_k8s_cluster_resource, - metal_k8s_cluster_resource_agent_image: val.metal_k8s_cluster_resource, - ec2_resource_agent_image: val.ec2_resource, - ec2_karpenter_resource_agent_image: val.ec2_karpenter_resource, - vsphere_vm_resource_agent_image: val.vsphere_vm_resource, - sonobuoy_test_agent_image: val.sonobuoy_test, - ecs_test_agent_image: val.ecs_test, - migration_test_agent_image: val.migration_test, - k8s_workload_agent_image: val.k8s_workload, - ecs_workload_agent_image: val.ecs_workload, - controller_image: val.controller_uri, - testsys_agent_pull_secret: val.secret, - } - } -} diff --git a/tools/testsys/src/secret.rs b/tools/testsys/src/secret.rs deleted file mode 100644 index 6343c163..00000000 --- a/tools/testsys/src/secret.rs +++ /dev/null @@ -1,118 +0,0 @@ -use crate::error::{self, Result}; -use clap::Parser; -use snafu::OptionExt; -use testsys_model::test_manager::TestManager; -use testsys_model::SecretName; - -/// Add a testsys object to the testsys cluster. -#[derive(Debug, Parser)] -pub(crate) struct Add { - #[command(subcommand)] - command: AddCommand, -} - -#[derive(Debug, Parser)] -enum AddCommand { - /// Add a secret to the testsys cluster. - Secret(AddSecret), -} - -impl Add { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - match self.command { - AddCommand::Secret(add_secret) => add_secret.run(client).await, - } - } -} - -/// Add a secret to the cluster. -#[derive(Debug, Parser)] -pub(crate) struct AddSecret { - #[command(subcommand)] - command: Command, -} - -#[derive(Debug, Parser)] -enum Command { - /// Create a secret for image pulls. - Image(AddSecretImage), - /// Create a secret from key value pairs. - Map(AddSecretMap), -} - -impl AddSecret { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - match self.command { - Command::Image(add_secret_image) => add_secret_image.run(client).await, - Command::Map(add_secret_map) => add_secret_map.run(client).await, - } - } -} - -/// Add a `Secret` with key value pairs. -#[derive(Debug, Parser)] -pub(crate) struct AddSecretMap { - /// Name of the secret - #[arg(short, long)] - name: SecretName, - - /// Key value pairs for secrets. (Key=value) - #[arg(value_parser = parse_key_val)] - args: Vec<(String, String)>, -} - -impl AddSecretMap { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - client.create_secret(&self.name, self.args).await?; - println!("Successfully added '{}' to secrets.", self.name); - Ok(()) - } -} - -fn parse_key_val(s: &str) -> Result<(String, String)> { - let mut iter = s.splitn(2, '='); - let key = iter.next().context(error::InvalidSnafu { - what: "Key is missing", - })?; - let value = iter.next().context(error::InvalidSnafu { - what: "Value is missing", - })?; - Ok((key.to_string(), value.to_string())) -} - -/// Add a secret to the testsys cluster for image pulls. 
-#[derive(Debug, Parser)] -pub(crate) struct AddSecretImage { - /// Controller image pull username - #[arg(long, short = 'u')] - pull_username: String, - - /// Controller image pull password - #[arg(long, short = 'p')] - pull_password: String, - - /// Image uri - #[arg(long = "image-uri", short)] - image_uri: String, - - /// Controller image uri - #[arg(long, short = 'n')] - secret_name: String, -} - -impl AddSecretImage { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - client - .create_image_pull_secret( - &self.secret_name, - &self.pull_username, - &self.pull_password, - &self.image_uri, - ) - .await?; - - println!("The secret was added."); - - Ok(()) - } -} diff --git a/tools/testsys/src/sonobuoy.rs b/tools/testsys/src/sonobuoy.rs deleted file mode 100644 index d3288442..00000000 --- a/tools/testsys/src/sonobuoy.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::crds::TestInput; -use crate::error::{self, Result}; -use crate::run::KnownTestType; -use bottlerocket_types::agent_config::{ - SonobuoyConfig, SonobuoyMode, WorkloadConfig, WorkloadTest, -}; -use maplit::btreemap; -use snafu::ResultExt; -use std::fmt::Display; -use testsys_model::Test; - -/// Create a Sonobuoy CRD for K8s conformance and quick testing. -pub(crate) fn sonobuoy_crd(test_input: TestInput) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for sonobuoy testing"); - let bottlerocket_resource_name = test_input.bottlerocket_crd_name; - let sonobuoy_mode = match test_input.test_type { - KnownTestType::Conformance => SonobuoyMode::CertifiedConformance, - KnownTestType::Quick | KnownTestType::Migration | KnownTestType::Workload => { - SonobuoyMode::Quick - } - }; - - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/flavor".to_string() => test_input.crd_input.test_flavor.clone(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - - SonobuoyConfig::builder() - .set_resources(Some(bottlerocket_resource_name.iter().cloned().collect())) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .sonobuoy_test_agent_image - .to_owned() - .expect("The default Sonobuoy testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running( - test_input - .crd_input - .config - .dev - .keep_tests_running - .unwrap_or(false), - ) - .kubeconfig_base64_template(cluster_resource_name, "encodedKubeconfig") - .plugin("e2e") - .mode(sonobuoy_mode) - .e2e_repo_config_base64( - test_input - .crd_input - .config - .conformance_registry - .to_owned() - .map(e2e_repo_config_base64), - ) - .sonobuoy_image(test_input.crd_input.config.sonobuoy_image.to_owned()) - .kube_conformance_image(test_input.crd_input.config.conformance_image.to_owned()) - .assume_role(test_input.crd_input.config.agent_role.to_owned()) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}-{}", - cluster_resource_name, - test_input - .name_suffix - .unwrap_or(test_input.crd_input.test_flavor.as_str()) - )) - .context(error::BuildSnafu { - what: "Sonobuoy CRD", - }) -} - -/// Create a workload CRD for K8s testing. 
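// Each configured workload is a `name=image` pair (see `--workload` in run.rs); the
// function below turns every pair into a `WorkloadTest` plugin. A hypothetical example:
//
//     // --workload nvidia-smoke=example.com/nvidia-workload:v0.1 becomes roughly
//     WorkloadTest {
//         name: "nvidia-smoke".to_string(),
//         image: "example.com/nvidia-workload:v0.1".to_string(),
//         ..Default::default()
//     }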
-pub(crate) fn workload_crd(test_input: TestInput) -> Result { - let cluster_resource_name = test_input - .cluster_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - let bottlerocket_resource_name = test_input - .bottlerocket_crd_name - .as_ref() - .expect("A cluster name is required for migrations"); - - let labels = test_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => test_input.test_type.to_string(), - "testsys/cluster".to_string() => cluster_resource_name.to_string(), - }); - let plugins: Vec<_> = test_input - .crd_input - .config - .workloads - .iter() - .map(|(name, image)| WorkloadTest { - name: name.to_string(), - image: image.to_string(), - ..Default::default() - }) - .collect(); - if plugins.is_empty() { - return Err(error::Error::Invalid { - what: "There were no plugins specified in the workload test. - Workloads can be specified in `Test.toml` or via the command line." - .to_string(), - }); - } - - WorkloadConfig::builder() - .resources(bottlerocket_resource_name) - .resources(cluster_resource_name) - .set_depends_on(Some(test_input.prev_tests)) - .set_retries(Some(5)) - .image( - test_input - .crd_input - .images - .k8s_workload_agent_image - .to_owned() - .expect("The default K8s workload testing image is missing"), - ) - .set_image_pull_secret( - test_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .keep_running(true) - .kubeconfig_base64_template(cluster_resource_name, "encodedKubeconfig") - .tests(plugins) - .set_secrets(Some(test_input.crd_input.config.secrets.to_owned())) - .set_labels(Some(labels)) - .build(format!( - "{}{}", - cluster_resource_name, - test_input.name_suffix.unwrap_or("-test") - )) - .context(error::BuildSnafu { - what: "Workload CRD", - }) -} - -fn e2e_repo_config_base64(e2e_registry: S) -> String -where - S: Display, -{ - base64::encode(format!( - r#"buildImageRegistry: {e2e_registry} -dockerGluster: {e2e_registry} -dockerLibraryRegistry: {e2e_registry} -e2eRegistry: {e2e_registry} -e2eVolumeRegistry: {e2e_registry} -gcRegistry: {e2e_registry} -gcEtcdRegistry: {e2e_registry} -promoterE2eRegistry: {e2e_registry} -sigStorageRegistry: {e2e_registry}"# - )) -} diff --git a/tools/testsys/src/status.rs b/tools/testsys/src/status.rs deleted file mode 100644 index 2aadcd99..00000000 --- a/tools/testsys/src/status.rs +++ /dev/null @@ -1,128 +0,0 @@ -use crate::error::{self, Result}; -use clap::Parser; -use log::{debug, info}; -use serde::Deserialize; -use serde_plain::derive_fromstr_from_deserialize; -use snafu::ResultExt; -use testsys_model::test_manager::{CrdState, CrdType, SelectionParams, StatusColumn, TestManager}; - -/// Check the status of testsys objects. -#[derive(Debug, Parser)] -pub(crate) struct Status { - /// Configure the output of the command (json, narrow, wide). 
- #[arg(long, short = 'o')] - output: Option, - - /// Focus status on a particular arch - #[arg(long)] - arch: Option, - - /// Focus status on a particular variant - #[arg(long)] - variant: Option, - - /// Only show tests - #[arg(long)] - test: bool, - - /// Only show passed tests - #[arg(long, conflicts_with_all=&["failed", "running"])] - passed: bool, - - /// Only show failed tests - #[arg(long, conflicts_with_all=&["passed", "running"])] - failed: bool, - - /// Only CRD's that haven't finished - #[arg(long, conflicts_with_all=&["passed", "failed"])] - running: bool, -} - -impl Status { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - let state = if self.running { - Some(CrdState::NotFinished) - } else if self.passed { - Some(CrdState::Passed) - } else if self.failed { - Some(CrdState::Failed) - } else { - None - }; - let crd_type = self.test.then_some(CrdType::Test); - let mut labels = Vec::new(); - if let Some(arch) = self.arch { - labels.push(format!("testsys/arch={}", arch)) - }; - if let Some(variant) = self.variant { - labels.push(format!("testsys/variant={}", variant)) - }; - let mut status = client - .status(&SelectionParams { - labels: Some(labels.join(",")), - state, - crd_type, - ..Default::default() - }) - .await?; - - status.add_column(StatusColumn::name()); - status.add_column(StatusColumn::crd_type()); - status.add_column(StatusColumn::state()); - status.add_column(StatusColumn::passed()); - status.add_column(StatusColumn::failed()); - status.add_column(StatusColumn::skipped()); - - match self.output { - Some(StatusOutput::Json) => { - info!( - "{}", - serde_json::to_string_pretty(&status).context(error::SerdeJsonSnafu { - what: "Could not create string from status." - })? - ); - return Ok(()); - } - Some(StatusOutput::Narrow) => (), - None => { - status.new_column("BUILD ID", |crd| { - crd.labels() - .get("testsys/build-id") - .cloned() - .into_iter() - .collect() - }); - status.add_column(StatusColumn::last_update()); - } - Some(StatusOutput::Wide) => { - status.new_column("BUILD ID", |crd| { - crd.labels() - .get("testsys/build-id") - .cloned() - .into_iter() - .collect() - }); - status.add_column(StatusColumn::last_update()); - } - }; - - let (width, _) = term_size::dimensions().unwrap_or((80, 0)); - debug!("Window width '{}'", width); - println!("{:width$}", status); - - Ok(()) - } -} - -#[derive(Debug, Deserialize, Clone)] -#[serde(rename_all = "kebab-case")] -enum StatusOutput { - /// Output the status in json - Json, - /// Show minimal columns in the status table - Narrow, - /// Show all columns in the status table - Wide, -} - -derive_fromstr_from_deserialize!(StatusOutput); diff --git a/tools/testsys/src/uninstall.rs b/tools/testsys/src/uninstall.rs deleted file mode 100644 index 5a55f0fc..00000000 --- a/tools/testsys/src/uninstall.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::error::Result; -use clap::Parser; -use log::{info, trace}; -use testsys_model::test_manager::TestManager; - -/// The uninstall subcommand is responsible for removing all of the components for testsys in -/// a k8s cluster. This is completed by removing the `testsys-bottlerocket-aws` namespace. 
-#[derive(Debug, Parser)] -pub(crate) struct Uninstall {} - -impl Uninstall { - pub(crate) async fn run(self, client: TestManager) -> Result<()> { - trace!("Uninstalling testsys"); - - client.uninstall().await?; - - info!("testsys components were successfully uninstalled."); - - Ok(()) - } -} diff --git a/tools/testsys/src/vmware_k8s.rs b/tools/testsys/src/vmware_k8s.rs deleted file mode 100644 index 43d26f77..00000000 --- a/tools/testsys/src/vmware_k8s.rs +++ /dev/null @@ -1,299 +0,0 @@ -use crate::crds::{ - BottlerocketInput, ClusterInput, CrdCreator, CrdInput, CreateCrdOutput, MigrationInput, - TestInput, -}; -use crate::error::{self, Result}; -use crate::migration::migration_crd; -use crate::sonobuoy::{sonobuoy_crd, workload_crd}; -use bottlerocket_types::agent_config::{ - CreationPolicy, CustomUserData, K8sVersion, VSphereK8sClusterConfig, VSphereK8sClusterInfo, - VSphereVmConfig, -}; -use maplit::btreemap; -use pubsys_config::vmware::Datacenter; -use snafu::{OptionExt, ResultExt}; -use std::collections::BTreeMap; -use std::iter::repeat_with; -use std::str::FromStr; -use testsys_model::{Crd, DestructionPolicy, SecretName}; - -/// A `CrdCreator` responsible for creating crd related to `vmware-k8s` variants. -pub(crate) struct VmwareK8sCreator { - pub(crate) region: String, - pub(crate) datacenter: Datacenter, - pub(crate) creds: Option<(String, SecretName)>, - pub(crate) ova_name: String, - pub(crate) encoded_mgmt_cluster_kubeconfig: String, -} - -#[async_trait::async_trait] -impl CrdCreator for VmwareK8sCreator { - /// Use the provided OVA name for the image id. - async fn image_id(&self, _: &CrdInput) -> Result { - Ok(self.ova_name.to_string()) - } - - /// Use standard naming conventions to predict the starting OVA. - async fn starting_image_id(&self, crd_input: &CrdInput) -> Result { - Ok(format!( - "bottlerocket-{}-{}-{}.ova", - crd_input.variant, - crd_input.arch, - crd_input - .starting_version - .as_ref() - .context(error::InvalidSnafu { - what: "The starting version must be provided for migration testing" - })? - )) - } - - /// Creates a vSphere K8s cluster CRD with the `cluster_name` in `cluster_input`. - async fn cluster_crd<'a>(&self, cluster_input: ClusterInput<'a>) -> Result { - let control_plane_endpoint = cluster_input - .crd_input - .config - .control_plane_endpoint - .as_ref() - .context(error::InvalidSnafu { - what: "The control plane endpoint is required for VMware cluster creation.", - })?; - let labels = cluster_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "cluster".to_string(), - "testsys/cluster".to_string() => cluster_input.cluster_name.to_string(), - "testsys/controlPlaneEndpoint".to_string() => control_plane_endpoint.to_string(), - }); - - // Check if the cluster already has a CRD - if let Some(cluster_crd) = cluster_input - .crd_input - .existing_crds( - &labels, - &[ - "testsys/cluster", - "testsys/type", - "testsys/controlPlaneEndpoint", - ], - ) - .await? - .pop() - { - return Ok(CreateCrdOutput::ExistingCrd(cluster_crd)); - } - - // Check if an existing cluster is using this endpoint - let existing_clusters = cluster_input - .crd_input - .existing_crds(&labels, &["testsys/type", "testsys/controlPlaneEndpoint"]) - .await?; - - let cluster_version = - K8sVersion::from_str(cluster_input.crd_input.variant.version().context( - error::MissingSnafu { - item: "K8s version".to_string(), - what: "aws-k8s variant".to_string(), - }, - )?) 
- .map_err(|_| error::Error::K8sVersion { - version: cluster_input.crd_input.variant.to_string(), - })?; - - let vsphere_k8s_crd = VSphereK8sClusterConfig::builder() - .name(cluster_input.cluster_name) - .set_labels(Some(labels)) - .control_plane_endpoint_ip(control_plane_endpoint) - .creation_policy(CreationPolicy::IfNotExists) - .version(cluster_version) - .ova_name(self.image_id(cluster_input.crd_input).await?) - .tuf_repo( - cluster_input - .crd_input - .tuf_repo_config() - .context(error::InvalidSnafu { - what: "TUF repo information is required for VMware cluster creation.", - })?, - ) - .vcenter_host_url(&self.datacenter.vsphere_url) - .vcenter_datacenter(&self.datacenter.datacenter) - .vcenter_datastore(&self.datacenter.datastore) - .vcenter_network(&self.datacenter.network) - .vcenter_resource_pool(&self.datacenter.resource_pool) - .vcenter_workload_folder(&self.datacenter.folder) - .mgmt_cluster_kubeconfig_base64(&self.encoded_mgmt_cluster_kubeconfig) - .eks_a_release_manifest_url( - cluster_input - .crd_input - .config - .dev - .eks_a_release_manifest_url - .clone(), - ) - .set_conflicts_with(Some(existing_clusters)) - .destruction_policy( - cluster_input - .crd_input - .config - .dev - .cluster_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ) - .image( - cluster_input - .crd_input - .images - .vsphere_k8s_cluster_resource_agent_image - .as_ref() - .expect( - "The default vSphere K8s cluster resource provider image URI is missing.", - ), - ) - .set_image_pull_secret( - cluster_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .set_secrets(Some( - cluster_input - .crd_input - .config - .secrets - .clone() - .into_iter() - .chain(self.creds.clone()) - .collect(), - )) - .privileged(true) - .build(cluster_input.cluster_name) - .context(error::BuildSnafu { - what: "vSphere K8s cluster CRD", - })?; - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - vsphere_k8s_crd, - )))) - } - - /// Create a vSphere VM provider CRD to launch Bottlerocket VMs on the cluster created by - /// `cluster_crd`. - async fn bottlerocket_crd<'a>( - &self, - bottlerocket_input: BottlerocketInput<'a>, - ) -> Result { - let cluster_name = bottlerocket_input - .cluster_crd_name - .as_ref() - .expect("A vSphere K8s cluster provider is required"); - let labels = bottlerocket_input.crd_input.labels(btreemap! { - "testsys/type".to_string() => "vms".to_string(), - "testsys/cluster".to_string() => cluster_name.to_string(), - }); - - // Check if other VMs are using this cluster - let existing_clusters = bottlerocket_input - .crd_input - .existing_crds(&labels, &["testsys/type", "testsys/cluster"]) - .await?; - - let suffix: String = repeat_with(fastrand::lowercase).take(4).collect(); - let vsphere_vm_crd = VSphereVmConfig::builder() - .ova_name(bottlerocket_input.image_id) - .tuf_repo(bottlerocket_input.crd_input.tuf_repo_config().context( - error::InvalidSnafu { - what: "TUF repo information is required for Bottlerocket vSphere VM creation.", - }, - )?) 
- .vcenter_host_url(&self.datacenter.vsphere_url) - .vcenter_datacenter(&self.datacenter.datacenter) - .vcenter_datastore(&self.datacenter.datastore) - .vcenter_network(&self.datacenter.network) - .vcenter_resource_pool(&self.datacenter.resource_pool) - .vcenter_workload_folder(&self.datacenter.folder) - .cluster(VSphereK8sClusterInfo { - name: format!("${{{}.clusterName}}", cluster_name), - control_plane_endpoint_ip: format!("${{{}.endpoint}}", cluster_name), - kubeconfig_base64: format!("${{{}.encodedKubeconfig}}", cluster_name), - }) - .custom_user_data( - bottlerocket_input - .crd_input - .encoded_userdata()? - .map(|encoded_userdata| CustomUserData::Merge { encoded_userdata }), - ) - .assume_role(bottlerocket_input.crd_input.config.agent_role.clone()) - .set_labels(Some(labels)) - .set_conflicts_with(Some(existing_clusters)) - .destruction_policy( - bottlerocket_input - .crd_input - .config - .dev - .bottlerocket_destruction_policy - .to_owned() - .unwrap_or(DestructionPolicy::OnTestSuccess), - ) - .image( - bottlerocket_input - .crd_input - .images - .vsphere_vm_resource_agent_image - .as_ref() - .expect("The default vSphere VM resource provider image URI is missing."), - ) - .set_image_pull_secret( - bottlerocket_input - .crd_input - .images - .testsys_agent_pull_secret - .to_owned(), - ) - .set_secrets(Some( - bottlerocket_input - .crd_input - .config - .secrets - .clone() - .into_iter() - .chain(self.creds.clone()) - .collect(), - )) - .depends_on(cluster_name) - .build(format!("{}-vms-{}", cluster_name, suffix)) - .context(error::BuildSnafu { - what: "vSphere VM CRD", - })?; - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Resource( - vsphere_vm_crd, - )))) - } - - async fn migration_crd<'a>( - &self, - migration_input: MigrationInput<'a>, - ) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(migration_crd( - migration_input, - // Let the migration test's SSM RunDocuments and RunCommand invocations happen in 'us-west-2' - // FIXME: Do we need to allow this to be configurable? - Some("us-west-2".to_string()), - "instanceIds", - )?)))) - } - - async fn test_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(sonobuoy_crd( - test_input, - )?)))) - } - - async fn workload_crd<'a>(&self, test_input: TestInput<'a>) -> Result { - Ok(CreateCrdOutput::NewCrd(Box::new(Crd::Test(workload_crd( - test_input, - )?)))) - } - - fn additional_fields(&self, _test_type: &str) -> BTreeMap { - btreemap! {"region".to_string() => self.region.clone()} - } -} From 0eff356de1c9aec4fb8e1d0ca4922d84070833d4 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Mon, 18 Sep 2023 15:51:28 -0700 Subject: [PATCH 1117/1356] fix cargo install twoliter Unfortunately the cargo install command does not seem to respect the rust-toolchain.toml and .cargo/config.toml settings when installing from a git reference. https://github.com/rust-lang/cargo/issues/11036 We need to fix this by adding +nightly and specifying -Z bindeps in the cargo install command for twoliter. 
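For illustration, the adjusted invocation can be sketched roughly as below. The `+nightly` toolchain override and the `-Z bindeps` flag are the ones this patch adds; the repository URL and the `twoliter` package selection are placeholder assumptions for the sketch, not taken from the script itself.

```sh
#!/usr/bin/env bash
# Sketch only: install Twoliter from a git reference with the nightly
# toolchain and the unstable binary-dependencies feature enabled, since
# cargo ignores rust-toolchain.toml and .cargo/config.toml in this case.
repo="https://github.com/bottlerocket-os/twoliter"   # assumed URL, for illustration
workdir="$(mktemp -d)"

cargo +nightly install \
  -Z bindeps \
  --locked \
  --root "${workdir}" \
  --git "${repo}" \
  twoliter   # package name assumed for illustration
```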
--- tools/install-twoliter.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh index 959643fd..cf54fe75 100755 --- a/tools/install-twoliter.sh +++ b/tools/install-twoliter.sh @@ -146,7 +146,8 @@ else fi if [ "${from_source}" = "true" ] ; then - cargo install \ + cargo +nightly install \ + -Z bindeps \ --locked \ --root "${workdir}" \ --git "${repo}" \ From d19ef3ec24cb4a1da4dc47cf5b202916af90c41f Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 20 Sep 2023 11:33:04 -0700 Subject: [PATCH 1118/1356] skip reinstillation of twoliter from source When we are testing changes to Twoliter, we need to install from git. In that case the sha is not going to match the version output from Twoliter --version. This means that we have to rebuild and install Twoliter each time we call cargo make. It takes 10 minutes to compile Twoliter. Fix this such that TWOLITER_ALLOW_BINARY_INSTALL=false skips checking the version when any `twoliter` is installed. --- tools/install-twoliter.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh index cf54fe75..f2db4cad 100755 --- a/tools/install-twoliter.sh +++ b/tools/install-twoliter.sh @@ -110,6 +110,10 @@ on_exit "rm -rf ${workdir}" if [ "${reuse_existing}" = "true" ] ; then if [ -x "${dir}/twoliter" ] ; then + if [ "${allow_bin}" != "true" ]; then + echo "Twoliter binary found and --allow-binary-install is false. Skipping install." + exit 0 + fi version_output="$("${dir}/twoliter" --version)" found_version=v$(echo $version_output | awk '{print $2}') echo "Found twoliter ${found_version} installed." From 3cb316b3af0a85aa52585189d32bdc44f4006f81 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 15 Sep 2023 09:12:17 +0000 Subject: [PATCH 1119/1356] models: add new setting to auto-load kernel modules Loading specific kernel modules can be necessary to use specific features on your node. One example would be loading the correct module for your choice of scheduling algorithm for ipvs. Loading kernel modules is currently only possible during boot and requires the use and maintenance of an init-container image to load the desired kernel modules on boot. We can simplify that by adding the additional setting `autoload` for kernel modules and utilizing the already available systemd-modules-load service. Module auto-loading is in conflict with blocking modules from loading through the sibling setting `allowed`. Hence, do not auto-load a module if the module is not allowed at the same time. Blocking takes precedence, as it is the more prohibitive operation. Signed-off-by: Leonard Foerster --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 6a127048..c5cceef2 100644 --- a/README.md +++ b/README.md @@ -1164,6 +1164,17 @@ Here are the metrics settings: allowed = false ``` +* `settings.kernel.modules..autoload`: Whether the named kernel modules shall be loaded automatically. + **Important note:** this setting needs to be used in conjunction with the `allowed` setting for the same module to ensure we are not auto-loading a module that is blocked. + + Example user data for auto-loading a kernel module on boot: + + ```toml + [settings.kernel.modules.ip_vs_lc] + allowed = true + autoload = true + ``` + * `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. Remember to quote keys (since they often contain ".") and to quote all values. 
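As a further illustration of the precedence described in the commit message above, a hypothetical snippet where blocking wins might look like the following; the module name is only an example, and because `allowed` is false the `autoload` flag has no effect and the module stays blocked.

```toml
# Sketch: blocking takes precedence, so this module is neither
# loadable nor auto-loaded, despite autoload being set to true.
[settings.kernel.modules.sctp]
allowed = false
autoload = true
```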
From 1c5cb47809394479a668cd261e657a20bfe3e1d6 Mon Sep 17 00:00:00 2001 From: Josh Hart Date: Fri, 22 Sep 2023 08:53:10 +0100 Subject: [PATCH 1120/1356] readme: clarify host lifecycle behaviour --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c5cceef2..fdf1c140 100644 --- a/README.md +++ b/README.md @@ -1317,6 +1317,7 @@ There are a few important caveats to understand about host containers: * They run in a separate instance of containerd than the one used for orchestrated containers like Kubernetes pods. * They're not updated automatically. You need to update the `source` and commit those changes. * If you set `superpowered` to true, they'll essentially have root access to the host. +* If the container exits for any reason, Bottlerocket will attempt to restart it after 45 seconds. Because of these caveats, host containers are only intended for special use cases. We use them for the control container because it needs to be available early to give you access to the OS, and for the admin container because it needs high levels of privilege and because you need it to debug when orchestration isn't working. From 77b837b96762e8ec8425d46bc1ef6028e879f3d5 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 21 Sep 2023 15:48:27 +0000 Subject: [PATCH 1121/1356] actions: Update for twoliter changes There were a few paths being cached and targets being called that are no longer relevant with the move to twoliter. This cleans up those instances. Signed-off-by: Sean McGinnis --- .github/actions/setup-node/action.yml | 17 +---------------- .github/workflows/cache.yml | 9 ++------- 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index d6db96ed..666e9cc6 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -14,7 +14,7 @@ runs: sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool shell: bash - uses: actions/cache@v3 - # Cache `cargo-make`, `cargo-cache`, `cargo-sweep` + # Cache `cargo-make`, `cargo-cache` with: path: | ~/.cargo @@ -25,23 +25,8 @@ runs: path: | .cargo key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} - - uses: actions/cache@v3 - # Cache 'tools/' dependencies and build artifacts - with: - path: | - tools/bin - tools/.crates.toml - tools/.crates2.json - tools/target - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('tools/Cargo.lock') }} - run: cargo install --locked --version 0.36.0 cargo-make shell: bash - - run: cargo install --locked --version 0.6.2 cargo-sweep - shell: bash - if: ${{ inputs.perform-cache-cleanup }} run: cargo install --locked --version 0.8.3 --no-default-features --features ci-autoclean cargo-cache shell: bash - - run: | - cargo sweep -i -r tools/ - cargo sweep -t 7 -r tools/ - shell: bash diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index fd93f220..2b66cc02 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -7,18 +7,15 @@ on: paths: - '.github/**' - 'sources/Cargo.lock' - - 'tools/buildsys/**' - 'tools/pubsys*/**' - '!tools/pubsys/policies/**' - '!tools/pubsys/**.example' - - '!tools/pubsys/**.template' - - 'tools/Cargo.lock' jobs: cache: if: github.repository == 'bottlerocket-os/bottlerocket' runs-on: group: bottlerocket - labels: bottlerocket_ubuntu-latest_16-core + labels: 
bottlerocket_ubuntu-latest_8-core continue-on-error: true steps: - uses: actions/checkout@v4 @@ -26,8 +23,6 @@ jobs: uses: ./.github/actions/setup-node with: perform-cache-cleanup: true - - run: cargo make publish-setup-tools - - run: cargo make publish-tools - - run: cargo make build-tools + - run: cargo make install-twoliter # This cleans the cargo cache in ~/.cargo - run: cargo-cache From 9faa706e83086c519a33e658322055e12f7cc832 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 20 Sep 2023 21:56:41 +0000 Subject: [PATCH 1122/1356] trivial: Add entries to .mailmap file This adds some common user entries to the .mailmap file so things like `git shortlog -se` is able to combine different users into one. Signed-off-by: Sean McGinnis --- .mailmap | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/.mailmap b/.mailmap index 96bbec15..dbe230b0 100644 --- a/.mailmap +++ b/.mailmap @@ -7,3 +7,28 @@ Tom Kirchner Zac Mrowicki Zac Mrowicki Mahdi Chaker M +Arnaldo Garcia Rincon +Arnaldo Garcia Rincon +Ben Cressey +Erikson Tung +Jacob Vallejo +John McBride +Kyle J. Davis +Markus Boehme +Matthew James Briggs +Matthew James Briggs <6260372+webern@users.noreply.github.com> +Matthew James Briggs +Matthew James Briggs Matt Briggs +Matthew Yeazel <67169369+yeazelm@users.noreply.github.com> +Matthias Sterckx +Samuel Karp +Sanika Shah +Sean Kelly +Sean McGinnis +Sean P. Kelly +Shailesh Gothi +Tianhao Geng +Tianhao Geng <45469883+gthao313@users.noreply.github.com> +Ethan Pullen +Ethan Pullen +Shikha Vyaghra <107685805+vyaghras@users.noreply.github.com> From bb1bd56c3a3519dfa351d080560b079eb43e937f Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Thu, 14 Sep 2023 18:07:56 +0000 Subject: [PATCH 1123/1356] pluto: add hyper-proxy as a module The 'hyper_proxy' module is clone of tafia/hyper-proxy, but modified to take advantage of newer Rust dependencies. Since Bottlerocket only uses 'rustls', other features and unused lines of code have been removed. --- COPYRIGHT | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/COPYRIGHT b/COPYRIGHT index 235f44a2..8dae6197 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -17,3 +17,7 @@ operating system images. macros/rust and macros/cargo (used during build) are derived from the Fedora Rust SIG's rust2rpm. https://pagure.io/fedora-rust/rust2rpm Copyright (c) 2017 Igor Gnatenko + +Contains modified hyper-proxy files [mod.rs, stream.rs, tunnel.rs] from +https://github.com/tafia/hyper-proxy 2021-09-20. +Copyright (c) 2017 Johann Tuffe. Licensed under the MIT License. From 251f7ba19dcc2ef5a8722d55756a68145db0c640 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Mon, 18 Sep 2023 21:35:54 +0000 Subject: [PATCH 1124/1356] copyright: remove macros/rust and macros/cargo --- COPYRIGHT | 4 ---- 1 file changed, 4 deletions(-) diff --git a/COPYRIGHT b/COPYRIGHT index 8dae6197..d670cbd2 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -14,10 +14,6 @@ operating system images. =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= -macros/rust and macros/cargo (used during build) are derived from the Fedora Rust SIG's rust2rpm. -https://pagure.io/fedora-rust/rust2rpm -Copyright (c) 2017 Igor Gnatenko - Contains modified hyper-proxy files [mod.rs, stream.rs, tunnel.rs] from https://github.com/tafia/hyper-proxy 2021-09-20. Copyright (c) 2017 Johann Tuffe. Licensed under the MIT License. 
From 628f13c6d5cf88d7bda577437749a26e31433d9f Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Mon, 25 Sep 2023 15:34:14 -0700 Subject: [PATCH 1125/1356] build: twoliter skip version check Allow the skipping of the Twoliter version when deciding whether or not to install Twoliter. This allows us to accept any installed Twoliter binary and move on without re-installing. --- tools/install-twoliter.sh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh index f2db4cad..e201152e 100755 --- a/tools/install-twoliter.sh +++ b/tools/install-twoliter.sh @@ -45,6 +45,10 @@ Usage: $0 -r GIT_REPO -v TWOLITER_VERSION -d INSTALL_DIR [-e REUSE_EXISTING] [-b -s, --allow-from-source we will install from source using cargo install pointed to a git repo and rev when binary install is either not allowed or not possible + -k, --skip-version-check do not check to see if the installed version matches the one that + is requested by the --version argument. twoliter will not be + installed when the binary is present, regardless of what version + it is. -h, --help show this help text Example invocation: @@ -95,6 +99,8 @@ while [[ $# -gt 0 ]]; do allow_bin="true" ;; -s|--allow-from-source) from_source="true" ;; + -k|--skip-version-check) + skip_version_check="true" ;; -h|--help) usage; exit 0 ;; *) @@ -110,13 +116,13 @@ on_exit "rm -rf ${workdir}" if [ "${reuse_existing}" = "true" ] ; then if [ -x "${dir}/twoliter" ] ; then - if [ "${allow_bin}" != "true" ]; then - echo "Twoliter binary found and --allow-binary-install is false. Skipping install." + if [ "${skip_version_check}" = "true" ]; then + echo "Twoliter binary found and --skip-version-check is true. Skipping install." exit 0 fi version_output="$("${dir}/twoliter" --version)" found_version=v$(echo $version_output | awk '{print $2}') - echo "Found twoliter ${found_version} installed." + echo "Found Twoliter ${found_version} installed." if [ "${found_version}" = "${version}" ] ; then echo "Skipping installation." exit 0 @@ -131,7 +137,7 @@ if [ "${allow_bin}" = "true" ] ; then host_kernel="${host_kernel,,}" case "${host_kernel}-${host_arch}" in linux-x86_64 | linux-aarch64) - echo "Installing twoliter from binary release." + echo "Installing Twoliter from binary release." twoliter_release="${repo}/releases/download/${version}" twoliter_target="${host_arch}-unknown-${host_kernel}-musl" cd "${workdir}" @@ -146,10 +152,11 @@ if [ "${allow_bin}" = "true" ] ; then ;; esac else - echo "Skipped installing twoliter ${version} from pre-built binaries." + echo "Skipping binary installation of twoliter ${version} because --allow-binary-install was not set." fi if [ "${from_source}" = "true" ] ; then + echo "Installing Twoliter version ${version} from source" cargo +nightly install \ -Z bindeps \ --locked \ From 7239835a55ee108868f4cfd916352ff2ae821bd0 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 25 Sep 2023 13:16:13 +0000 Subject: [PATCH 1126/1356] actions: Use latest cargo-make This updates the version of cargo-make used in the GitHub Action runs. This is only part of the workflow since things are delegated off to twoliter and the SDK, but we should keep the initial `cargo make` calls updated. 
Signed-off-by: Sean McGinnis --- .github/actions/setup-node/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index 666e9cc6..d8f9f192 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -25,8 +25,8 @@ runs: path: | .cargo key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} - - run: cargo install --locked --version 0.36.0 cargo-make + - run: cargo install cargo-make shell: bash - if: ${{ inputs.perform-cache-cleanup }} - run: cargo install --locked --version 0.8.3 --no-default-features --features ci-autoclean cargo-cache + run: cargo install --no-default-features --features ci-autoclean cargo-cache shell: bash From 6cf686dc5d4159d168cec1e7f000a2a95dfba0d1 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Fri, 29 Sep 2023 14:24:04 -0700 Subject: [PATCH 1127/1356] build: make sure twoliter dir is created If the twoliter directory does not exist, make sure we create it regardless of which installation method is being used. --- tools/install-twoliter.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh index e201152e..0253fad3 100755 --- a/tools/install-twoliter.sh +++ b/tools/install-twoliter.sh @@ -113,6 +113,7 @@ set -e workdir="$(mktemp -d)" on_exit "rm -rf ${workdir}" +mkdir -p "${dir}" if [ "${reuse_existing}" = "true" ] ; then if [ -x "${dir}/twoliter" ] ; then @@ -143,7 +144,6 @@ if [ "${allow_bin}" = "true" ] ; then cd "${workdir}" curl -sSL "${twoliter_release}/twoliter-${twoliter_target}.tar.xz" -o "twoliter.tar.xz" tar xf twoliter.tar.xz - mkdir -p "${dir}" mv "./twoliter-${twoliter_target}/twoliter" "${dir}" exit 0 ;; From bacd02c2077d09fbb74555c3e2ccbc4af7b3c5f9 Mon Sep 17 00:00:00 2001 From: Sam Berning Date: Tue, 3 Oct 2023 14:50:19 -0700 Subject: [PATCH 1128/1356] docs: update links to point to the currently used issue labels Signed-off-by: Sam Berning --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 724daab6..10bb7334 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -57,7 +57,7 @@ Bottlerocket follows a few basic filename case conventions: ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. -As this repository uses GitHub issue [labels](https://github.com/bottlerocket-os/bottlerocket/labels), looking at any ['status/helpwelcome'](https://github.com/bottlerocket-os/bottlerocket/labels/status%2Fhelpwelcome) issues is a great place to start. +As this repository uses GitHub issue [labels](https://github.com/bottlerocket-os/bottlerocket/labels), looking through issues labeled ['good first issue'](https://github.com/bottlerocket-os/bottlerocket/labels/good%20first%20issue) or ['help wanted'](https://github.com/bottlerocket-os/bottlerocket/labels/help%20wanted) is a great place to start. ## Code of Conduct From b20908a5a7d867faca1218f7cb1cc1391b5a8c1c Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Mon, 16 Oct 2023 19:23:10 +0000 Subject: [PATCH 1129/1356] Drop K8s 1.23 metal and VMware variants This removes the metal and VMware 1.23 variants. This version of Kubernetes has gone end-of-life and these variants are no longer supported. 
Signed-off-by: Sean McGinnis --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index fdf1c140..177df053 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,6 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.23` * `vmware-k8s-1.24` * `vmware-k8s-1.25` * `vmware-k8s-1.26` @@ -90,7 +89,6 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: The following variants are designed to be Kubernetes worker nodes on bare metal: -* `metal-k8s-1.23` * `metal-k8s-1.24` * `metal-k8s-1.25` * `metal-k8s-1.26` From 837e56714443dbfd220a14d05639a201338b9484 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 5 Oct 2023 07:33:51 +0000 Subject: [PATCH 1130/1356] kernel-5.10: update to 5.10.196 Rebase to Amazon Linux upstream version 5.10.196-185.743.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 3e18783a..435605e6 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/4cbf281b8513ad2257aae8ad983a75fd76cb9c613fe7025822f0f16879cb2e2b/kernel-5.10.192-182.736.amzn2.src.rpm" -sha512 = "8c1885a9f3a7c00d55b5c1bdadc5d95f1f64b321eabb602d69ce78706ce7f7241022cb094f161aebeebac74d4a08479c07d4a3db7bacb2896cf10ede962de3ec" +url = "https://cdn.amazonlinux.com/blobstore/2e0b99966781510902082be83f28d36844f9f84a1cc9c31f08550a5d7b632e14/kernel-5.10.196-185.743.amzn2.src.rpm" +sha512 = "579684744ae32d79ea6b40cee223613541d0d82db9f760528d043999d6f96c6d9656e01e403f1f8a434b0ee1ea2c5bb637afe97a74339b6a7cba752da48c2b14" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 2c8a449d..789af702 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.192 +Version: 5.10.196 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/4cbf281b8513ad2257aae8ad983a75fd76cb9c613fe7025822f0f16879cb2e2b/kernel-5.10.192-182.736.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/2e0b99966781510902082be83f28d36844f9f84a1cc9c31f08550a5d7b632e14/kernel-5.10.196-185.743.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From df371c91f5c7ddc9c6ddc24ffb2de944849c4919 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 5 Oct 2023 07:34:43 +0000 Subject: [PATCH 1131/1356] kernel-5.15: update to 5.15.133 Rebase to Amazon Linux upstream version 5.15.133-86.144.amzn2. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 3cab212a..cbbf94da 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/d301b89106ee983f8cd5cd0b4d6b4111ea97b7e51ca2892a6d9bbec4eaf18c4f/kernel-5.15.128-80.144.amzn2.src.rpm" -sha512 = "ac8fce4c8f293dd123e64ec6f3cf553e2d9b0462de5b48e0caebeecb1091a6d72dde35571264da1ed05984845778e758552636faf42d89ac6af41feec1f8b5da" +url = "https://cdn.amazonlinux.com/blobstore/2856e0e792b1a49369693e4b0e4246700fdf5094b2f5f953569e74d7b99e8f0e/kernel-5.15.133-86.144.amzn2.src.rpm" +sha512 = "5d0ffb542f8c7caebc0bf61e91c9a65b2fd2c17df91d1ec3e4536f9f1fd1b56e7150391513e9d95eab179975e07f5a6ffd543dc0edaa103f4cf18e023b8ca2f1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 2eff2f74..79ac2b3c 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.128 +Version: 5.15.133 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/d301b89106ee983f8cd5cd0b4d6b4111ea97b7e51ca2892a6d9bbec4eaf18c4f/kernel-5.15.128-80.144.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/2856e0e792b1a49369693e4b0e4246700fdf5094b2f5f953569e74d7b99e8f0e/kernel-5.15.133-86.144.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 51cfe01bb5dd3dee392b59fdbb649ebe621dd050 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 5 Oct 2023 07:35:32 +0000 Subject: [PATCH 1132/1356] kernel-6.1: update to 6.1.55 Rebase to Amazon Linux upstream version 6.1.55-75.123.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index e6ad1e28..0825db0e 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/b7fd4bb92caacd373bbd4cf41dca8c29736bf229c08ef80c59bb6063654d058b/kernel-6.1.49-69.116.amzn2023.src.rpm" -sha512 = "d9ccbf828b0466a226a6bf42e9d8a4482b4acea1bd27f6ba28a823d481d6357688a1594b457a6b8735b611d4d370b2aeb1382726ae694bb03f7aa1cf9ee7a9c2" +url = "https://cdn.amazonlinux.com/al2023/blobstore/b857edbf6e8d7c005d0e6e25d052548bb4a1113e504b6d2f50357998d94f9d07/kernel-6.1.55-75.123.amzn2023.src.rpm" +sha512 = "b87a14ab06804d1574a5a9b91df0749be4e22af5531a45b1bd2933656f92ac3688ea36adb06dd440234eb82f2c6139351a0efa1efa95259d151f91b3c242b67d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 94c6cb0b..b239587f 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.49 +Version: 6.1.55 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/b7fd4bb92caacd373bbd4cf41dca8c29736bf229c08ef80c59bb6063654d058b/kernel-6.1.49-69.116.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/b857edbf6e8d7c005d0e6e25d052548bb4a1113e504b6d2f50357998d94f9d07/kernel-6.1.55-75.123.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From a76ee47a064d4579d3ea5d2b52b751c23847e466 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 20 Oct 2023 12:07:13 +0000 Subject: [PATCH 1133/1356] kernel-5.10: update to 5.10.197 Rebase to Amazon Linux upstream version 5.10.197-186.748.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 435605e6..9b7664f4 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/2e0b99966781510902082be83f28d36844f9f84a1cc9c31f08550a5d7b632e14/kernel-5.10.196-185.743.amzn2.src.rpm" -sha512 = "579684744ae32d79ea6b40cee223613541d0d82db9f760528d043999d6f96c6d9656e01e403f1f8a434b0ee1ea2c5bb637afe97a74339b6a7cba752da48c2b14" +url = "https://cdn.amazonlinux.com/blobstore/9f9ded8eec13c7cacb468496c899e93063db7800ad20b12d07c1fee60e05eb33/kernel-5.10.197-186.748.amzn2.src.rpm" +sha512 = "c5986ab33ef52cfe61a67e29db2856072cb68c525c69dc0be14efbba58ad7df9f9989ddad27ebd722088d6f01b58875b49bf1aed06901e3d9966c0fed95ba722" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 789af702..89349680 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.196 +Version: 5.10.197 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/2e0b99966781510902082be83f28d36844f9f84a1cc9c31f08550a5d7b632e14/kernel-5.10.196-185.743.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9f9ded8eec13c7cacb468496c899e93063db7800ad20b12d07c1fee60e05eb33/kernel-5.10.197-186.748.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 6d86e09c978a11941f3b0e645fd20ff56eac11b5 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 20 Oct 2023 12:07:42 +0000 Subject: [PATCH 1134/1356] kernel-5.15: update to 5.15.134 Rebase to Amazon Linux upstream version 5.15.134-87.145.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index cbbf94da..9b1a4169 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/2856e0e792b1a49369693e4b0e4246700fdf5094b2f5f953569e74d7b99e8f0e/kernel-5.15.133-86.144.amzn2.src.rpm" -sha512 = "5d0ffb542f8c7caebc0bf61e91c9a65b2fd2c17df91d1ec3e4536f9f1fd1b56e7150391513e9d95eab179975e07f5a6ffd543dc0edaa103f4cf18e023b8ca2f1" +url = "https://cdn.amazonlinux.com/blobstore/418a9aab17cff76bb9577affa1df20b27fa223168e2fafef62510de157e1957d/kernel-5.15.134-87.145.amzn2.src.rpm" +sha512 = "fdc386b82928c7a29bbdbf0ff0c55e22a36f03d725aedfe8d6d309628e79484d2743ec00322713009fb2a56c49e1c20f3938fef7075215c76028b00f3149bdad" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 79ac2b3c..ddc3ffae 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.133 +Version: 5.15.134 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/2856e0e792b1a49369693e4b0e4246700fdf5094b2f5f953569e74d7b99e8f0e/kernel-5.15.133-86.144.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/418a9aab17cff76bb9577affa1df20b27fa223168e2fafef62510de157e1957d/kernel-5.15.134-87.145.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 102ac0a2f54bf85d7b590a92be834b5bc8f01896 Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Mon, 23 Oct 2023 10:06:09 -0600 Subject: [PATCH 1135/1356] [Docs] Removes redundant setting reference from readme (#3530) * Removes redundant setting reference from readme * Fixes typo. Co-authored-by: Ben Cressey --------- Co-authored-by: Ben Cressey --- README.md | 1001 ++--------------------------------------------------- 1 file changed, 28 insertions(+), 973 deletions(-) diff --git a/README.md b/README.md index 177df053..871b83db 100644 --- a/README.md +++ b/README.md @@ -349,1022 +349,97 @@ It's also more compact for our needs here. In this format, "settings.kubernetes.cluster-name" refers to the same key as in the JSON `{"settings": {"kubernetes": {"cluster-name": "value"}}}`. 
+**NOTE:** [bottlerocket.dev](https://bottlerocket.dev/en/os/latest/#/api/settings/) now contains a complete, versioned setting reference. +This documents retains the headings below for existing link and bookmark compatability. +Please update your bookmarks and check out [bottlerocket.dev](https://bottlerocket.dev/) for future updates to the setting reference. + #### Top-level settings -* `settings.motd`: This setting is just written out to /etc/motd. It's useful as a way to get familiar with the API! Try changing it. +See the [`settings.motd` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/motd/). #### Kubernetes settings -See the [EKS setup guide](QUICKSTART-EKS.md) for much more detail on setting up Bottlerocket and Kubernetes in AWS EKS. -For more details about running Bottlerocket as a Kubernetes worker node in VMware, see the [VMware setup guide](QUICKSTART-VMWARE.md). - -The following settings must be specified in order to join a Kubernetes cluster. -You should [specify them in user data](#using-user-data). - -* `settings.kubernetes.api-server`: This is the cluster's Kubernetes API endpoint. -* `settings.kubernetes.cluster-certificate`: This is the base64-encoded certificate authority of the cluster. - -For Kubernetes variants in AWS, you must also specify: - -* `settings.kubernetes.cluster-name`: The cluster name you chose during setup; the [setup guide](QUICKSTART-EKS.md) uses "bottlerocket". - -For Kubernetes variants in VMware, you must specify: - -* `settings.kubernetes.bootstrap-token`: The token used for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). - -The following settings can be optionally set to customize the node labels and taints. Remember to quote keys (since they often contain ".") and to quote all values. - -* `settings.kubernetes.cluster-dns-ip`: The IP of the DNS service running in the cluster. - - This value can be set as a string containing a single IP address, or as a list containing multiple IP addresses. - - Examples: - - ```toml - # Valid, single IP - [settings.kubernetes] - "cluster-dns-ip" = "10.0.0.1" - - # Also valid, multiple nameserver IPs - [settings.kubernetes] - "cluster-dns-ip" = ["10.0.0.1", "10.0.0.2"] - ``` - -* `settings.kubernetes.node-labels`: [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the form of key, value pairs added when registering the node in the cluster. -* `settings.kubernetes.node-taints`: [Taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the form of key, values and effects entries added when registering the node in the cluster. - - Example user data for setting up labels and taints: - - ```toml - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" - [settings.kubernetes.node-taints] - "dedicated" = ["experimental:PreferNoSchedule", "experimental:NoExecute"] - "special" = ["true:NoSchedule"] - ``` - -The following settings are optional and allow you to further configure your cluster. - -* `settings.kubernetes.allowed-unsafe-sysctls`: Enables specified list of unsafe sysctls. - - Example user data for setting up allowed unsafe sysctls: - - ```toml - allowed-unsafe-sysctls = ["net.core.somaxconn", "net.ipv4.ip_local_port_range"] - ``` - -* `settings.kubernetes.authentication-mode`: Which authentication method the kubelet should use to connect to the API server, and for incoming requests. Defaults to `aws` for AWS variants, and `tls` for other variants. 
-* `settings.kubernetes.bootstrap-token`: The token to use for [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/). This is only used with the `tls` authentication mode, and is otherwise ignored. -* `settings.kubernetes.cloud-provider`: The cloud provider for this cluster. Defaults to `aws` for AWS variants, and `external` for other variants. -* `settings.kubernetes.cluster-domain`: The DNS domain for this cluster, allowing all Kubernetes-run containers to search this domain before the host's search domains. Defaults to `cluster.local`. -* `settings.kubernetes.container-log-max-files`: The maximum number of container log files that can be present for a container. -* `settings.kubernetes.container-log-max-size`: The maximum size of container log file before it is rotated. -* `settings.kubernetes.cpu-cfs-quota-enforced`: Whether CPU CFS quotas are enforced. Defaults to `true`. -* `settings.kubernetes.cpu-manager-policy`: Specifies the CPU manager policy. Possible values are `static` and `none`. Defaults to `none`. If you want to allow pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node, you can set this setting to `static`. You should reboot if you change this setting after startup - try `apiclient reboot`. -* `settings.kubernetes.cpu-manager-policy-options`: Policy options to apply when `cpu-manager-policy` is set to `static`. Currently `full-pcpus-only` is the only option. - - For example: - - ```toml - [settings.kubernetes] - cpu-manager-policy = "static" - cpu-manager-policy-options = [ - "full-pcpus-only" - ] - ``` - -* `settings.kubernetes.cpu-manager-reconcile-period`: Specifies the CPU manager reconcile period, which controls how often updated CPU assignments are written to cgroupfs. The value is a duration like `30s` for 30 seconds or `1h5m` for 1 hour and 5 minutes. -* `settings.kubernetes.credential-providers`: Contains a collection of Kubelet image credential provider settings. - Each name under `credential-providers` is the name of the plugin to configure. - - Example user data for configuring the `ecr-credential-provider` credential provider plugin: - - ```toml - [settings.kubernetes.credential-providers.ecr-credential-provider] - enabled = true - # (optional - defaults to "12h") - cache-duration = "30m" - image-patterns = [ - # One or more URL paths to match an image prefix. Supports globbing of subdomains. - "*.dkr.ecr.us-east-2.amazonaws.com", - "*.dkr.ecr.us-west-2.amazonaws.com" - ] - - [settings.kubernetes.credential-providers.ecr-credential-provider.environment] - # The following are not used with ecr-credential-provider, but are provided for illustration - "KEY" = "abc123xyz" - "GOMAXPROCS" = "2" - ``` - - **Note:** `ecr-credential-provider` is currently the only supported provider. - To manage its AWS credentials, see the `settings.aws.config` and `settings.aws.credentials` settings. - - The `ecr-credential-provider` plugin can also be used for AWS IAM Roles Anywhere support. - IAM Roles Anywhere is configured using the `settings.aws.config` setting. 
- The content of that setting needs to configure the `credential_process` using the `aws_signing_helper` using your IAM Roles Anywhere settings, similar to the following: - - ```ini - [default] - region = us-west-2 - credential_process = aws_signing_helper credential-process \ - --certificate /var/lib/kubelet/pki/kubelet-client-current.pem \ - --private-key /var/lib/kubelet/pki/kubelet-client-current.pem \ - --profile-arn [profile ARN] - --role-arn [role ARN] - --trust-anchor-arn [trust anchor ARN] - ``` - - See the [Roles Anywhere documentation](https://docs.aws.amazon.com/rolesanywhere/latest/userguide/credential-helper.html) for more details on the `aws_signing_helper` arguments. - -* `settings.kubernetes.event-burst`: The maximum size of a burst of event creations. -* `settings.kubernetes.event-qps`: The maximum event creations per second. -* `settings.kubernetes.eviction-hard`: The signals and thresholds that trigger pod eviction. -* `settings.kubernetes.eviction-max-pod-grace-period`: Maximum grace period, in seconds, to wait for pod termination before soft eviction. Default is `0`. -* `settings.kubernetes.eviction-soft`: The signals and thresholds that trigger pod eviction with a provided grace period. -* `settings.kubernetes.eviction-soft-grace-period`: Delay for each signal to wait for pod termination before eviction. - - Remember to quote signals (since they all contain ".") and to quote all values. - - Example user data for setting up eviction values: - - ```toml - [settings.kubernetes.eviction-hard] - "memory.available" = "15%" - - [settings.kubernetes.eviction-soft] - "memory.available" = "12%" - - [settings.kubernetes.eviction-soft-grace-period] - "memory.available" = "30s" - - [settings.kubernetes] - "eviction-max-pod-grace-period" = 40 - ``` - -* `settings.kubernetes.image-gc-high-threshold-percent`: The percent of disk usage after which image garbage collection is always run, expressed as an integer from 0-100 inclusive. -* `settings.kubernetes.image-gc-low-threshold-percent`: The percent of disk usage before which image garbage collection is never run, expressed as an integer from 0-100 inclusive. - - Since v1.14.0 `image-gc-high-threshold-percent` and `image-gc-low-threshold-percent` can be represented as numbers. - For example: - - ```toml - [settings.kubernetes] - image-gc-high-threshold-percent = 85 - image-gc-low-threshold-percent = 80 - ``` - - For backward compatibility, both string and numeric representations are accepted since v1.14.0. - Prior to v1.14.0 these needed to be represented as strings, for example: - - ```toml - [settings.kubernetes] - image-gc-high-threshold-percent = "85" - image-gc-low-threshold-percent = "80" - ``` - - If you downgrade from v1.14.0 to an earlier version, and you have these values set as numbers, they will be converted to strings on downgrade. - -* `settings.kubernetes.kube-api-burst`: The burst to allow while talking with kubernetes. -* `settings.kubernetes.kube-api-qps`: The QPS to use while talking with kubernetes apiserver. -* `settings.kubernetes.log-level`: Adjust the logging verbosity of the `kubelet` process. - The default log level is 2, with higher numbers enabling more verbose logging. -* `settings.kubernetes.memory-manager-policy`: The memory management policy to use: `None` (default) or `Static`. - Note, when using the `Static` policy you should also set `settings.kubernetes.memory-manager-reserved-memory` values. 
-* `settings.kubernetes.memory-manager-reserved-memory`: Used to set the total amount of reserved memory for a node. - These settings are used to configure memory manager policy when `settings.kubernetes.memory-manager-policy` is set to `Static`. - - `memory-manager-reserved-memory` is set per NUMA node. For example: - - ```toml - [settings.kubernetes] - "memory-manager-policy" = "Static" - - [settings.kubernetes.memory-manager-reserved-memory.0] - # Reserve a single 1GiB huge page along with 674MiB of memory - "enabled" = true - "memory" = "674Mi" - "hugepages-1Gi" = "1Gi" - - [settings.kubernetes.memory-manager-reserved-memory.1] - # Reserve 1,074 2MiB huge pages - "enabled" = true - "hugepages-2Mi" = "2148Mi" - ``` - - **Warning:** `memory-manager-reserved-memory` settings are an advanced configuration and requires a clear understanding of what you are setting. - Misconfiguration of reserved memory settings may cause the Kubernetes `kubelet` process to fail. - It can be very difficult to recover from configuration errors. - Use the memory reservation information from `kubectl describe node` and make sure you understand the Kubernetes documentation related to the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) and how to [reserve compute resources for system daemons](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/). - -* `settings.kubernetes.pod-pids-limit`: The maximum number of processes per pod. -* `settings.kubernetes.provider-id`: This sets the unique ID of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. -* `settings.kubernetes.registry-burst`: The maximum size of bursty pulls. -* `settings.kubernetes.registry-qps`: The registry pull QPS. -* `settings.kubernetes.seccomp-default`: Enable RuntimeDefault as the default seccomp profile for all workloads via kubelet-configuration. This is disabled by default. -* `settings.kubernetes.server-tls-bootstrap`: Enables or disables server certificate bootstrap. When enabled, the kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests (CSR). Defaults to `true`. -* `settings.kubernetes.shutdown-grace-period`: Delay the node should wait for pod termination before shutdown. Default is `0s`. -* `settings.kubernetes.shutdown-grace-period-for-critical-pods`: The portion of the shutdown delay that should be dedicated to critical pod shutdown. Default is `0s`. -* `settings.kubernetes.standalone-mode`: Whether to run the kubelet in standalone mode, without connecting to an API server. Defaults to `false`. -* `settings.kubernetes.system-reserved`: Resources reserved for system components. - - Example user data for setting up system reserved: - - ```toml - [settings.kubernetes.system-reserved] - cpu = "10m" - memory = "100Mi" - ephemeral-storage= "1Gi" - ``` - -* `settings.kubernetes.server-certificate`: The base64 encoded content of an x509 certificate for the Kubelet web server, which is used for retrieving logs and executing commands. -* `settings.kubernetes.server-key`: The base64 encoded content of an x509 private key for the Kubelet web server. -* `settings.kubernetes.topology-manager-policy`: Specifies the topology manager policy. Possible values are `none`, `restricted`, `best-effort`, and `single-numa-node`. Defaults to `none`. -* `settings.kubernetes.topology-manager-scope`: Specifies the topology manager scope. 
Possible values are `container` and `pod`. Defaults to `container`. If you want to group all containers in a pod to a common set of NUMA nodes, you can set this setting to `pod`. - -You can also optionally specify static pods for your node with the following settings. -Static pods can be particularly useful when running in standalone mode. - -* `settings.kubernetes.static-pods..enabled`: Whether the static pod is enabled. -* `settings.kubernetes.static-pods..manifest`: A base64-encoded pod manifest. - -For Kubernetes variants in AWS and VMware, the following are set for you automatically, but you can override them if you know what you're doing! -In AWS, [pluto](sources/api/) sets these based on runtime instance information. -In VMware and on bare metal, Bottlerocket uses [netdog](sources/api/) (for `node-ip`) or relies on default values. -(See the [VMware defaults](sources/models/src/vmware-k8s-1.23/defaults.d) or [bare metal defaults](sources/models/src/metal-k8s-1.23/defaults.d)). - -* `settings.kubernetes.kube-reserved`: Resources reserved for node components. - - Bottlerocket provides default values for the resources by [schnauzer](sources/api/): - - * `cpu`: in millicores from the total number of vCPUs available on the instance. - * `memory`: in mebibytes from the max num of pods on the instance. `memory_to_reserve = max_num_pods * 11 + 255`. - * `ephemeral-storage`: defaults to `1Gi`. - -* `settings.kubernetes.node-ip`: The IP address of this node. -* `settings.kubernetes.pod-infra-container-image`: The URI of the "pause" container. - -For Kubernetes variants in AWS, the following settings are set for you automatically by [pluto](sources/api/). - -* `settings.kubernetes.cluster-dns-ip`: Derived from the EKS Service IP CIDR or the CIDR block of the primary network interface. -* `settings.kubernetes.max-pods`: The maximum number of pods that can be scheduled on this node (limited by number of available IPv4 addresses) -* `settings.kubernetes.hostname-override`: The node name kubelet uses as identification instead of the hostname or the name determined by the in-tree cloud provider if that's enabled. - - **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user-data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. - - Most users don't need to change this setting. - If left unset, the system hostname will be used instead. - The `settings.network.hostname` setting can be used to specify the value for both `kubelet` and the host. - Only set this override if you intend for the `kubelet` to register with a different name than the host. - - For `aws-k8s-1.26` variants, which use the "external" cloud provider, a hostname override will be automatically generated by querying the EC2 API for the private DNS name of the instance. - This is done for backwards compatibility with the deprecated "aws" cloud provider, which adjusted the hostname in a similar way. - Future `aws-k8s-*` variants may remove this behavior. +See the [`settings.kubernetes.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/kubernetes/). #### Amazon ECS settings -See the [setup guide](QUICKSTART-ECS.md) for much more detail on setting up Bottlerocket and ECS. - -The following settings are optional and allow you to configure how your instance joins an ECS cluster. 
-Since joining a cluster happens at startup, they need to be [specified in user data](#using-user-data). - -* `settings.ecs.cluster`: The name or [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of your Amazon ECS cluster. - If left unspecified, Bottlerocket will join your `default` cluster. -* `settings.ecs.instance-attributes`: [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) in the form of key, value pairs added when registering the container instance in the cluster. - - Example user data for setting up attributes: - - ```toml - [settings.ecs.instance-attributes] - attribute1 = "foo" - attribute2 = "bar" - ``` - -The following settings are optional and allow you to further configure your cluster. -These settings can be changed at any time. - -* `settings.ecs.allow-privileged-containers`: Whether launching privileged containers is allowed on the container instance. - If this value is set to false, privileged containers are not permitted. - Bottlerocket sets this value to false by default. -* `settings.ecs.container-stop-timeout`: Time to wait for the task's containers to stop on their own before they are forcefully stopped. -Valid time units include `s`, `m`, and `h`, e.g. `1h`, `1m1s`. -* `settings.ecs.enable-spot-instance-draining`: If the instance receives a spot termination notice, the agent will set the instance's state to `DRAINING`, so the workload can be moved gracefully before the instance is removed. Defaults to `false`. -* `settings.ecs.image-pull-behavior`: The behavior used to customize the [pull image process](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html#ecs-agent-availparam) for your container instances. - Supported values are `default`, `always`, `once`, `prefer-cached`, and the default is `default`. -* `settings.ecs.logging-drivers`: The list of logging drivers available on the container instance. - The ECS agent running on a container instance must register available logging drivers before tasks that use those drivers are eligible to be placed on the instance. - Bottlerocket enables the `json-file`, `awslogs`, and `none` drivers by default. -* `settings.ecs.loglevel`: The level of verbosity for the ECS agent's logs. - Supported values are `debug`, `info`, `warn`, `error`, and `crit`, and the default is `info`. -* `settings.ecs.metadata-service-rps`: The steady state rate limit of the throttling configurations set for the task metadata service. -* `settings.ecs.metadata-service-burst`: The burst rate limit of the throttling configurations set for the task metadata service. -* `settings.ecs.reserved-memory`: The amount of memory, in MiB, reserved for critical system processes. -* `settings.ecs.task-cleanup-wait`: Time to wait before the task's containers are removed after they are stopped. -Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. -* `settings.ecs.image-cleanup-wait`: Time to wait between image cleanup cycles. -Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. -* `settings.ecs.image-cleanup-delete-per-cycle`: Number of images to delete in a single image cleanup cycle. -* `settings.ecs.image-cleanup-enabled`: Enable automatic images clean up after the tasks have been removed. -Defaults to `false` -* `settings.ecs.image-cleanup-age`: Time since the image was pulled to be considered for clean up. -Valid time units are `s`, `m`, and `h`, e.g. `1h`, `1m1s`. 
- - **Note**: `metadata-service-rps` and `metadata-service-burst` directly map to the values set by the `ECS_TASK_METADATA_RPS_LIMIT` environment variable. +See the [`settings.ecs.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/ecs/). #### CloudFormation signal helper settings -For AWS variants, these settings allow you to set up CloudFormation signaling to indicate whether Bottlerocket hosts running in EC2 have been successfully created or updated: - -* `settings.cloudformation.logical-resource-id`: The logical ID of the AutoScalingGroup resource that you want to signal. -* `settings.cloudformation.should-signal`: Whether to check status and send signal. Defaults to `false`. If set to `true`, both `stack-name` and `logical-resource-id` need to be specified. -* `settings.cloudformation.stack-name`: Name of the CloudFormation Stack to signal. +See the [`settings.cloudformation.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/cloudformation/). #### Auto Scaling group settings -* `settings.autoscaling.should-wait`: Whether to wait for the instance to reach the `InService` state before the orchestrator agent joins the cluster. Defaults to `false`. Set this to `true` only if the instance is part of an Auto Scaling group, or will be attached to one later. - For example: - - ```toml - [settings.autoscaling] - should-wait = true - ``` +See the [`settings.autoscaling.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/autoscaling/). #### OCI Hooks settings -Bottlerocket allows you to opt-in to use additional [OCI hooks](https://github.com/opencontainers/runtime-spec/blob/main/runtime.md#lifecycle) for your orchestrated containers. -Once you opt-in to use additional OCI hooks, any new orchestrated containers will be configured with them, but existing containers won't be changed. - -* `settings.oci-hooks.log4j-hotpatch-enabled`: **Deprecated**. This setting is no longer supported by Bottlerocket starting from v1.15.0. Though it is still available for backwards compatibility, enabling it has no effect beyond printing a deprecation warning to the system logs. +See the [`settings.oci-hooks.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-hooks/). #### OCI Defaults settings -Bottlerocket allows you to customize certain parts of the default [OCI spec](https://github.com/opencontainers/runtime-spec/blob/main/config.md) that is applied to workload containers. - -The following settings are available: +See the [`settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/). ##### OCI Defaults: Capabilities -All of the `capabilities` settings below are boolean values (`true`/`false`). 
- -The full list of capabilities that can be configured in Bottlerocket are as follows: - -capability | setting | default value ------ | ----- | ----- -`CAP_AUDIT_WRITE` | `settings.oci-defaults.capabilities.audit-write` | true -`CAP_CHOWN` | `settings.oci-defaults.capabilities.chown` | true -`CAP_DAC_OVERRIDE` | `settings.oci-defaults.capabilities.dac-override` | true -`CAP_FOWNER` | `settings.oci-defaults.capabilities.fowner` | true -`CAP_FSETID` | `settings.oci-defaults.capabilities.fsetid` | true -`CAP_KILL` | `settings.oci-defaults.capabilities.kill` | true -`CAP_MKNOD` | `settings.oci-defaults.capabilities.mknod` | true -`CAP_NET_BIND_SERVICE` | `settings.oci-defaults.capabilities.net-bind-service` | true -`CAP_SETGID` | `settings.oci-defaults.capabilities.setgid` | true -`CAP_SETFCAP` | `settings.oci-defaults.capabilities.setfcap` | true -`CAP_SETPCAP` | `settings.oci-defaults.capabilities.setpcap` | true -`CAP_SETUID` | `settings.oci-defaults.capabilities.setuid` | true -`CAP_SYS_CHROOT` | `settings.oci-defaults.capabilities.sys-chroot` | true -`CAP_AUDIT_CONTROL` | `settings.oci-defaults.capabilities.audit-control` | - -`CAP_AUDIT_READ` | `settings.oci-defaults.capabilities.audit-read` | - -`CAP_BLOCK_SUSPEND` | `settings.oci-defaults.capabilities.block-suspend` | - -`CAP_BPF` | `settings.oci-defaults.capabilities.bpf` | - -`CAP_CHECKPOINT_RESTORE` | `settings.oci-defaults.capabilities.checkpoint-restore` | - -`CAP_DAC_READ_SEARCH` | `settings.oci-defaults.capabilities.dac-read-search` | - -`CAP_IPC_LOCK` | `settings.oci-defaults.capabilities.ipc-lock` | - -`CAP_IPC_OWNER` | `settings.oci-defaults.capabilities.ipc-owner` | - -`CAP_LEASE` | `settings.oci-defaults.capabilities.lease` | - -`CAP_LINUX_IMMUTABLE` | `settings.oci-defaults.capabilities.linux-immutable` | - -`CAP_MAC_ADMIN` | `settings.oci-defaults.capabilities.mac-admin` | - -`CAP_MAC_OVERRIDE` | `settings.oci-defaults.capabilities.mac-override` | - -`CAP_NET_ADMIN` | `settings.oci-defaults.capabilities.net-admin` | - -`CAP_NET_BROADCAST` | `settings.oci-defaults.capabilities.net-broadcast` | - -`CAP_NET_RAW` | `settings.oci-defaults.capabilities.net-raw` | - -`CAP_PERFMON` | `settings.oci-defaults.capabilities.perfmon` | - -`CAP_SYS_ADMIN` | `settings.oci-defaults.capabilities.sys-admin` | - -`CAP_SYS_BOOT` | `settings.oci-defaults.capabilities.sys-boot` | - -`CAP_SYS_MODULE` | `settings.oci-defaults.capabilities.sys-module` | - -`CAP_SYS_NICE` | `settings.oci-defaults.capabilities.sys-nice` | - -`CAP_SYS_PACCT` | `settings.oci-defaults.capabilities.sys-pacct` | - -`CAP_SYS_PTRACE` | `settings.oci-defaults.capabilities.sys-ptrace` | - -`CAP_SYS_RAWIO` | `settings.oci-defaults.capabilities.sys-rawio` | - -`CAP_SYS_RESOURCE` | `settings.oci-defaults.capabilities.sys-resource` | - -`CAP_SYS_TIME` | `settings.oci-defaults.capabilities.sys-time` | - -`CAP_SYS_TTY_CONFIG` | `settings.oci-defaults.capabilities.sys-tty-config` | - -`CAP_SYSLOG` | `settings.oci-defaults.capabilities.syslog` | - -`CAP_WAKE_ALARM` | `settings.oci-defaults.capabilities.wake-alarm` | - +See the ["Capabilities Settings" section in the `settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/). ##### OCI Defaults: Resource Limits -Each of the `resource-limits` settings below contain two fields: `hard-limit` and `soft-limit`. - -Please see the [`getrlimit` linux manpage](https://man7.org/linux/man-pages/man2/getrlimit.2.html) for meanings of `hard-limit` and `soft-limit`. 
- -The full list of resource limits that can be configured in Bottlerocket are: -

Resource limit | Setting | Default value | Unit
----- | ----- | ----- | -----
`RLIMIT_AS` | `settings.oci-defaults.resource-limits.max-address-space.soft-limit` | - | bytes
`RLIMIT_AS` | `settings.oci-defaults.resource-limits.max-address-space.hard-limit` | - | bytes
`RLIMIT_CORE` | `settings.oci-defaults.resource-limits.max-core-file-size.soft-limit` | - | bytes
`RLIMIT_CORE` | `settings.oci-defaults.resource-limits.max-core-file-size.hard-limit` | - | bytes
`RLIMIT_CPU` | `settings.oci-defaults.resource-limits.max-cpu-time.soft-limit` | - | seconds
`RLIMIT_CPU` | `settings.oci-defaults.resource-limits.max-cpu-time.hard-limit` | - | seconds
`RLIMIT_DATA` | `settings.oci-defaults.resource-limits.max-data-size.soft-limit` | - | bytes
`RLIMIT_DATA` | `settings.oci-defaults.resource-limits.max-data-size.hard-limit` | - | bytes
`RLIMIT_LOCKS` | `settings.oci-defaults.resource-limits.max-file-locks.soft-limit` | - | locks
`RLIMIT_LOCKS` | `settings.oci-defaults.resource-limits.max-file-locks.hard-limit` | - | locks
`RLIMIT_FSIZE` | `settings.oci-defaults.resource-limits.max-file-size.soft-limit` | - | bytes
`RLIMIT_FSIZE` | `settings.oci-defaults.resource-limits.max-file-size.hard-limit` | - | bytes
`RLIMIT_MEMLOCK` | `settings.oci-defaults.resource-limits.max-locked-memory.soft-limit` | - | bytes
`RLIMIT_MEMLOCK` | `settings.oci-defaults.resource-limits.max-locked-memory.hard-limit` | - | bytes
`RLIMIT_MSGQUEUE` | `settings.oci-defaults.resource-limits.max-msgqueue-size.soft-limit` | - | bytes
`RLIMIT_MSGQUEUE` | `settings.oci-defaults.resource-limits.max-msgqueue-size.hard-limit` | - | bytes
`RLIMIT_NICE` | `settings.oci-defaults.resource-limits.max-nice-priority.soft-limit` | - | -
`RLIMIT_NICE` | `settings.oci-defaults.resource-limits.max-nice-priority.hard-limit` | - | -
`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.soft-limit` | 65536 | files
`RLIMIT_NOFILE` | `settings.oci-defaults.resource-limits.max-open-files.hard-limit` | 1048576 | files
`RLIMIT_SIGPENDING` | `settings.oci-defaults.resource-limits.max-pending-signals.soft-limit` | - | signals
`RLIMIT_SIGPENDING` | `settings.oci-defaults.resource-limits.max-pending-signals.hard-limit` | - | signals
`RLIMIT_NPROC` | `settings.oci-defaults.resource-limits.max-processes.soft-limit` | - | processes
`RLIMIT_NPROC` | `settings.oci-defaults.resource-limits.max-processes.hard-limit` | - | processes
`RLIMIT_RTPRIO` | `settings.oci-defaults.resource-limits.max-realtime-priority.soft-limit` | - | -
`RLIMIT_RTPRIO` | `settings.oci-defaults.resource-limits.max-realtime-priority.hard-limit` | - | -
`RLIMIT_RTTIME` | `settings.oci-defaults.resource-limits.max-realtime-timeout.soft-limit` | - | microseconds
`RLIMIT_RTTIME` | `settings.oci-defaults.resource-limits.max-realtime-timeout.hard-limit` | - | microseconds
`RLIMIT_RSS` | `settings.oci-defaults.resource-limits.max-resident-set.soft-limit` | - | bytes
`RLIMIT_RSS` | `settings.oci-defaults.resource-limits.max-resident-set.hard-limit` | - | bytes
`RLIMIT_STACK` | `settings.oci-defaults.resource-limits.max-stack-size.soft-limit` | - | bytes
`RLIMIT_STACK` | `settings.oci-defaults.resource-limits.max-stack-size.hard-limit` | - | bytes
- -Limits can be any integer between 0 to `int64::MAX`. Either `-1` or `"unlimited"` can be used to remove the limit. -* Specifying the maximum value (`i64::MAX`) for a limit: - ```toml - [settings.oci-defaults.resource-limits.>] - soft-limit = 65536 - hard-limit = 9223372036854775807 - ``` -* Removing a limit: - ```toml - [settings.oci-defaults.resource-limits.>] - soft-limit = 65536 - hard-limit = "unlimited" - ``` +See the ["Resource Limits Settings" section in the `settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/). #### Container image registry settings -The following setting is optional and allows you to configure image registry mirrors and pull-through caches for your containers. - -* `settings.container-registry.mirrors`: An array of container image registry mirror settings. Each element specifies the registry and the endpoints for said registry. -When pulling an image from a registry, the container runtime will try the endpoints one by one and use the first working one. - (Docker and containerd will still try the default registry URL if the mirrors fail.) - - Example user data for setting up image registry mirrors: - - ```toml - [[settings.container-registry.mirrors]] - registry = "*" - endpoint = ["https://","https://"] - - [[settings.container-registry.mirrors]] - registry = "docker.io" - endpoint = [ "https://", "https://"] - ``` - - If you use a Bottlerocket variant that uses Docker as the container runtime, like `aws-ecs-1`, you should be aware that Docker only supports pull-through caches for images from Docker Hub (docker.io). Mirrors for other registries are ignored in this case. - -For [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) images from Amazon ECR private repositories, registry mirrors are currently unsupported. - -The following setting is optional and allows you to configure image registry credentials. - -* `settings.container-registry.credentials`: An array of container images registry credential settings. Each element specifies the registry and the credential information for said registry. -The credential fields map to [containerd's registry credential fields](https://github.com/containerd/containerd/blob/v1.6.0/docs/cri/registry.md#configure-registry-credentials), which in turn map to the fields in `.docker/config.json`. - - To avoid storing plaintext credentials in external systems, it is recommended to programmatically apply these settings via `apiclient` using a [bootstrap container](#bootstrap-containers-settings) or [host container](#host-containers-settings). - - Example `apiclient` call to set registry credentials for `gcr.io` and `docker.io`: - - ```shell - apiclient set --json '{ - "container-registry": { - "credentials": [ - { - "registry": "gcr.io", - "username": "example_username", - "password": "example_password" - }, - { - "registry": "docker.io", - "auth": "example_base64_encoded_auth_string" - } - ] - } - }' - ``` - - Example user data for setting up image registry credentials: - ```toml - [[settings.container-registry.credentials]] - registry = "docker.io" - username = "foo" - password = "bar" - - [[settings.container-registry.credentials]] - registry = "gcr.io" - auth = "example_base64_encoded_auth_string" - ``` - -In addition to the container runtime daemons, these credential settings will also apply to [host-container](#host-containers-settings) and [bootstrap-container](#bootstrap-containers-settings) image pulls as well. 
+See the [`settings.container-registry.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/container-registry/). #### Container runtime settings -Some behavior of the container runtime (currently `containerd`) can be modified with the following settings: - -* `settings.container-runtime.enable-unprivileged-icmp`: Allow unprivileged containers to open ICMP echo sockets. -* `settings.container-runtime.enable-unprivileged-ports`: Allow unprivileged containers to bind to ports < 1024. -* `settings.container-runtime.max-concurrent-downloads`: Restricts the number of concurrent layer downloads for each image. -* `settings.container-runtime.max-container-log-line-size`: Controls how long container log messages can be. - If the log output is longer than the limit, the log message will be broken into multiple lines. - -Example container runtime settings: - -```toml -[settings.container-runtime] -# Set log line length to unlimited -max-container-log-line-size = -1 -max-concurrent-downloads = 4 -enable-unprivileged-icmp = true -enable-unprivileged-ports = true -``` +See the [`settings.container-runtime.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/container-runtime/). #### Updates settings -* `settings.updates.ignore-waves`: Updates are rolled out in waves to reduce the impact of issues. For testing purposes, you can set this to `true` to ignore those waves and update immediately. -* `settings.updates.metadata-base-url`: The common portion of all URIs used to download update metadata. -* `settings.updates.seed`: A `u32` value that determines how far into the update schedule this machine will accept an update. We recommend leaving this at its default generated value so that updates can be somewhat randomized in your cluster. -* `settings.updates.targets-base-url`: The common portion of all URIs used to download update files. -* `settings.updates.version-lock`: Controls the version that will be selected when you issue an update request. Can be locked to a specific version like `v1.0.0`, or `latest` to take the latest available version. Defaults to `latest`. +See the [`settings.updates.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/updates/). #### Network settings -* `settings.network.hostname`: The desired hostname of the system. - - **Important note for all Kubernetes variants:** Changing this setting at runtime (not via user data) can cause issues with kubelet registration, as hostname is closely tied to the identity of the system for both registration and certificates/authorization purposes. - - Most users don't need to change this setting as the following defaults work for the majority of use cases. - If this setting isn't set we attempt to use DNS reverse lookup for the hostname. - If the lookup is unsuccessful, the IP of the node is used. - -* `settings.network.hosts`: A mapping of IP addresses to domain names which should resolve to those IP addresses. - This setting results in modifications to the `/etc/hosts` file for Bottlerocket. - - Note that this setting does not typically impact name resolution for containers, which usually rely on orchestrator-specific mechanisms for configuring static resolution. - (See [ECS](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HostEntry.html) and [Kubernetes](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) documentation for those mechanisms.) 
- - Example: - - ```toml - [settings.network] - hosts = [ - ["10.0.0.0", ["test.example.com", "test1.example.com"]], - ["10.1.1.1", ["test2.example.com"]] - ] - ``` - - This example would result in an `/etc/hosts` file entries like so: - - ```txt - 10.0.0.0 test.example.com test1.example.com - 10.1.1.1 test2.example.com - ``` - - Repeated entries are merged (including loopback entries), with the first aliases listed taking precedence. e.g.: - - ```toml - [settings.network] - hosts = [ - ["10.0.0.0", ["test.example.com", "test1.example.com"]], - ["10.1.1.1", ["test2.example.com"]], - ["10.0.0.0", ["test3.example.com"]], - ] - ``` - - Would result in `/etc/hosts` entries like so: - - ```txt - 10.0.0.0 test.example.com test1.example.com test3.example.com - 10.1.1.1 test2.example.com - ``` - -The following allows for custom DNS settings, which are used to generate the `/etc/resolv.conf`. -If either DNS setting is not populated, the system will use the DHCP lease of the primary interface to gather these settings. -See the `resolv.conf` [man page](https://man7.org/linux/man-pages/man5/resolv.conf.5.html) for more detail. - -* `settings.dns.name-servers`: An array of IP address strings that represent the desired name server(s). -* `settings.dns.search-list`: An array of domain strings that represent the desired domain search path(s). - - ```toml - [settings.dns] - name-servers = ["1.2.3.4", "5.6.7.8"] - search-list = ["foo.bar", "baz.foo"] - ``` +See the [`settings.network.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/network/). ##### Proxy settings -These settings will configure the proxying behavior of the following services: - -* For all variants: - * [containerd.service](packages/containerd/containerd.service) - * [host-containerd.service](packages/host-ctr/host-containerd.service) -* For Kubernetes variants: - * [kubelet.service](packages/kubernetes-1.18/kubelet.service) -* For the ECS variant: - * [docker.service](packages/docker-engine/docker.service) - * [ecs.service](packages/ecs-agent/ecs.service) - -* `settings.network.https-proxy`: The HTTPS proxy server to be used by services listed above. -* `settings.network.no-proxy`: A list of hosts that are excluded from proxying. - - Example: - - ```toml - [settings.network] - https-proxy = "1.2.3.4:8080" - no-proxy = ["localhost", "127.0.0.1"] - ``` - -The no-proxy list will automatically include entries for localhost. - -If you're running a Kubernetes variant, the no-proxy list will automatically include the Kubernetes API server endpoint and other commonly used Kubernetes DNS suffixes to facilitate intra-cluster networking. - +See the ["Proxy Settings" section in the `settings.networks.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/network/). + #### Metrics settings -By default, Bottlerocket sends anonymous metrics when it boots, and once every six hours. -This can be disabled by setting `send-metrics` to false. -Here are the metrics settings: - -* `settings.metrics.metrics-url`: The endpoint to which metrics will be sent. The default is `https://metrics.bottlerocket.aws/v1/metrics`. -* `settings.metrics.send-metrics`: Whether Bottlerocket will send anonymous metrics. -* `settings.metrics.service-checks`: A list of systemd services that will be checked to determine whether a host is healthy. +See the [`settings.metrics.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/metrics/). 
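As a brief illustration of the metrics settings described above, the following user-data sketch opts out of anonymous metrics; it is only an example, and the endpoint is otherwise left at its documented default:

```toml
[settings.metrics]
# Disable anonymous metrics reporting (enabled by default).
send-metrics = false
```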
#### Time settings -* `settings.ntp.time-servers`: A list of NTP servers used to set and verify the system time. +See the [`settings.ntp.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/ntp/). #### Kernel settings -* `settings.kernel.lockdown`: This allows further restrictions on what the Linux kernel will allow, for example preventing the loading of unsigned modules. - May be set to "none" (the default in `*-nvidia` and `*-dev` variants), "integrity" (the default for other variants), or "confidentiality". - **Important note:** this setting cannot be lowered (toward 'none') at runtime. - You must reboot for a change to a lower level to take effect. -* `settings.kernel.modules..allowed`: Whether the named kernel module is allowed to be loaded. - **Important note:** this setting does not affect kernel modules that are already loaded. - You may need to reboot for a change to disallow a kernel module to take effect. - - Example user data for blocking kernel modules: - - ```toml - [settings.kernel.modules.sctp] - allowed = false - - [settings.kernel.modules.udf] - allowed = false - ``` - -* `settings.kernel.modules..autoload`: Whether the named kernel modules shall be loaded automatically. - **Important note:** this setting needs to be used in conjunction with the `allowed` setting for the same module to ensure we are not auto-loading a module that is blocked. - - Example user data for auto-loading a kernel module on boot: - - ```toml - [settings.kernel.modules.ip_vs_lc] - allowed = true - autoload = true - ``` - -* `settings.kernel.sysctl`: Key/value pairs representing Linux kernel parameters. - Remember to quote keys (since they often contain ".") and to quote all values. - - Example user data for setting up sysctl: - - ```toml - [settings.kernel.sysctl] - "user.max_user_namespaces" = "16384" - "vm.max_map_count" = "262144" - ``` +See the [`settings.kernel.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/kernel/). #### Boot-related settings -*Please note that boot settings currently only exist for the bare metal variants and \*-k8s-1.23 variants. Boot settings will be added to any future variant introduced after Bottlerocket v1.8.0.* - -Specifying any of the following settings will generate a kernel boot config file to be loaded on subsequent boots: - -* `settings.boot.init-parameters`: This allows additional init parameters to be specified on the kernel command line during boot. -* `settings.boot.kernel-parameters`: This allows additional kernel parameters to be specified on the kernel command line during boot. -* `settings.boot.reboot-to-reconcile`: If set to `true`, Bottlerocket will automatically reboot again during boot if either the `settings.boot.kernel-parameters` or `settings.boot.init-parameters` were changed via user data or a bootstrap container so that these changes may take effect. - -You can learn more about kernel boot configuration [here](https://www.kernel.org/doc/html/latest/admin-guide/bootconfig.html). - -Example user data for specifying boot settings: - -```toml -[settings.boot] -reboot-to-reconcile = true - -[settings.boot.kernel-parameters] -"console" = [ - "tty0", - "ttyS1,115200n8", -] -"crashkernel" = [ - "2G-:256M", -] -"slub_debug" = [ - "options,slabs", -] -"usbcore.quirks" = [ - "0781:5580:bk", - "0a5c:5834:gij", -] - -[settings.boot.init-parameters] -"log_level" = ["debug"] -"splash" = [] -``` - -If boot config data exists at `/proc/bootconfig`, it will be used to generate these API settings on first boot. 
-Please note that Bottlerocket only supports boot configuration for `kernel` and `init`. If any other boot config key is specified, the settings generation will fail. +See the [`settings.boot.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/boot/). #### Custom CA certificates settings -By default, Bottlerocket ships with the Mozilla CA certificate store, but you can add self-signed certificates through the API using these settings: - -* `settings.pki..data`: Base64-encoded PEM-formatted certificates bundle; it can contain more than one certificate -* `settings.pki..trusted`: Whether the certificates in the bundle are trusted; defaults to `false` when not provided - -Here's an example of adding a bundle of self-signed certificates as user data: - -```toml -[settings.pki.my-trusted-bundle] -data="W3N..." -trusted=true - -[settings.pki.dont-trust-these] -data="W3N..." -trusted=false -``` - -Here's the same example but using API calls: - -```shell -apiclient set \ - pki.my-trusted-bundle.data="W3N..." \ - pki.my-trusted-bundle.trusted=true \ - pki.dont-trust-these.data="N3W..." \ - pki.dont-trust-there.trusted=false -``` - -You can use this method from within a [bootstrap container](#bootstrap-containers-settings), if your user data is over the size limit of the platform. +See the [`settings.pki.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/pki/). #### Host containers settings -* `settings.host-containers.admin.enabled`: Whether the admin container is enabled. -* `settings.host-containers.admin.source`: The URI of the [admin container](#admin-container). -* `settings.host-containers.admin.superpowered`: Whether the admin container has high levels of access to the Bottlerocket host. -* `settings.host-containers.control.enabled`: Whether the control container is enabled. -* `settings.host-containers.control.source`: The URI of the [control container](#control-container). -* `settings.host-containers.control.superpowered`: Whether the control container has high levels of access to the Bottlerocket host. +See the [`settings.host-containers.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/host-containers/). ##### Custom host containers -[`admin`](https://github.com/bottlerocket-os/bottlerocket-admin-container) and [`control`](https://github.com/bottlerocket-os/bottlerocket-control-container) are our default host containers, but you're free to change this. -Beyond just changing the settings above to affect the `admin` and `control` containers, you can add and remove host containers entirely. -As long as you define the three fields above -- `source` with a URI, and `enabled` and `superpowered` with true/false -- you can add host containers with an API call or user data. - -You can optionally define a `user-data` field with arbitrary base64-encoded data, which will be made available in the container at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME/user-data` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current/user-data`. -(It was inspired by instance user data, but is entirely separate; it can be any data your host container feels like interpreting.) - -Keep in mind that the default admin container (since Bottlerocket v1.0.6) relies on `user-data` to store SSH keys. You can set `user-data` to [customize the keys](https://github.com/bottlerocket-os/bottlerocket-admin-container/#authenticating-with-the-admin-container), or you can use it for your own purposes in a custom container. 
- -Here's an example of adding a custom host container with API calls: - -```shell -apiclient set \ - host-containers.custom.source=MY-CONTAINER-URI \ - host-containers.custom.enabled=true \ - host-containers.custom.superpowered=false -``` - -Here's the same example, but with the settings you'd add to user data: - -```toml -[settings.host-containers.custom] -enabled = true -source = "MY-CONTAINER-URI" -superpowered = false -``` - -If the `enabled` flag is `true`, it will be started automatically. - -All host containers will have the `apiclient` binary available at `/usr/local/bin/apiclient` so they're able to [interact with the API](#using-the-api-client). -You can also use `apiclient` to run programs in other host containers. -For example, to access the admin container: - -```shell -apiclient exec admin bash -``` - -In addition, all host containers come with persistent storage that survives reboots and container start/stop cycles. -It's available at `/.bottlerocket/host-containers/$HOST_CONTAINER_NAME` and (since Bottlerocket v1.0.8) `/.bottlerocket/host-containers/current`. -The default `admin` host-container, for example, stores its SSH host keys under `/.bottlerocket/host-containers/admin/etc/ssh/`. - -There are a few important caveats to understand about host containers: - -* They're not orchestrated. They only start or stop according to that `enabled` flag. -* They run in a separate instance of containerd than the one used for orchestrated containers like Kubernetes pods. -* They're not updated automatically. You need to update the `source` and commit those changes. -* If you set `superpowered` to true, they'll essentially have root access to the host. -* If the container exits for any reason, Bottlerocket will attempt to restart it after 45 seconds. - -Because of these caveats, host containers are only intended for special use cases. -We use them for the control container because it needs to be available early to give you access to the OS, and for the admin container because it needs high levels of privilege and because you need it to debug when orchestration isn't working. - -Be careful, and make sure you have a similar low-level use case before reaching for host containers. +See the [Host Containers documentation](https://bottlerocket.dev/en/os/latest/#/concepts/host-containers/). #### Bootstrap containers settings -* `settings.bootstrap-containers..essential`: whether or not the container should fail the boot process, defaults to `false` -* `settings.bootstrap-containers..mode`: the mode of the container, it could be one of `off`, `once` or `always`. See below for a description of modes. -* `settings.bootstrap-containers..source`: the image for the container -* `settings.bootstrap-containers..user-data`: field with arbitrary base64-encoded data - -Bootstrap containers are host containers that can be used to "bootstrap" the host before services like ECS Agent, Kubernetes, and Docker start. - -Bootstrap containers are very similar to normal host containers; they come with persistent storage and with optional user data. -Unlike normal host containers, bootstrap containers can't be treated as `superpowered` containers. -However, bootstrap containers do have additional permissions that normal host containers do not have. -Bootstrap containers have access to the underlying root filesystem on `/.bottlerocket/rootfs` as well as to all the devices in the host, and they are set up with the `CAP_SYS_ADMIN` capability. 
-This allows bootstrap containers to create files, directories, and mounts that are visible to the host. - -Bootstrap containers are set up to run after the systemd `configured.target` unit is active. -The containers' systemd unit depends on this target (and not on any of the bootstrap containers' peers) which means that bootstrap containers will not execute in a deterministic order. -The boot process will "wait" for as long as the bootstrap containers run. -Bootstrap containers configured with `essential=true` will stop the boot process if they exit code is a non-zero value. - -Bootstrap containers have three different modes: - -* `always`: with this setting, the container is executed on every boot. -* `off`: the container won't run -* `once`: with this setting, the container only runs on the first boot where the container is defined. Upon completion, the mode is changed to `off`. - -Here's an example of adding a bootstrap container with API calls: - -```shell -apiclient set \ - bootstrap-containers.bootstrap.source=MY-CONTAINER-URI \ - bootstrap-containers.bootstrap.mode=once \ - bootstrap-containers.bootstrap.essential=true -``` - -Here's the same example, but with the settings you'd add to user data: - -```toml -[settings.bootstrap-containers.bootstrap] -source = "MY-CONTAINER-URI" -mode = "once" -essential = true -``` +See the [`settings.bootstrap-containers.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/bootstrap-containers/) as well as the [Bootstrap Containers documentation](https://bottlerocket.dev/en/os/latest/#/concepts/bootstrap-containers/) ##### Mount propagations in bootstrap and superpowered containers @@ -1379,27 +454,7 @@ They can be overridden for testing purposes in [the same way as other settings]( ##### AWS-specific settings -* `settings.aws.config`: The base64 encoded content to use for AWS configuration (e.g. `base64 -w0 ~/.aws/config`). -* `settings.aws.credentials`: The base64 encoded content to use for AWS credentials (e.g. `base64 -w0 ~/.aws/credentials`). -* `settings.aws.profile`: The profile name to use from the provided `config` and `credentials` settings. - - For example: - - ```toml - [settings.aws] - profile = "myprofile" - ``` - - **Note**: If `settings.aws.profile` is not set, the setting will fallback to the "default" profile. - In general it is recommended not to include a `[profile default]` section in the `aws.config` contents though. - This may have unintended side effects for other AWS services running on the node (e.g. `aws-iam-authenticator`). - - **Note:** The `config`, `credentials`, and `profile` are optional and do not need to be set when using an Instance Profile when running on an AWS instance. - -* `settings.aws.region`: This is set to the AWS region in which the instance is running, for example `us-west-2`. - - The `region` setting is automatically inferred based on calls to the Instance MetaData Service (IMDS) when running within AWS. - It does not need to be explicitly set unless you have a reason to override this default value. +See the [`settings.aws.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/aws/). ### Logs From 1fcb9a7c71307380872bbc49b4eaca4466de383d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 31 Oct 2023 13:00:50 +0000 Subject: [PATCH 1136/1356] Change nightly GitHub Action to run weekly The nightly job was added to help catch cases where yanked Go modules would be found right away by using the GOPROXY=direct setting to bypass any caching. 
This has been in place for a few months, and so far the only failures seen have been due to Docker repo throttling (too many requests). Since this job runs every variant in the repo, it is a relatively expensive job to be running every night. This switches the job from nightly to weekly. This will still give an early warning in case of any yanked modules, but will limit the amount of overall runtime that is usually unnecessary. Signed-off-by: Sean McGinnis --- .github/workflows/{nightly.yml => weekly.yml} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename .github/workflows/{nightly.yml => weekly.yml} (95%) diff --git a/.github/workflows/nightly.yml b/.github/workflows/weekly.yml similarity index 95% rename from .github/workflows/nightly.yml rename to .github/workflows/weekly.yml index 31c13bff..97f33522 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/weekly.yml @@ -2,12 +2,12 @@ # to try to catch errors close to their introduction due to yanked Go modules. These # could otherwise be covered up by caching and not discovered until much later when # bypassing the main cache. -name: Nightly +name: Weekly on: schedule: - # Run once a day at 02:15 UTC. Randomly chosen as a "quiet" time for this to run. + # Run Monday at 02:15 UTC. Randomly chosen as a "quiet" time for this to run. # See syntax for format details: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule - - cron: '15 2 * * *' + - cron: '15 2 * * 1' env: # When Go packages are built, buildsys will vendor in dependent Go code for From 93c0a9663c9fbc8051e90992ad0f6581ffcb13db Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 25 Oct 2023 18:25:35 +0000 Subject: [PATCH 1137/1356] kmod-5.15-nvidia: Update to R535 --- packages/kmod-5.15-nvidia/Cargo.toml | 8 +- .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 309 +++++++++--------- .../kmod-5.15-nvidia/nvidia-ld.so.conf.in | 2 +- .../nvidia-tesla-tmpfiles.conf | 3 + .../nvidia-tesla-tmpfiles.conf.in | 3 - 5 files changed, 168 insertions(+), 157 deletions(-) create mode 100644 packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf delete mode 100644 packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 5e211252..0f86bacc 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-5.15-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-x86_64-515.86.01.run" -sha512 = "9a31e14afc017e847f1208577f597c490adb63c256d6dff1a9eae56b65cf85374a604516b0be9da7a43e9af93b3c5aec47b2ffefd6b4050a4b7e55f348cf4e7b" +url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-x86_64-535.129.03.run" +sha512 = "3d7142658fe836e1debf7786857bdb293490ef33351e9b7d39face245fe8596b0f46052b86fae08350fcda1e2a9fd68d7309b94e107d1b016bd529d8fc37e31f" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/515.86.01/NVIDIA-Linux-aarch64-515.86.01.run" -sha512 = "43161f86143b1558d1f558acf4a060f53f538ea20e6235f76be24916fe4a9c374869645c7abf39eba66f1c2ca35f5d2b04f199bd1341b7ee6c1fdc879cb3ef96" +url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-aarch64-535.129.03.run" +sha512 = 
"706de7e53b81f909d8bc6a12a39c594754a164c49f5d23c7939dc3abcfc04f5d5b12b7d65762ae574582149a098f06ee5fe95be4f8ad1056a3307a6ce93f3c00" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 7ffe7da8..00e06416 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -1,10 +1,17 @@ -%global tesla_515 515.86.01 -%global tesla_515_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_515} -%global tesla_515_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -%global tesla_515_firmwaredir %{_cross_libdir}/firmware/nvidia/%{tesla_515} +%global tesla_major 535 +%global tesla_minor 129 +%global tesla_patch 03 +%global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) +# With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ga10x.bin +# and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. +# The __spec_install_post macro will by default try to strip all binary files. +# Unfortunately the strip used is not compatible with the new file format. +# Redefine strip, so that these firmware binaries do not derail the build. +%global __strip /usr/bin/true + Name: %{_cross_os}kmod-5.15-nvidia Version: 1.0.0 Release: 1%{?dist} @@ -15,15 +22,15 @@ License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ # NVIDIA .run scripts from 0 to 199 -Source0: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-x86_64-%{tesla_515}.run -Source1: https://us.download.nvidia.com/tesla/%{tesla_515}/NVIDIA-Linux-aarch64-%{tesla_515}.run +Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf # NVIDIA tesla conf files from 300 to 399 -Source300: nvidia-tesla-tmpfiles.conf.in +Source300: nvidia-tesla-tmpfiles.conf Source301: nvidia-tesla-build-config.toml.in Source302: nvidia-tesla-path.env.in Source303: nvidia-ld.so.conf.in @@ -34,25 +41,25 @@ BuildRequires: %{_cross_os}kernel-5.15-archive %description %{summary}. 
-%package tesla-515 -Summary: NVIDIA 515 Tesla driver -Version: %{tesla_515} +%package tesla-%{tesla_major} +Summary: NVIDIA %{tesla_major} Tesla driver +Version: %{tesla_ver} License: %{spdx_id} Requires: %{name} -%description tesla-515 +%description tesla-%{tesla_major} %{summary} %prep # Extract nvidia sources with `-x`, otherwise the script will try to install # the driver in the current run -sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_515}.run -x +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz %build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515}/kernel +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod @@ -94,81 +101,80 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -# Begin NVIDIA tesla 515 -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_515} +# Begin NVIDIA tesla driver +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions -install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -d %{buildroot}%{tesla_515_libdir} -install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} - -sed -e 's|__NVIDIA_VERSION__|%{tesla_515}|' %{S:300} > nvidia-tesla-%{tesla_515}.conf -install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_tmpfilesdir}/ -sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/|' %{S:301} > \ - nvidia-tesla-%{tesla_515}.toml -install -m 0644 nvidia-tesla-%{tesla_515}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -d %{buildroot}%{_cross_libdir}/nvidia/tesla +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla + +install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ + nvidia-tesla.toml +install -m 0644 nvidia-tesla.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}|' %{S:302} > nvidia-path.env -install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_515} -# We need to add `_cross_libdir/tesla_515` to the paths loaded by the ldconfig service +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:302} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla +# We need to add `_cross_libdir` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 
's|__NVIDIA_VERSION__|%{tesla_515}|' \ - > nvidia-tesla-%{tesla_515}.conf -install -m 0644 nvidia-tesla-%{tesla_515}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf +install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # driver -install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # modeset -install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # peermem -install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # drm -install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d # Binaries -install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-control 
%{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %if "%{_cross_arch}" == "x86_64" -install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added -install -m 755 *.so* %{buildroot}/%{tesla_515_libdir}/ +install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ # This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while # the symlinks are created. For now, we only symlink libEGL.so.1.1.0. -EXCLUDED_LIBS="libEGL.so.%{tesla_515}" +EXCLUDED_LIBS="libEGL.so.%{tesla_ver}" for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" [ -n "${soname}" ] || continue [ "${lib}" == "${soname}" ] && continue - ln -s "${lib}" %{buildroot}/%{tesla_515_libdir}/"${soname}" + ln -s "${lib}" %{buildroot}/%{_cross_libdir}/nvidia/tesla/"${soname}" done # Include the firmware file for GSP support -install -d %{buildroot}%{tesla_515_firmwaredir} -install -p -m 0644 firmware/gsp.bin %{buildroot}%{tesla_515_firmwaredir} +install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_ga10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} popd @@ -183,141 +189,146 @@ popd %{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -%files tesla-515 +%files tesla-%{tesla_major} %license %{license_file} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515} -%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515} -%dir %{tesla_515_libdir} -%dir %{tesla_515_firmwaredir} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d -%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_515} +%dir %{_cross_datadir}/nvidia/tesla +%dir %{_cross_libexecdir}/nvidia/tesla/bin +%dir %{_cross_libdir}/nvidia/tesla +%dir %{_cross_libdir}/firmware/nvidia/%{tesla_ver} +%dir %{_cross_datadir}/nvidia/tesla/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla # Binaries -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-debugdump -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi # Configuration files -%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_515}.toml -%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_515}.conf -%{_cross_factorydir}/nvidia/tesla/%{tesla_515}/nvidia-path.env +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf +%{_cross_factorydir}/nvidia/tesla/nvidia-path.env # driver -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-interface.o 
+%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-uvm.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.o # modeset -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nv-modeset-kernel.o -%{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-modeset.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-modeset.mod.o # tmpfiles -%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_515}.conf +%{_cross_tmpfilesdir}/nvidia-tesla.conf # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities # Utility libs -%{tesla_515_libdir}/libnvidia-ml.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ml.so.1 -%{tesla_515_libdir}/libnvidia-cfg.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-cfg.so.1 -%{tesla_515_libdir}/libnvidia-nvvm.so.4 -%{tesla_515_libdir}/libnvidia-nvvm.so.%{tesla_515} +%{_cross_libdir}/nvidia/tesla/libnvidia-api.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.4 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.%{tesla_ver} # Compute libs -%{tesla_515_libdir}/libcuda.so.%{tesla_515} -%{tesla_515_libdir}/libcuda.so.1 -%{tesla_515_libdir}/libnvidia-opencl.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-opencl.so.1 -%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ptxjitcompiler.so.1 -%{tesla_515_libdir}/libnvidia-allocator.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-allocator.so.1 -%{tesla_515_libdir}/libOpenCL.so.1.0.0 -%{tesla_515_libdir}/libOpenCL.so.1 +%{_cross_libdir}/nvidia/tesla/libcuda.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libcuda.so.1 +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.1 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1.0.0 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{tesla_515_libdir}/libnvidia-compiler.so.%{tesla_515} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11-openssl3.so.%{tesla_ver} %endif # Video libs -%{tesla_515_libdir}/libvdpau_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libvdpau_nvidia.so.1 -%{tesla_515_libdir}/libnvidia-encode.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-encode.so.1 -%{tesla_515_libdir}/libnvidia-opticalflow.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-opticalflow.so.1 -%{tesla_515_libdir}/libnvcuvid.so.%{tesla_515} 
-%{tesla_515_libdir}/libnvcuvid.so.1 +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.1 +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.1 # Graphics libs -%{tesla_515_libdir}/libnvidia-eglcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-glcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-tls.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-glsi.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-rtcore.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-fbc.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-fbc.so.1 -%{tesla_515_libdir}/libnvoptix.so.%{tesla_515} -%{tesla_515_libdir}/libnvoptix.so.1 -%{tesla_515_libdir}/libnvidia-vulkan-producer.so.%{tesla_515} +%{_cross_libdir}/nvidia/tesla/libnvidia-eglcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-tls.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glsi.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-rtcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.1 +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-vulkan-producer.so.%{tesla_ver} # Graphics GLVND libs -%{tesla_515_libdir}/libnvidia-glvkspirv.so.%{tesla_515} -%{tesla_515_libdir}/libGLX_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLX_nvidia.so.0 -%{tesla_515_libdir}/libEGL_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libEGL_nvidia.so.0 -%{tesla_515_libdir}/libGLESv2_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLESv2_nvidia.so.2 -%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.%{tesla_515} -%{tesla_515_libdir}/libGLESv1_CM_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-glvkspirv.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.2 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.1 # Graphics compat -%{tesla_515_libdir}/libEGL.so.1.1.0 -%{tesla_515_libdir}/libEGL.so.1 -%{tesla_515_libdir}/libEGL.so.%{tesla_515} -%{tesla_515_libdir}/libGL.so.1.7.0 -%{tesla_515_libdir}/libGL.so.1 -%{tesla_515_libdir}/libGLESv1_CM.so.1.2.0 -%{tesla_515_libdir}/libGLESv1_CM.so.1 -%{tesla_515_libdir}/libGLESv2.so.2.1.0 -%{tesla_515_libdir}/libGLESv2.so.2 +%{_cross_libdir}/nvidia/tesla/libEGL.so.1.1.0 +%{_cross_libdir}/nvidia/tesla/libEGL.so.1 +%{_cross_libdir}/nvidia/tesla/libEGL.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGL.so.1.7.0 +%{_cross_libdir}/nvidia/tesla/libGL.so.1 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1.2.0 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2.1.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2 # NGX -%{tesla_515_libdir}/libnvidia-ngx.so.%{tesla_515} -%{tesla_515_libdir}/libnvidia-ngx.so.1 
+%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.1 # Firmware -%{tesla_515_firmwaredir}/gsp.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_ga10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-peermem.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_515}/module-objects.d/nvidia-drm.o -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-control -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-cuda-mps-server +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-server %if "%{_cross_arch}" == "x86_64" -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_515}/nvidia-ngx-updater +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-ngx-updater %endif # None of these libraries are required by libnvidia-container, so they # won't be used by a containerized workload -%exclude %{tesla_515_libdir}/libGLX.so.0 -%exclude %{tesla_515_libdir}/libGLdispatch.so.0 -%exclude %{tesla_515_libdir}/libOpenGL.so.0 -%exclude %{tesla_515_libdir}/libglxserver_nvidia.so.%{tesla_515} -%exclude %{tesla_515_libdir}/libnvidia-gtk2.so.%{tesla_515} -%exclude %{tesla_515_libdir}/libnvidia-gtk3.so.%{tesla_515} -%exclude %{tesla_515_libdir}/nvidia_drv.so -%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1 -%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1 -%exclude %{tesla_515_libdir}/libnvidia-egl-gbm.so.1.1.0 -%exclude %{tesla_515_libdir}/libnvidia-egl-wayland.so.1.1.9 -%exclude %{tesla_515_libdir}/libnvidia-wayland-client.so.%{tesla_515} +%exclude %{_cross_libdir}/nvidia/tesla/libGLX.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libGLdispatch.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libOpenGL.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libglxserver_nvidia.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk2.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk3.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/nvidia_drv.so +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} diff --git a/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in b/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in index a07b0ccb..f992bf22 100644 --- a/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in +++ b/packages/kmod-5.15-nvidia/nvidia-ld.so.conf.in @@ -1 +1 @@ -__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ +__LIBDIR__/nvidia/tesla/ diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf 
b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf new file mode 100644 index 00000000..ddcac3e4 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf @@ -0,0 +1,3 @@ +C /etc/drivers/nvidia-tesla.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla.conf diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in deleted file mode 100644 index f208e1d2..00000000 --- a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf.in +++ /dev/null @@ -1,3 +0,0 @@ -C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml -C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env -C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf From 4e027f8ff7846ac1fd579984f832d239bfd21e06 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Tue, 31 Oct 2023 20:50:20 +0000 Subject: [PATCH 1138/1356] packages: move kmod-6.1-nvidia to 535.129.03 --- packages/kmod-6.1-nvidia/Cargo.toml | 8 ++++---- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 75fd1f3d..77365343 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-6.1-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-x86_64-535.54.03.run" -sha512 = "45b72b34272d3df14b56136bb61537d00145d55734b72d58390af4694d96f03b2b49433beb4a5bede4d978442b707b08e05f2f31b2fcfd9453091e7f0b945cff" +url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-x86_64-535.129.03.run" +sha512 = "3d7142658fe836e1debf7786857bdb293490ef33351e9b7d39face245fe8596b0f46052b86fae08350fcda1e2a9fd68d7309b94e107d1b016bd529d8fc37e31f" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.54.03/NVIDIA-Linux-aarch64-535.54.03.run" -sha512 = "57b06a6fa16838176866c364a8722c546084529ad91c57e979aca7750692127cab1485b5a44aee398c5494782ed987e82f66061aa39e802bc6eefa2b40a33bc3" +url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-aarch64-535.129.03.run" +sha512 = "706de7e53b81f909d8bc6a12a39c594754a164c49f5d23c7939dc3abcfc04f5d5b12b7d65762ae574582149a098f06ee5fe95be4f8ad1056a3307a6ce93f3c00" force-upstream = true [build-dependencies] diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index e208bf93..6173a23b 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,5 +1,5 @@ %global tesla_major 535 -%global tesla_minor 54 +%global tesla_minor 129 %global tesla_patch 03 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) From a6faf3c4000d6b490f6ea39b605f927eed1080a9 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Tue, 31 Oct 2023 20:54:16 +0000 Subject: [PATCH 1139/1356] packages: update kmod-5.10-nvidia to 470.161.03 --- packages/kmod-5.10-nvidia/Cargo.toml | 8 ++++---- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml 
b/packages/kmod-5.10-nvidia/Cargo.toml index d62972b4..afbc4cb7 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-5.10-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.161.03/NVIDIA-Linux-x86_64-470.161.03.run" -sha512 = "26b1640f9427847b68233ffacf5c4a07e75ed9923429dfc9e5de3d7e5c1f109dfaf0fe0a0639cbd47f056784ed3e00e2e741d5c84532df79590a0c9ffa5ba625" +url = "https://us.download.nvidia.com/tesla/470.223.02/NVIDIA-Linux-x86_64-470.223.02.run" +sha512 = "66e470343b6f0c04703c81169cd03674be06b5315db738cab64308ec073b5bf5b87508b58ac8b6288d10e95307072d99e874e7884207a323a3dd08887bbc8750" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.161.03/NVIDIA-Linux-aarch64-470.161.03.run" -sha512 = "16e83c4d3ea66b2da07c43fca912c839e5feb9d42bee279b9de3476ffbd5e2314fddc83c1a38c198adb2d5ea6b4f2b00bb4a4c32d6fd0bfcdbccc392043f99ce" +url = "https://us.download.nvidia.com/tesla/470.223.02/NVIDIA-Linux-aarch64-470.223.02.run" +sha512 = "c22eab4ec6aa1868bbe55200ba74187939571ae78645c333fe05d544869c54b84d63e26f5c4f922bbe4e768da1f394d15d0b85cacbd4bbbc2b1dfd5074734a02" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 0f4a31f0..042683fe 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,4 +1,4 @@ -%global tesla_470 470.161.03 +%global tesla_470 470.223.02 %global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} %global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) From 63e72f3261cc45e8415f1a1783b1f7fbbdf5f2ea Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Fri, 3 Nov 2023 15:07:48 -0600 Subject: [PATCH 1140/1356] adds ecs-2 to the readme (#3569) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 871b83db..e1be85ab 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,8 @@ The following variants support ECS: * `aws-ecs-1` * `aws-ecs-1-nvidia` +* `aws-ecs-2` +* `aws-ecs-2-nvidia` We also have variants that are designed to be Kubernetes worker nodes in VMware: @@ -337,7 +339,7 @@ If your user data is over the size limit of the platform (e.g. 16KiB for EC2) yo Here we'll describe each setting you can change. -**Note:** You can see the default values (for any settings that are not generated at runtime) by looking in the `defaults.d` directory for a variant, for example [aws-ecs-1](sources/models/src/aws-ecs-1/defaults.d/). +**Note:** You can see the default values (for any settings that are not generated at runtime) by looking in the `defaults.d` directory for a variant, for example [aws-ecs-2](sources/models/src/aws-ecs-2/defaults.d/). When you're sending settings to the API, or receiving settings from the API, they're in a structured JSON format. This allows modification of any number of keys at once. From a4faf54baaa9c08951cc4350e5871cfdfb66a6c5 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 2 Nov 2023 15:55:43 +0000 Subject: [PATCH 1141/1356] kernel-5.10: update to 5.10.198 Rebase to Amazon Linux upstream version 5.10.198-187.748.amzn2. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 9b7664f4..7faac1d0 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/9f9ded8eec13c7cacb468496c899e93063db7800ad20b12d07c1fee60e05eb33/kernel-5.10.197-186.748.amzn2.src.rpm" -sha512 = "c5986ab33ef52cfe61a67e29db2856072cb68c525c69dc0be14efbba58ad7df9f9989ddad27ebd722088d6f01b58875b49bf1aed06901e3d9966c0fed95ba722" +url = "https://cdn.amazonlinux.com/blobstore/5c8155b74bb2980fed073710617014a21ad836d9b6aa2c1d39e9168289236fde/kernel-5.10.198-187.748.amzn2.src.rpm" +sha512 = "ae931ec40f8edd7cf76dfc10e7f6e8719cf680e3aa4b65a4efaf37b3075b36d71f01cb28fa59f0e7a73eba7a41f6e0b753bd1eed2fda66563e7d6f2ac36394d5" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 89349680..8e37e40e 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.197 +Version: 5.10.198 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/9f9ded8eec13c7cacb468496c899e93063db7800ad20b12d07c1fee60e05eb33/kernel-5.10.197-186.748.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/5c8155b74bb2980fed073710617014a21ad836d9b6aa2c1d39e9168289236fde/kernel-5.10.198-187.748.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 66afe3729e7e1916f1d255108cda6fadc50d3c86 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 2 Nov 2023 15:56:21 +0000 Subject: [PATCH 1142/1356] kernel-5.15: update to 5.15.136 Rebase to Amazon Linux upstream version 5.15.136-90.144.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 9b1a4169..cada059a 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/418a9aab17cff76bb9577affa1df20b27fa223168e2fafef62510de157e1957d/kernel-5.15.134-87.145.amzn2.src.rpm" -sha512 = "fdc386b82928c7a29bbdbf0ff0c55e22a36f03d725aedfe8d6d309628e79484d2743ec00322713009fb2a56c49e1c20f3938fef7075215c76028b00f3149bdad" +url = "https://cdn.amazonlinux.com/blobstore/8bbf53203badda16f39f6dabe8974acac6f4b3d0dcf96378a434a32c897da379/kernel-5.15.136-90.144.amzn2.src.rpm" +sha512 = "5f1cf5c446a96805f54f8e38a32d779782e510af0c4efe5015d07d3d5606216410939f883c7769f9faede67d77e4d9fc3a2ba4a9251a150417ce1a2283304066" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index ddc3ffae..52a5e29d 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.134 +Version: 5.15.136 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/418a9aab17cff76bb9577affa1df20b27fa223168e2fafef62510de157e1957d/kernel-5.15.134-87.145.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/8bbf53203badda16f39f6dabe8974acac6f4b3d0dcf96378a434a32c897da379/kernel-5.15.136-90.144.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 65c5194cda5256ad3426699d26bd2d086a0654b8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 2 Nov 2023 15:57:23 +0000 Subject: [PATCH 1143/1356] kernel-6.1: update to 6.1.56 Rebase to Amazon Linux upstream version 6.1.56-82.125.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 0825db0e..058ae68b 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/b857edbf6e8d7c005d0e6e25d052548bb4a1113e504b6d2f50357998d94f9d07/kernel-6.1.55-75.123.amzn2023.src.rpm" -sha512 = "b87a14ab06804d1574a5a9b91df0749be4e22af5531a45b1bd2933656f92ac3688ea36adb06dd440234eb82f2c6139351a0efa1efa95259d151f91b3c242b67d" +url = "https://cdn.amazonlinux.com/al2023/blobstore/4c8745cd575d4358f74f7088fcfb66ec0026d3cf812356255425847141782ab4/kernel-6.1.56-82.125.amzn2023.src.rpm" +sha512 = "6b152f9e2e14b99fb7a88a45cb369db330165355e679a536e182bcf3c53cf23a1e3dd46703971dc32d34f757aaabacbc8fed6eaf57bfe6b3ac68ec0c4b1a31c0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index b239587f..e5763d17 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.55 +Version: 6.1.56 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/b857edbf6e8d7c005d0e6e25d052548bb4a1113e504b6d2f50357998d94f9d07/kernel-6.1.55-75.123.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/4c8745cd575d4358f74f7088fcfb66ec0026d3cf812356255425847141782ab4/kernel-6.1.56-82.125.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From c0dff39edb8adf02e38f65642bc7cb83d68b52bc Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Fri, 3 Nov 2023 12:26:14 +0000 Subject: [PATCH 1144/1356] kernel-6.1: Remove unnecessary drivers Amazon Linux added these classes of drivers (atkeyboard, ps2 mouse, basic framebuffer) for usage in development setups. For Bottlerocket we do not need them in the generic case. On platforms where we need some of these drivers (metal, vmware) we have explicitly enabled them already. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket | 11 +++++++++++ packages/kernel-6.1/config-bottlerocket-metal | 1 + 2 files changed, 12 insertions(+) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index f95e720c..000efd66 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -193,3 +193,14 @@ CONFIG_ISCSI_TARGET=m # Disable DAMON subsystem. We currently do not have a good use-case for DAMON. # CONFIG_DAMON is not set + +# Disable unnecessary keyboard and mouse drivers. +# CONFIG_MOUSE_PS2 is not set +# CONFIG_SERIO is not set +# CONFIG_KEYBOARD_ATKBD is not set + +# Disable unnecessary framebuffer/drm drivers +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_SYSFB_SIMPLEFB is not set + diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal index 94d46c91..72f4a63d 100644 --- a/packages/kernel-6.1/config-bottlerocket-metal +++ b/packages/kernel-6.1/config-bottlerocket-metal @@ -141,6 +141,7 @@ CONFIG_MOUSE_PS2=m # CONFIG_MOUSE_PS2_SENTELIC is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_PS2_FOCALTECH is not set +# CONFIG_MOUSE_PS2_VMMOUSE is not set # Intel Volume Management Device driver, to support boot disks in a separate # PCI domain. From 22a1aa903606cedd900d811a74acac31c7bdb20d Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Mon, 6 Nov 2023 13:08:44 +0000 Subject: [PATCH 1145/1356] kernel-6.1: update to 6.1.59 Rebase to Amazon Linux upstream version 6.1.59-84.139.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 058ae68b..ec016baa 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/4c8745cd575d4358f74f7088fcfb66ec0026d3cf812356255425847141782ab4/kernel-6.1.56-82.125.amzn2023.src.rpm" -sha512 = "6b152f9e2e14b99fb7a88a45cb369db330165355e679a536e182bcf3c53cf23a1e3dd46703971dc32d34f757aaabacbc8fed6eaf57bfe6b3ac68ec0c4b1a31c0" +url = "https://cdn.amazonlinux.com/al2023/blobstore/7f6b70d0766761e79bb6dae9a840ac4fb6ca95c78dad994ea97abac37dd2a061/kernel-6.1.59-84.139.amzn2023.src.rpm" +sha512 = "9e5c3dab3583742254775c82710007360da8d1a0b252f2acb9096788f6ed33d04599ef61bffc489f78540a4f8194440e79aa3e9ff25ae3be802973ade868bfb1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index e5763d17..dc2c7025 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.56 +Version: 6.1.59 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/4c8745cd575d4358f74f7088fcfb66ec0026d3cf812356255425847141782ab4/kernel-6.1.56-82.125.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/7f6b70d0766761e79bb6dae9a840ac4fb6ca95c78dad994ea97abac37dd2a061/kernel-6.1.59-84.139.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 5bbd15648950cd741e1badbb26bd70928fee25c7 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 15 Nov 2023 14:49:15 +0000 Subject: [PATCH 1146/1356] microcode: Update Intel and AMD microcode to November 2023 releases While we update AMD microcode, we can now drop the downstream backport of `linux-firmware: Update AMD cpu microcode` from 2023-08-08, in favor of its upstream variant contained in this update. 
Signed-off-by: Leonard Foerster --- ...ux-firmware-Update-AMD-cpu-microcode.patch | 486 ------------------ packages/microcode/Cargo.toml | 8 +- packages/microcode/microcode.spec | 6 +- 3 files changed, 6 insertions(+), 494 deletions(-) delete mode 100644 packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch diff --git a/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch b/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch deleted file mode 100644 index b19a577b..00000000 --- a/packages/microcode/0001-linux-firmware-Update-AMD-cpu-microcode.patch +++ /dev/null @@ -1,486 +0,0 @@ -From f2eb058afc57348cde66852272d6bf11da1eef8f Mon Sep 17 00:00:00 2001 -From: John Allen -Date: Tue, 8 Aug 2023 19:02:39 +0000 -Subject: [PATCH] linux-firmware: Update AMD cpu microcode - -* Update AMD cpu microcode for processor family 19h - -Key Name = AMD Microcode Signing Key (for signing microcode container files only) -Key ID = F328AE73 -Key Fingerprint = FC7C 6C50 5DAF CC14 7183 57CA E4BE 5339 F328 AE73 - -Signed-off-by: John Allen -Signed-off-by: Josh Boyer ---- - WHENCE | 2 +- - amd-ucode/README | 13 +++++++++++++ - amd-ucode/microcode_amd_fam19h.bin | Bin 16804 -> 39172 bytes - amd-ucode/microcode_amd_fam19h.bin.asc | 16 ++++++++-------- - 4 files changed, 22 insertions(+), 9 deletions(-) - -diff --git a/WHENCE b/WHENCE -index 99cee970..54aadb0c 100644 ---- a/WHENCE -+++ b/WHENCE -@@ -3924,7 +3924,7 @@ Raw: amd-ucode/microcode_amd_fam17h.bin - Version: 2023-07-19 - File: amd-ucode/microcode_amd_fam19h.bin - Raw: amd-ucode/microcode_amd_fam19h.bin --Version: 2023-07-18 -+Version: 2023-08-08 - File: amd-ucode/README - - License: Redistributable. See LICENSE.amd-ucode for details -diff --git a/amd-ucode/README b/amd-ucode/README -index 1d39da3b..fac11524 100644 ---- a/amd-ucode/README -+++ b/amd-ucode/README -@@ -37,6 +37,19 @@ Microcode patches in microcode_amd_fam17h.bin: - Family=0x17 Model=0x01 Stepping=0x02: Patch=0x0800126e Length=3200 bytes - - Microcode patches in microcode_amd_fam19h.bin: -+ Family=0x19 Model=0x11 Stepping=0x01: Patch=0x0a10113e Length=5568 bytes -+ Family=0x19 Model=0x11 Stepping=0x02: Patch=0x0a10123e Length=5568 bytes -+ Family=0x19 Model=0xa0 Stepping=0x02: Patch=0x0aa00212 Length=5568 bytes - Family=0x19 Model=0x01 Stepping=0x01: Patch=0x0a0011d1 Length=5568 bytes - Family=0x19 Model=0x01 Stepping=0x00: Patch=0x0a001079 Length=5568 bytes - Family=0x19 Model=0x01 Stepping=0x02: Patch=0x0a001234 Length=5568 bytes -+ Family=0x19 Model=0xa0 Stepping=0x01: Patch=0x0aa00116 Length=5568 bytes -+ -+NOTE: For Genoa (Family=0x19 Model=0x11) and Bergamo (Family=0x19 Model=0xa0), -+either AGESA version >= 1.0.0.8 OR a kernel with the following commit is -+required: -+a32b0f0db3f3 ("x86/microcode/AMD: Load late on both threads too") -+ -+When late loading the patches for Genoa or Bergamo, there may be one spurious -+NMI observed per physical core. These NMIs are benign and don't cause any -+functional issue but will result in kernel messages being logged. 
-diff --git a/amd-ucode/microcode_amd_fam19h.bin b/amd-ucode/microcode_amd_fam19h.bin -index 50470c3f461a068e832a1ebe33683a041d4515fe..02a5d051d58b8028275ee6a0b091f11f8d1b6e27 100644 -GIT binary patch -delta 20542 -zcmZ^~b8z56_wE_n&cqW;Y)@?4$;7sud}B^*+nU(6ZQHi*`|jS|{Z-xCr>d*Y>F(<4 -zdj9JAoYRGYph0zD+#<4qApe!$e+DXY-hadYOQ`<|(f&t{_MaRKx%~fG1_3LNgJVVl -z{U0-+<#F8j05KFFDheE=pAP8%yY)Z8e^2|ba?!lG*_H})h9c;ajFKQ+n`rnHq2st{7`Xymh9xn(55h{1PelZ4( -z8)Cd2J()0bW4j{$oYD|O%1JLSP6mi8dD@uqn+Nk4Ni+EdNRK_hB4=x$;+<&Mo+g{& -z*KXyM!$#1!qASd{&9SB02gb{Wk>5TSb+4Id$e=iKOW#2M> -z50LG^X$0N~_F-(Rr6Da*9EC=6h$n7EYmWMyr4wGa})a(=Jk9V -zwIwD<`_dsUurwAYS;82Il!+DiDt+=zEwb11U_*cb|Me&$(i2a816XMfU9IWl(u?Jp -zY`(;-_)v85_Y16@$F{@bkS&gXmDI?HTK8L8XYeeBWzf#@Jc|quRbU2%jC<$A=U#+? -z_2ZOt`PhKYu>rN`KbDs#C|}#Hx&o6vyvxk^H}J55Il(V6w#>o_OTAehMkYV`=U@R6 -z7X$bj5vX<)>iP|q1QoZa4Ie}Kpg78Q<63ABGe8AN(6d6DMh&giP_mo#p{L|)ZfQ9R -zR-gR5JiUHf?4qi)HS(^WLgR9Og<7g{lda}@ZH;STPW$X=M=)sb@;B;?(3p9mi#;H@ -z8MBIc-}rXWi7sVTH5WB@HE4}WoEd{G^zO-r!E&t+S`tB?GaDTxXjS_=WwLsLII{?drAYw~FF;OqM*BAHf -znwA@Y9-qk=K#Yvk_{5*dO9Ue2$V^uZ1C^8e;M7(sId7r<#QZ6)UShs0yXv_cBmfe7 -zCu22d9X*v~1D|ICM2=A4$zel%rTd^2##O8`S(ZNhz+TS74M}ZWNrUz -zF56-OcG@Px00YtGO)|MbTX*aXG=Lb;C%~*zC)*-QlBEct__EWii+AUjlSK$tpYs8e -zzX%dk4Y2hQoZKm#fq$g%-*OcZjUat_nLm~hT;59{Z`!wT&2iUT!N7ePfnsyN%l}Gq -zix(BBJDj_u2p83=?enFz%HHe)QT@5{T&{2H5`Bb-?J2*6Ai3T--T`@34X|B0m0n`? -z^v+7O)QD6KyNT2MkVkYc5iFwW!FBEXexZENvaPAwazdm*(n3(PA5a65b?&sw8Yh1A -zzcoZ!B=*`WYzT#Xlf$nIidpcZhqxVTt(IzLmci}vIqn{MI2GkG)@I?p3A-r_RK(S1t7ff}cHKwZ{;0UbFahDD=zPAYar|ZTypdzX_A|zF -z*5akl>`DQIf?M23a?#r9m~~GAv&zv%@)qOE@&Pd;-ep2LFWVQ11F+xiHV@>ecUpa7 -z8QavpLww2=q%!#yJ%dwZX0pN;FZB?Z?XcnX*Gq977Lvks$^Rws686S<}lr7#EWalB)=PJ)BDn -zLCKC0t2OzmZ}&)e0jx}S56QSn{Hp36IUcukTw|^wT7=W9`W1F1+#^BVMqCBRUSYCp -zy}ySZ3zVaT`{2QQ08gR89x(~99I;lA%vy#o7iStT-*LFQyA6?h-zYQnP -zkXlrU0I&-+SAk&I*)MwIlQv9G!t<4j#ri#EMaah$E2rB2+_N-&7b}olRXU09K;IXs8Cj0xt&z+M0Aj#o?e31`cUIfqC(( -zCX^V18{Iz;CncA&5K-lFT=Eiia$$+vdc-J}ChXWM8B&Gg__bJ3FPeyO|9Cwas&tL- -zYM;5g4n(TQ-zTfBY(OZ97@=h|pJPEeenE>Ake9iZkM^3nJlBF-W?^DRAA$!rr=7^R -zo&-4(qbHDh%dE+2?kQBhFyF;Fdz9P2C3Rmd`C~Rf<1)hIVQYHb0r#gc!M~btXc3Cu -zSGA*GA-Ynt#pGUW*@j@2pxohIllJ!ht7UEa=t^=e-|rNZ>l)2raWc)X+bN?c@_(a> -z0mfer3(%~RLMJeoV@gIR5FUp+c8^5tHm6Jt+IWhf9wEf$gYp{G!+0k8XtB+$X?cms -z8BbsC&)6N^-O6w~202`>b(;dPfre%oH`V9jMx$q8=*}!V&k0rIL0q9FGq^C4PVH2@`Nr9vXg67xO0c^<=|_%E0_<1l$Y<|x5{(;R;(eJAtYEcoId_ -zgYOQTst;WL38@Ce_gl!5M{`(MFWc7nR~0=Ee|kfC`FL_O@C{taZQmoo0*6)lmUpyC -zRiSZytHqsSv&imuw%Yrg#k1!gA}d3Bmkn22`Uh-fk@y^qlN_1$;TOC@^-9u+1}s1EC>$6MSdCR)bFlb@$Ak*MQ>Rr7&Zslb&w{$)EgQ@4qW~W;WzyDD<@dl8@|CHcg -zY~U(0Bsar9zJBrOD0*aj1_qrN(x66s)16!FCBP}CeeW|?R5k1wd9>U>`R{~RGT;~p -zdtYb?;GpLwc@&w+Mg_DDU^zGTzwj&?^hQ7dv!p4>0_}S*Yw5FY0Z9h9%%bz2Ki@mQ -z^(2Hg7DvkI?t>Owr92Hq=}Y@`c-$(TG73h2m>!TKD)^R57j?750K^QgOgPXe5TG`- -zn!`-=sBoMoA(j1JU1vr49uXyQR=S#K{7vq?$1+I1r#63nF_P<#!6hcw$&tKO=C -zG!Qr&s?|4DtT7rjFC3{Q5fp6u4Lpnu7b%QEJl`e319IOspTSns%dfW)7$c*$l|}&# -z^5p2NIHLGk$dj*`>OaEJP@^&)pLYzu!;_JaCVys%;J4s(0$yynQibtt1ap_APB&G# -zNOp!YF)hi6(d@9Hj>^sKjTQ)skP=(5>qxaXFuZs*AP-N*&Xel4xaS?PUD>C+U%mSW -z{a(|Xhb+uRVE4!hN)6S&9Z0wBE-L7k^{|`7DQO!x-4bvM%cSxnsec34y|cuKwDAco -zdJ}~q>#3wafPr3m$W<#CPm4;Q*^#cGB`&yvuQwb`bK8%GRyN@rHY1cDlHOOl!UZ;E8#GjWp -zCSAS}4=T*_Qj9$r8eHpnd?&FpMlr&`+%qxK>hYme0QC$ni9f~R208gV)>(+IvVal%8CLE -z6f(Gu?R7|>kV64>7-gc%yUe~Cl5KYkGx-LY3Nz+o>@ox#p=o#N4U|T%J^bI)(Ht;a -zEtpkQK)^KyxZbyP1~bNK7+=nHw_XOKWVpdUDl)NNNp2s -zZY%Vj57cK@WeL%07M&?M{9b&6A;x0$a+FGSrWS>0oaU)FpBB1)R8IrUqU5aXvKo4g -zcx#A}{<2hZu+v!;7-oRSd+z(98)*okUd@pgXk~UJ?uK(`eK!r!($%3%PwROc7OmLT 
-zGSz~`9ac&HqWu#oWa44lPd>1BW%8@DoJzN8oFFGdU%}`J;gDGH`i_9<-MsjYhMyUY -zEC5d|-gOCF{haW+Mq#3TejGZtFpDJz;>92E81|Kc>#FXSN>z4z>gTml-!80UeitQHTA@~8J7F|P;r%!O|4&!rr7Nlu$IXA+PbVClv8)Qh)caDLKL2&l;) -z$6ktRlkp{$QGrwJ@M!gc45P)WKjIVv&bOEtP>o#QJ2_UPajjg-<#2kARov}byiD!_ -zn8m?1*T68@rgwW=8CT%Kd(A1r7O_8TjSPquFG=1b_@fp|IG++)Bo2!EgwBGJjT^>a -zFD1e)y>kB^Q~S@g4nju6SxI&(xRL9oE*ly?U$20>j*v2>;@TD}PF1VeM!RSN<^RG8 -zy-V0GAW&``;uxZJ8Tj1Rt|l0p%kteqw6~2hV^w@CkGGoeNBh=c>iWW@`|zQRmRg-N -zp+NY?qpXgUey^UJDI#!5EFrA!{BHDz|Ld(Zwochpx>@eEVQz7ES3(Q$r?@R*gY=TK -z?zX6P;yC_nm&{sa|3-q(3?R(}!Wbl*{H(ZD#-bftA$o -zE>Py-xSN;}sZ14V%VVfXT}ZurEXQnw;)Rk@sx{G7!alz1$<$RO(H`;}m#nRUc-YVDor-^rEr1ec>cLA443R3G`5`o;1NGd2 -z*+5?o0(DGDmWE0ZSNHV`i>o>wnW=;fuWcPE%6hZ -zkVR_)?;nMxMa=UKYD3y@E;Zb}rk@Mvo}_Ta+S+e!X1IR9#d~EqH5r+XD}~qT{X})Pds$P6J&YQAE=}yS7$Q^Vxd{P+B(#G^h=a -zz5^o*FKB*Cmv?_f0h}_cFOVnSp=K5w;fTOOajvO`uD$4CZFKs$S1c*#vYu-V{(AYm -z7v%5`&R@}g$6a+Oy@PzFQ#4n|Rl;a>u#Xs<8}6{OI$h=u@}m?vz^{1LVu`p$N;;69 -zCljHT$JTC${QNV$L6+KMiS>7?_i47|uTSa8Y-kyqa@sDK0S+%Vh|;$3$uGAb$%I^jmW54SDR=NDjB>AEAJrl0T`+%c9N8{@D)(S5TE)oB>{p -zOvq3EsbmGjcFFGoT>C@v@D`Hbu(~v2`%|*bsAS;lk6lGIIgk5Uem;n_h;J-N3pW<6 -zJB-n$F1S5)UXz|=?^}C?ZfJq44%IvFN)+Ga%wf|IK$1+`D&&u?Ii7l^TzyUc9~xEG -zf3A_=Urh=G1{>DE0n^05cQ%DNWNW9v)whS@ASaY*?IevXX)1?3Tm)s|3F-gRy&g;u -z5K`j*06(<<6ZriX7yehFvoP5mZ9-Jlu=|?@sl24HsH)Db+kgmIaWTnTJ#QJUH)E9d -zimS=8_q)vIT1w0z0%2;2oTmpX7x!$SlsyB3_?*e#QS>_I=OBG|ZBM}cp2-^pMW4By6a>V(|J!fUu^o%hrr5mJg~onFsGKCn2E -z3RD}jeb0d3pgeU{WZ8wiI}TAU8|CPiI`S*q$n@-sW__Q2sphb2mHr -zmb{SUrtl~C|Eqp2t+ZhA8(P)V=YM#|f-BRuqOJRbYr+`trZUzVJI!+Pa<7hc17fy2 -zTi=#m(IWWF1EfC8mmMhr4y -zFI6L6u|r5syTv|R-bixMKyjZAQG~K{JwN?ny!v`b0yLyp^#}b{oA|ls7kV`1L6X20 -z)m=K&$!<8eob^B%kAUfL`zhUP3)qovXxNiENOjAWu#~<`7(&D_lpv{zX{!C;q`sF5 -zjUAgP@a4}}F;m?2>`)QBQc?zi3Kwzjl=3j`Qw=DEztGZ;AR3uB@3j1LSup6jGk7v& -zI|21G00{4AHprbVY>PD@EF?+SZ}+-IO~UWjL;)T#00D*-WG -z--Ex}yK4zviU2D9%3vy+>#nY@x|(vfu4(t;kGJly6A>-1)Y$}YR*pKJ>5s)am)Xpn -z%uJn+!!&xLO+--@dmA;h7PeD>--A&g-JScL6)mm1SFYb3?0(S+ -zUzuucW^9-Wpo&~*BfJq7s%bp_tyk_IKsmv3+6izbCc}1kQs9WEa`x>MDAr4op_ev% -zWyWk!fuhT=EK%k91Ev^`G&8U!@f$Da2Z#1o0AY1qx@9$RF{Kh(``na(xe -zdz_0+^@e_=h;KD}`?$D9ee`g~87rfBcLLBZg5Y1a4~dwtV32HcOfNE&j -zccl^PxFmuSnA0y_5m#B)-k&UVTUfE-V$sZdHy^>+c)^ECXyh|bTlayLSDqjBB%N*r -zE*Z@ZVvP=Ib(EP}%rn9b`0}J4dN+$LN9WdNo$Y4*kexwk;HfB%FVPCRiablCCGrL< -zlSJw1aFO-P;nGBHJ`d^0{|F9-0NWo#Uih6W0KBHGxel;{KUXMNXqb46T}hsf@krrmd{{Q -z>v8rQ<)puSuXTt2=yKT^*A5pepvR%s{LS#L(h%UH&G=({VOF&PYGm&&lNOsMZ?7dVZ_DE;M-8&RuC7e{Hjk -z`aW%(y-=*(Qt4)_weFnS8atidBQ{lJ$yI^iRPZU|f>_!rz4Z0~1{mShMd4G#zsu)7 -z7fq&U$+3+x16!>s&NNh|#70N5=5XldFTRNNU1#!kR@|r|F|#|%e}ke~>{?b1?i@fv -zB`{3L)jbaRFP|OP<`LAg1`Id;<#+@?hAXXdx~5{YrORlAhfL4Zu?+h){udEyzKJ&d -zb_6Q9#M3wb^E164c+TxF!Rf`2+TY{uf-%+|K4H>*zL*HzXLh!FA6bTg40{m<_o}j) -z5vhwT#S?@tS;k%!6P-;;XN4WW4o|wXuU+d6&nDHCKSVTKw?esXZkxcU`RTnYmG1xV -z9wIWTH!Gmw!*!G;$=1A$la+_ErXJo43u{Z(#RlwrYA~`JQ1|R7=!f#PS905j_)$02 -z&m*f#NewC5Dh->ZZoT*d>oO>XFP<3VqJ2F5!rBD~V3 -zqcg2X2ghvCjYTyz=;m}W@HZcw%NFm@(<;Pypo=bM5mmVW0qm{zj~?w}{*jt-@BMtLhO -zFkDn`GKTm}rT%69%qy{J_|GG5IL^g?&T94?3stN#H*^jBl`eX9Jy6}FmfsSvZwuh_ -z<&UQ>l7pH_y_xxHAOZ6Pwrk;S^Hf$x|3)stJkECe)X;h+hOT%q -z<^BX{unqhMo-vE~!scL%6LZuzLJ$3#o3JWz{~p3)K0;2XedfDkvz->oc1OW{=V8#O -z>QXjr#sA1u3)n3fOgLL=HQ$n+%=$3_5vhQ|bkDQg{wu1vbj|{;XO)7Oh -zpjo!3h%~xW@Y~T_r@{)SPM;~Sc-_)uR&0J=g9mvYY&UzFCM{w)A!Q1F%ZiG>2ExZ+ -zm1jqg@|!jPc+|&wPGL0SO}8c@26C1Vl%9|82HN+|E%#T~=IVEiTy?CoTze)9H#uHD -zE(|6Ny5+4-(7SCPy0P0y`=i5;E9t==zz&Ulvy4uaM(YBplZ_zJn1~P`@6e9JEvWls -zauEY-e6>4;Ta*OyE*q;(vJ?T=*dbPLLVhWN0%Bt|@Xus^hY}~=!k-uLs9UASN8`b- -zDpqLEBeLRy>x9FFGR*)HcJCJ5#TBX0=s_P`d74*h!5Ne9q4it-&F?ii++6)=VDi3j 
-z3904AXSr}0oMA%aB_miCiJn^G76*P|5aOj6fs`plsT95caqp%qnn$0I^*R(5Y0%en -z5mVmi&m;9O8T|Jzt4F5wWSls2tO8GSut#*NFn1iztp#+m1)0xZ97TkAfggL~NMCZc -zm?N3Y>er_<>=A!Ph9Y$t@#dCs0rVlv2fYdl+Ea6zbr7a~|l%(mNlTmn`t_@WwWi<{ -zrasf3C!lwY;A(~1X?r-P9e>#)-zm4tQUsp1kN?ShtCapy^*nw;5^C3B0Nj8`h>A}fd_J#XNiD{iovTtTh|AG!~aYR!;0WFcbW@SATN^wod;b&%@TKg2`F^`Sik*vs52DV%CW7S?`K%Q;Qter -z0ms?8Px5pp*-gix7XCTY{Yi{rf>W_PPdxiX04Z?hG)z6ku?D+34OHMza+>D}qm#Zj -z2YH(#KrmP;mBk7hG*(aai0F1di1-#1PTj0>?c7#_L8s36w_}~<3gvKl*+fhcIMeg7 -z7Buf3R8`sW>T?*y@c>MPJqN0~_U8GTbFx>;+fcr-S5bkIY+FBK5cQyeA2VidjgFA% -zbXY3`T_uGv7F}etz!LFc=}EC1obh3jpNah}t92qB-*k%>v7pz8hScXm?WJj|U1rAO -z+?vMULZu7`w0U@tLifpOHHFF69Xk6Y=Gm9ID4|hh??{sy6 -z%&NZva=nwQfk3lnvzy#u`obKF4MWdCq+5Z7=-y0+WAn-dxSWpufEFyq?}t+xl$5t+ -zNc{=^W&Vw9M8wv0_!k!Te1A3YHl0kq7ObaEkF@CS_K*4kjae8+tPx+pNcgR{KC)wO -z(BoY7x^8m=Hov{Plg;D2oxBd@Ue&AwsApr{3?#9qxMYGjwT@5qpN5y7cIF3X-D7Cc#!46kAW~!@dz91Sh&n@S#R$9OH`OeJ -z{K~l3Pvo|IRY=*XGCfC$yP`Q}f;g1cc@_NJjB}q81ZU$*#jLYT;I<|QnIA?l45!xNDUB- -zwNdB;u#!@_t*l$2JFdH)!ZAospjEcEhO)98W~tzC)1dy^av_CTt#vl4?HZp7=m-SJ -zdHdrtb4gF2{_(N@8y;*kbj+VeE#b=eS+R>ZyC{vNz; -zGdFqCj#dJX<-5WGuO=jzT}`PKNDsQXVXqIspduM^qO~mkORT>Li(h^tT=P9Xm>1=Pu3_e!y*P-k;0;~w)C -zBOH%Zz^-tO`rGlbwyoGnj0V0y=&2VU`CayTcLy*X_3Kka9oz+IG|dm+rn$hCf1c|A -zv;oymCC$s%q+9ePt?tra3Cr1+Acdg^WL$HCw3eh!hB3nq77Y8ueG()1WPrQwyRBXuyU85Sy$VLb(!FB}-uZ-{>9Z^+1cKQFuWBl)Vy0xO%t)>DyB6d3+r{H!a=Qx9sjY-&G^oC`su_ -zIx{?wq~I^ZxlqY@3bYc36V0-=U9nw+v6OPc(}|1F2to*Q0GKKHg|a)>Mcvrv{ESc6 -zdGcRKtjMK;a(}`_Ls#v28O-u#RFXIy%Mp(S6eE6HZ|Vh#^*lNPI9qw3x++N7weihf -zkx}Nlw@c!Ps<~|Q_77}L#=?4s8OfzMS>D}dGarn?)mGU+D!W8idT3k; -zu|~ov%xLg4Wr*oEDY~J7b9*}tTU%)xb7nSC)0!*05a^OU055i@G_>G0q&F#sl9C^M -zv|4DIQWYJxwukdC+WZOpLvQ#lX>c{RF{wKl$9s2$S`2p3zwy%j5MMxIz^LO}g6P$9 -zV(^}ljOm%i0V$XFrHDJz+|0d%PK-pZ&W}%Jafa5CNj1UwIY&R{+6v#{*Fh7c%w2TD -ziL@Xp+JY{Je!Vr&$cP@{O)A0GK<@CE+6W&d{%#+A-FbsStEdPx?*7XyE}%6}c_v2~ -z&I#Q#Fq-fh5bPV&wSQC?Fd~DwV|8L@H%7qBmfRmuLzh=e*M346 -z$|H&JKT_ZYLcpLNX0f=gWGVaDJwm5LciRYn;2AQ5$K06`QRVl7Y|MJa{X*ds+;)nVB!Hq;#1CGfKa$mnQ^Xh)rIKB8L}dkz((l``OFDyg#=&rmGOUQjX9 -zmjy>G*<-;%(R{oBScIf2cEoHk0^67)0=htWN^i%%i|)P%0nqTBThb!Gk0eislIST! -zFiL4(p(8#jb1m|_MWnN`YLs-Tijvi1a^Gres#vHGPzH1)y_JKJaiyF^bc#{AFohEc -zZCX~vjV40dF^XYlBch@Ie+c+r->7#9Slg(R9(?m9{RXtBn#>)BZ3Nn0@e$9Ls2dsq -zy2P3h8$((CJIens-=bsj)8zE?C-QOiNE;Zv(9)tvKBH@9@xj@v -z_2N_*0aD)hpV9fPlkq(MmScS-|GC&omu%$&UI`!6i|1GhqC;CT;Asuw5UA&|d3>n2 -z)A_}(b9-qpenNJew;M7}|3M>L!~|NH7I(~E&Q%)}W)bup3zv!!F|mYz=?Yns&$?Ti -zZQx9nbcJeOSx~2!jA-RRkbrG!5WHha{cWD0Xm}lPW3$~ZC$(~Y{z>c=s;uOS)N&RK -z4cUoL(iiYYRNmbiz`K_3+AIZom{+$Tr=Z>Zu{fr`C4WI~L=rLXCrjF=LF>7}|IZ^r;J}Q@$2Yd|`JvG@W&m -zpHoq?(niOKHXr~~_x4vez?J}dZI)4yTG1c!ZrbVJ9R`jzAc0nO5HZfV3?j@ux-Ue? 
-z1ql`(o?x-A+$YmY=TYaS{px*PbKQ -zHj8CME77zO%$wfk(eRf(Bgm|`>{bFb@k(M+N8|k8FJ#&9eO1;%;1X7PbI>i!=Bx=0wy*W3WjC>> -z$=DPlLwm3tO`w7{ldJVR -zKFz;}tZ8WFY*jjZ+Y`l){AaT_nWBOj!iep>N_CCeT2gBGC!&GDrWus3_4_?X=!fB6 -zCPJb-@O-tJza#wqX`vd&m4_kI;NK$ZY}1&CTLu>1sED8~EVNVp4Knt`cNo(r%2qL; -z(BzYa-j&N;7x-k^{<6gT$2F3Q0^YV;@GKE0`ozBtLM9f&<)<+!&Pq--1mR2nE3N9^>QwE -zJCSGoS4(&-19?l~rZm)Q>I7NX-Gj4il6{2MI4XwXEJfC!;fyKviF5Pb7bIF~DUyl~ -z@WaE+p^Aeuj`);fw3F~&#*4;j5tjFe9bA{8J=i%I6ATeLzP%N#V;BZP$&ZrbJnIIZ -zR7;T#N12YEkykfq#;|KC9KGI}Q2~0eR9r -zuqOeM>UBuzLLr00`bi=4mzAqS9e87wyPoc+JFlKqD`R7Qsj2cfatdmK(p8hkmWZ`N12d4nr`qodZ?>n@2$3*H|8zj*wF7CQyCntXeIQ&jORXE>F-~hK#!GVt -z*-tWZD<)Gv(gxoc`IRn9SNtFNUd+pC_=a9SU<+q5uvS{mKZ8nU+RYy4K8znw&Ty4eH1&j$s1x{dz!J^N+s0@kpE|Z--~YCg%-p5u&{#4v -zRZK`}Hm2>M@I5SY)sEU`jg%HcS@}}SvxHIOfvQydWgX;xwR39|T&X@QoQb8XTYXzs -zQYrHcTtF@<`M3Aqx0rjEdcCy9P*sY_z1!M|#-|VoQ5UWY=}7`!sO&-*k&>i45D=|_ -zGScD{AQ!Y4XA|+cE;vjz?WK`W7Hj8X_)HDV20e=Mt5C7SAY`EeJ_Fisq~*cZSN32@ -zCT~a_4#|W7UwGz^l*I5}Q8R{uG0?9X+(kD|!UR8oS4vSp -zpzS9sz`eQ!K=_kwS0phCA&WR#{iqsf42cjmB_q_)I{_{WYaaR&4|QptVGW}~s-eq~ -zv7lUDIX7SATfxLV4T;0b(Y!n|@^$ZR$wfytt*xYHxQ0ed#Fr0(32hGqykYsR3f&%P -zb77TS^|^VLHjW;1Pw9^)=mkX<$K71{f4cP3OfMzbG4f+nY-#3@302a8=kE;a6F%z0Hz2f^2;#6RZ$q(H^f9t1;z -zwKwGSD)B$7hBLmQWjJHC1f~{Uu7a03-$o?-S+dO4!YvJ-*+i6a6(6*Tf^H^@52q4? -zuD#XeqWDX`|1SDgC)fIxLYb)NZg}==xW`fI!T?dI+5lF0wEwL5DfjlR2}9w!cS!&% -zLgUG2B+r5|wswrtnnMwx!plzWir17Y=jvRvnolMOm6 -zY~V^6iASwSC}mY3Q@xxbG_P5oh^VWX-=_Bp=>eCP6}B_qP0qVNF1!EfNZf{ued)b| -zRR`bA!D&>-qJPCIi=e>SlCciJ(oPY}<`ro)?6B*8de+((6nR9C4}U%Tb9AGZ+>9g? -zwK#5)+Hk-m^`auv6LmaQNw!MrtFD;t3FPaVXv+Wx4^vcSf6;V3p;U&CKVmI>Cx@(i -z&&hv4l}Qnaf<$Xh*Z4c*N^4uYxm8|NJFTY?x}@JxA=^r)@?U+{VW35;6RF^4cSiP4 -zW1-)4)jsPsUp^FPB<#SyFPV+aV^=q9Ou~SgB7pxS#&Zq{+Q?PfqAS#HgV+Pt1v+$9 -zF+bZL@~OWPi9RG9JEIs{j9Ms%q`7l$-C}6-!^L*sc!ve=aT12i9K}F!#j&P!Qv6fu -zjC0YXXWb*lM8zxviK&XdjjsUi`D4_1yNt|~P3$Fwmfz&5^d_y{B6~3}Q)mTxHuzuo -zf(-v2hXh-}rsGG{XM>Uo=&Ag~0kem!5PSyxQtAr1tYe!^MX@ZBxADo47(@OHuAOuc -z5M0gR7}K_k>j=#!=8?60EeLqy$S7Rm)uH=_!W5<<9i9F>Q^Pz=^a?dMxZ${AA=j&d -zjO$=fc%vFrCaVqTRgUH_j;BExZTE-B61ENkSv2sQAs_2ciiV79r~|Jipc2wu+2o%d -z!=AesN4zEw^%GGml>Je611urQwhg$UC!XTyTz#ptk!fj_dDyVcziRR8uS%jf5UMjg -zJVRPDwL$olI2H=Zv(_vftJV3nn9QzL5-3P*!I*wyI*$1}(192a@n1c)Qej)Y3xYMBv5CTTrQUOKm0*=`qYocx@-fP&BpY%|DVU4A4*#K`;FXozC -zLrH`X$c0`txEkcuDtCk^53#L^n$@L8N;=z`2ZS$BbWb&$h-i$nMAeD#MZ`Qs)2>TP -zV$GyO%~PbGc7*V~i(?B?J|laVIxvsEo)d71l1dB%UkEmr$JyNZ>Nc}h -z?T-F)?w}xBf0Tj=oBBZZ5wf$!Dtp199mG#SK!hS8RB7~ -z{)1Q$Ls5F^ygnlF@o_yKU1168ces5_0C}D=Tah9K*O*i`oFkZ*Yg?LS`_>Z5o5krX -z*eygoR^Lvpleh%Kyuv)mQPs!IR7%(`1tiTx3|`ncX43xx1N+x%yL}+*GYRWAd# -zFCJnhx>zG{Mr9~TzI0IhUX4GNV#gMh-g&2uO=TMGG#-3P`AvKZZ@FvM7#gmNA|Id+ -zs#!6Qn;00KfZa1A@Qhsl`~z5-IE`>J|Mu7ZMz!x;F=n1gBqyF5X+kIz!43KEUlFQj -z$H_;?fxKIxS5HEBlNHLYzD3;Kns!D<2a1UP*iEh(U4pc3Ozz96dl~_*O+NdRehUto -ztbvM2zh)4s801nG19LF3s|+^Ex3#8h3myOdd!pPG2F98cc_m^44X(2K?=NzR7i3Q+=}49~N)uGg#lN*L`3l51K?F>6PyZU0ex!(w-|>=$ -z*UX^WJNyIhwV5g_U(eW1>N*Jv;(op8ApKCP*iz;;nF$p#|MGR&6-A_>%x^KTs*aI7 -z0*u`emL76Ln_SsQ%8VaK5j+$8iA9Vq=ck&O1aiVm>mv)a8KubScehrdWeJD1I*D

rww=MrnvD5Mk}X -zhE=K-H;s>w6p#I9R2io7J-Nq$LE$)DHdMOhgV-%)80(alWYiv=?jmtS -ztqNI3j7Xrf-aGQyaRjAUxs0ibvC0*bUo*sM&R@^m&8f!e_P^6S>sz? -z9rcz@kmBr^=LT00P)ek+r3OH;7Jx|a3>I!+M9L%WZngQ80JEH!{*5IGNKw9GT0*hR -z`6m3%{+<|j9N{Jk5wPAMe7GaJCQecuuVY`$AJRQBqCe}AxDSy6lD5TrFXu!D5*F3- -zD#|j)a||~URbqL2RGXz$jnnj$$J(UpCNKINoxd24;n}tchATsV`LHDr2KsOP`_I=C -zB<(E=#Od!X(zS&p2n)Oj!5xc7yn=`ATa7;q%8-Li@c-n8n9{wy7csejVX4*Y+#aGE -zQ|vJ8A0}h7!3Tk=OZgD4aS;*7Lc#~@zRe4D#ZI*{8iK(8B!O6<|0f9q{of>zo0FTk -zSWpbiO$#i3;Gx{FvJt<%O(vV~6Ju@tfc2oQ1w~D!o9eh9cfm-L1caA)^0fSj;mR{; -z!cv;@5nYV!;^vl%`iFw5x)0|HI(k2zm7xqKC!-gjlw3;Pgc}PnL$wX+djm?WX%bq< -zoJh*xB8?cFxuBoKrHRH9(clyQ*!&yf!QnziP4GlE*?DFoNVr)ZC+UYPK7k_1N+(Qx -zDBM0Ks6z8I96t9Si;;UfC2Os4qaq%<%*sK6iuYLLIxDVXyeT^`QW*0bXzU(p|NIII -zy*obYMz6p7G_%?Dr*uopw(;G8U*b-I4#)H%LE~Q_$sKI6EoVFI9GcLJ!OO}W^Mn|8`deIOl&%_mZxY$o -z%wFEyYXy6S~MNBq%QZ;8rn101ILgJ(0e9elIfU)j;>Mv -z$-6wi>H)v7Q7-3}Ka_nI_C6@EYMFRi8+cXvyOfBas{jE(l33G7Br3T(QaRt_spYIr -z?N}2nePS3#T^N9|VMim2y|yn9KlJOHx*Vq(Mi21>K_nQ8VcqU~_eNHamwa>q`$ -zQu){BOY#e8aoE2i^~^wfS%ynwiue5A5(dz4-GBU0CajuYED<#-P( -z8{Ejix~AKgw|#W|#crO0wpSt99(mI35g$F+b|#@OID-1!>@4|ulUbMJw21_Ce(1~O -z=X>%WJ#5XN-Ybe5DQqt`Ymxf}As+pa=ppG%NqaQk7gp1&z#kd`F -z!Td2Coc%%+DjV3_e~WniuK;fkknlvN`q{PgR|r{d<`CedIIuKXGi^pt7c^~fW7>cW -zl#G#)XsRm1#-MWoYQblJD5~y6rL16hOAFP^007X5_ldassfF*{!`MsZ62k8hps|WM -zPgaJ1eF4lQS+&G~`DAp10khqki&j)C!noDeUffPVwA&+E&L>!bi^R!T%@)jB5i396 -z{qU#dJo4`A7I%3zm}Wh)sOEXq)Y>Q3x!J6At$u#?=dEqxfiVPs-|j2WNaNsB-w_8? -z{mJsb53jKnGoH&ib1|Qn&~d=U%?zl#D7j@8e1Y4V9*~ -ziG&uZN~Z)>hGK+^%U|$E&NU-eJ8xpn?PbwxQ?S{;HnTT&1aDg^Aj|WqKRIJ -z>KKY@irSpt?G9Ccr!{Ua@CDHcfdB<855x+uh39Tu%?GGE`?g~3@X^FQ1D(?X-0(&JfIXX -zq9Dx3L8)ecMIKHhxg$>m2j6H1Y9h1mvtRd5cBqTPvD2kd$t5nXfxvjCy0g`4{)Ums -zMoM5w>`eq<<%dU#nj|Y0(yF5TUt?7P -zRz5{LRW+tmYy-h#TOCa3$Xf!xD1cPHI(={Y#dJu#Jb3jYv%dvbCX{LnS@wrkV2$O+-^DP@!!Qa=eI)>vJ`vXKGgQwD#3Hu%7a -z!hzp^O0|!Od=IT^mt|SH>=fvhU -zk2A%u0+$Xu^9K?@J&J-pI~%+nse6Xu0)%YlqBl3sO1o1g+GIjQoUV;>_7!t%ec-O- -z_xIv$E{Zpx%!IUQ-dxTJ_M`hVL}pe6O$_sY$|({WSAIJXVPX+fg>kw?Gr9&`q~ymo!S0fIq%s;N!frOLX1f6@T{SZ(m?pFy|-DWXJ -zYWF2YiLAxU;xryc-RT6(6y2-aBu@1{wRL)06O=5AkOldP^a;AE7E>yi;&VdqgH8E?A#}ije2K&;t1coh;-K>H -zf)`B;a(lu;rI5V-HrG#}rDyhaqj);{09EcD;n6LoJfphTmX$jvhmV`CvQ~$~n~Iyq -zaKkcL(Z5rb;!<-L|4$FBoPJxIxtG{~P*7ktk3O67&-f<(39kph7^B=0JwMFFLyC~@ -zH6T%$8Deq>=vZ3##O3)3%&Y{*VxBwZf@CkhgPLii^T}8jXUf`IrgPJsVE5XC(od4T -z_J>l`_Kg!);6fZeY@AT5KM(FlG^&W&oO8>iqN}Z05I#?lv(?*e*I{?Eh6HwhBnGQt -z`b{3u$g~G;A+iHfa}IzUzSU8WX)62-K-)eG*eNCmdlIuSLZ_ZrDX|Wfokcy*XQ=5m -z3Yz#TusjRRCCVEFoDrh^H@jH -zCgKjiR1&F|N3~5aJm}#iLm?@Ds_f|gfBV=klmV7q%?xF2CYg+V;K@`9O%9X6ivxL0 -zj<2&-xz}$;=9Ov*??j4EEXp>3Q9e*p>I&X#nM& -z9kvMaEvD=4{e(EnJ-ie5&=L9Q;c&I!#GofsE&1f*n37S3!y^Nblq+z5O{<7>!w -zW&W0ct0arZh4}EagdsF5p-E_>rGsX+=^BxkN<~Ad6fHg^PfVwT22)w(C6+&K>0Rcx -zph7@KUA^_?$8pZrf&9IHWfU;@=o?8fFwEp53ZjuDRO-fJXbz~)j@4IolVZA?wQYmyTcQI?Xa%*^d9+5A?s_f -z8y?|I=KI4SR;30E;mh~8OAVPf$$jEX^0UfN&-n^0^rS-x63P)2+TNuU2_n?d=XBiy -zoDoQrMyjsP&QlNNV9*4Q+8~I=?jv(LfmGr&iSw91hj83D)V(l5BP950BHQ+(>`pP* -zbk8iSOS$qo+stl%Mp`Omzahi{I4BdHgS5Ntwsl_iJMn>wUA}$UY^)lTy~@hh?$FXT -z-s+m*-0;yel5w>=`_!@#Xl%IT9*q(&{$kfACS(b(T|Siw=B7G(>J~a>Po}G~-2BbQ -z0;-5yACeoSPnkpBgmoOpdji~H!fwq6=>zfP~~6Z$p* -zH-^O3ahaL#g2$l4+C)GaG7Y;cLSf4s^xA5*I`#ya+6}i>b)}r^You*3rH8vfwW(=U -zNz=<|-&;3-_=$WT>6Z{%NLT!_)K8+9p}J-LdZt7h(%l0eucNt<1(mf>P+Op|J0dtE -z;6TL+$Ph|bIl?}oj#WYBzfYE5A%s7t@BPmNFmQEw7vb_*hGbq -zRKr{MUQz*ya%RXNdxd>bFfx)@GsME|7HZyTES=hahGThA@nduW5W)sGh$1x<1Ybf) -z3g}6Uv-D^I$NZ}k?;Kgvz{MWwm0rx>z+B`&vO;`1?vgKUV$qfbrJGvh4V5eUBZV5) -zP^$KN8AGwUJca>;2Wo 
-z;u!OP?vpE-3alwVtU{>ZecpkFR5Tt*Zi%H!Fy>#4AGehQulOa;Two+Pd$TU=;LROi{aq%J(B(`|mfpnq3rO^_)rdAuc@aatZXXb?t>t#P&wt9(!AFesoJf -z?^+VNm6r$lMI$koHXZ|4h7+rBEy;b>ij3`lERk;?rG)u`)>zZWDi5RPJUv4+jH81< -zGuu#Fn5Y;AjHB8uFsrZ-%CAqW$h;m@j?|n`5kZZ>*&uF9JkQADMkd$YqY?4~m;F4B -zMy;fbp5kvB&^06%FU+3UXujbOOZ^7}mjS^O_XQ#;8OifQd-&PLEL#&tVbJExuB^X* -z@n|MOjt;wm+RC;*QH7fv`Apt$hAL9UIwajtWLujJP%TmQXiNjFEYqXA%cY#&-)B(B -zrk#@ZMLANg%omz}Ov=CLC5TxqGpB3@Tp#TaqRXUgtZ-ROtSDg1$)ZEvO!3%Yg7SGa -zUly@SK -zO6>DqWadm%eRncLZ=>^KBt18Rc4*k0kRnR>Jfd8SIFt(sCf-TaGKHi|(Ab1V8rq9j -z*=-aZM-jx5ia>JC8Cy@0Wr}KDaGvZ>CgTGmn#lCW@IZ(!vM}2?#+9qd{cV4L(YyW& -zJBhn$xqiBrhLKv|4G%}EyL39MiDxOa4BwhJ7GC;hj(=|^RO|Rs~T~lu)$~)&LvZnQC{eOiLbkBO%8Y0 -zNci&?(za1apd49%2jsHn)9mOe1KFW)*tT|NvGa!ehE`_^R0j0m!F{4+t>ySyn57LNwv8(dz{k8OqjfOD!p=3fO&$k -zIO-r~6gzf4Heao$nuX?n`;h)UAg27ph0rt>tG?wCjB}2*qs2;6;98|`dPa~o#tcDn -zDiA1z3+WepBR};M%vb3QHH-LvzuRUP(Car>z`s`}oD|&yqSucR&U% -z=hcRpAc4j?E=i^OJpaH6sp^uA9BI3=eAyz@X9m(ZoolEF_D0r!JebH%F-`6_`h?2< -zR9yoRD)!W)q}vjuOhA*Z_UgROyx~(bW#Iq1N%tSA_Vsewl0AUjo_5FVwg$yokkaM% -z>o0;XzIdUmi(;zd7p#X_}{=4LSCp=4SSs7UNz#H45dU4lexOn -z3je`hZa-yr<)(a1&)RmhE0aw`Klqv4EE=vpeMMx8%H6Lzc+pQdO^5v5 -zEmS=jE4^Oz*j?%5rHPRW+Yp+!eZ!kPKXWXSh8>ky4i?-c)7`#FMSXC#=lFdG-C}$;h7_1s%9wgVi+gMATta03 -zry)_qnStosRd0OiOqebT=_K3-MEZF9IA01VVVFCTA-p|~3#JD|nlGnNx&iY|dVf?j -z#@MhHaxLjWr3fgvl9Rr<8mNU(H@=9+4Zc!AOBivp_Z^@B2FjMb_9tuwYC0{dMJnK2 -YVQu*TZ8nl?f%`zS*L1OeLEkX&JxSZWhyVZp - -delta 28 -hcmZqK#I&TDk;}!`kpTo87$#a7O{|gH{LazO5ddxw2oeAQ - -diff --git a/amd-ucode/microcode_amd_fam19h.bin.asc b/amd-ucode/microcode_amd_fam19h.bin.asc -index a32b4d61..8cff9013 100644 ---- a/amd-ucode/microcode_amd_fam19h.bin.asc -+++ b/amd-ucode/microcode_amd_fam19h.bin.asc -@@ -1,11 +1,11 @@ - -----BEGIN PGP SIGNATURE----- - --iQEzBAABCgAdFiEE/HxsUF2vzBRxg1fK5L5TOfMornMFAmS3F00ACgkQ5L5TOfMo --rnNEhQgAizSV8IFpvaYNytaJKLA4uevrZneGPV4czjCXnnj1yHpfQmCTyZQnoLnx --7gyzf7K5271zO51FBQ5z2Nm48a3XPUhMbQLNP4BZdekLiA3bRpMtSyHct6zD0ULm --xaFaOQ7MR1tGADhlon1bDvtnOuixUhwrZhEIlR9MzQAzERKDMOAVTbxn9ZhMfYiT --LhA791Blyyi+6Z9uh7BpaA8l8uvoxt+uuvlBTjQMR3ER/TEjgcsoy+XhhK4QKS0V --wJCtcDle/3pF+N6SAFWiXbNZ+P8p19afhcYddDl97xtpzA6/8b20a2eHkrqnu/Ds --jTozF9kmhiifYMYpXtXgSOwI3GRZbQ== --=t+j1 -+iQEzBAABCgAdFiEE/HxsUF2vzBRxg1fK5L5TOfMornMFAmTEYrcACgkQ5L5TOfMo -+rnN4IQf/QKbOezXZ4OYzaPANvsZQEAzLNfuylC/aQMwrPaO7daz5/zmCN4HU5XkH -+dDT8DYfPg+fQHIgxAw0/L24xPOm5Op/QuLVDyDqVr4qvL8+65eeI+JqxD/wXMXYN -+V34kkLM2p8iuyY1Nc8IDLXu4X75KGNPbKZlMRKMU3Pr7ai5O4ihmiAM+N6qv1KEJ -+YToNN6vrg0qt1cv0SLM8sa4e7L1+oblUrg/o0FViYE8pxsU3ZRRVSJMUg+lKjvl/ -+1ZPGKOdD80fcNJ+ItYGHNNs3eCc3WgW7Kc/E668eH75Yu9Zt7ewWZX8Sg/mygleY -+OzMwhbPJg4bF4zm7C/Pku7i1T2Omcg== -+=km2X - -----END PGP SIGNATURE----- --- -2.40.1 - diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index e53448b0..f8469310 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -11,9 +11,9 @@ path = "../packages.rs" # Check the two upstream repositories for the latest releases [[package.metadata.build-package.external-files]] -url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20230804.tar.xz" -sha512 = "b7fdffd49530223394a0896608a746395cbe9d1a3ca7e4e744bc8381e937845e085f08e2b56854a233426164072f4c365b281db2f0dbb47192a97a94ada8fae6" +url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20231111.tar.xz" +sha512 = "dd8eb7e8a51fe14479e2f4e1081a8a31f13d041ddd6180eaae6fe865dbad303c89aaaed2b9df237923f74f1cf9fe8c2e5492b4de5fce991ddb02bb091c95dc58" [[package.metadata.build-package.external-files]] -url = 
"https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-20230808.tar.gz" -sha512 = "8316eb9d35b315e630c6c9fab1ba601b91e72cc42926ef14e7c2b77e7025d276ae06c143060f44cd1a873d3879c067d11ad82e1886c796e6be6bf466243ad85b" +url = "https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-20231114.tar.gz" +sha512 = "a684444ef81e81687ff43b8255e95675eed1d728053bb1a483a60e94e2d2d43f10fc12522510b22daf90c4debd8f035e6b9a565813aa799c2e1e3a464124f59b" diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec index d2394adb..697949db 100644 --- a/packages/microcode/microcode.spec +++ b/packages/microcode/microcode.spec @@ -3,8 +3,8 @@ # These are specific to the upstream source RPM, and will likely need to be # updated for each new version. -%global amd_ucode_version 20230804 -%global intel_ucode_version 20230808 +%global amd_ucode_version 20231111 +%global intel_ucode_version 20231114 Name: %{_cross_os}microcode Version: 0.0 @@ -21,8 +21,6 @@ URL: https://github.com/bottlerocket-os/bottlerocket/tree/develop/packages/micro Source0: https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-%{amd_ucode_version}.tar.xz Source1: https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-%{intel_ucode_version}.tar.gz -Patch1: 0001-linux-firmware-Update-AMD-cpu-microcode.patch - # Lets us install "microcode" to pull in the AMD and Intel updates. Requires: %{_cross_os}microcode-amd Requires: %{_cross_os}microcode-intel From ef95500dd9ef2ac2b78c09ac7611ddbb03449726 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 16 Nov 2023 10:52:48 +0000 Subject: [PATCH 1147/1356] tools/diff-kernel-config: Write summary to file Write out the overview of config changes to a file in addition to showing it on the console. This can come in handy when using this script as part of other automation to recall that information in a final bigger report. An example application could be automatic composition of PR cover letters. Signed-off-by: Leonard Foerster --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index c433c63d..3fa5c62a 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -270,7 +270,7 @@ for config_diff in "${output_dir}"/config-*-diff; do / -> / { changed += 1 } END { printf \"${config_base}:\t%3d removed, %3d added, %3d changed\n\", removed, added, changed } " "${config_diff}" -done | sort -V +done | sort -V | tee "${output_dir}"/diff-summary echo # Generate combined report of changes From d0e60b996648c599af250f6becf4ecea378e2ca9 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 23 Nov 2023 09:37:51 +0000 Subject: [PATCH 1148/1356] kernel-5.10: update to 5.10.199 Rebase to Amazon Linux upstream version 5.10.199-190.747.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 7faac1d0..7f07695e 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/5c8155b74bb2980fed073710617014a21ad836d9b6aa2c1d39e9168289236fde/kernel-5.10.198-187.748.amzn2.src.rpm" -sha512 = "ae931ec40f8edd7cf76dfc10e7f6e8719cf680e3aa4b65a4efaf37b3075b36d71f01cb28fa59f0e7a73eba7a41f6e0b753bd1eed2fda66563e7d6f2ac36394d5" +url = "https://cdn.amazonlinux.com/blobstore/bd5e8c34551ab8c3014f5992f3561dcdf6525a3ded7dddbd6e84028bedb222c6/kernel-5.10.199-190.747.amzn2.src.rpm" +sha512 = "eafa55b9faa750ca594fb5b28a345c6f95f46edec74b17fa389111b8cb7d09d932a8add9fae1a5eb8daf10c27fa858d678bb6da4123ffab139056b92356525ac" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 8e37e40e..dee1464b 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.198 +Version: 5.10.199 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/5c8155b74bb2980fed073710617014a21ad836d9b6aa2c1d39e9168289236fde/kernel-5.10.198-187.748.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/bd5e8c34551ab8c3014f5992f3561dcdf6525a3ded7dddbd6e84028bedb222c6/kernel-5.10.199-190.747.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From bacd6aef623e923aa4639f59a5635aaee08f176d Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 23 Nov 2023 09:38:39 +0000 Subject: [PATCH 1149/1356] kernel-5.15: update to 5.15.137 Rebase to Amazon Linux upstream version 5.15.137-91.144.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index cada059a..ed7513ab 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/8bbf53203badda16f39f6dabe8974acac6f4b3d0dcf96378a434a32c897da379/kernel-5.15.136-90.144.amzn2.src.rpm" -sha512 = "5f1cf5c446a96805f54f8e38a32d779782e510af0c4efe5015d07d3d5606216410939f883c7769f9faede67d77e4d9fc3a2ba4a9251a150417ce1a2283304066" +url = "https://cdn.amazonlinux.com/blobstore/920e6b84cc4b3dad00df2d1a77a63242a9338b4a11be7b2e4bfb2b32c92e5cf4/kernel-5.15.137-91.144.amzn2.src.rpm" +sha512 = "10339ba4db07782fd0058043afd3b271ad9903755b4a1f3727190681d3fcac4253d7b022d251bd8864a5cb7e66c38fecd1b8092bf7de885b8fc34b28394ebdc1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 52a5e29d..18e9b8a4 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.136 +Version: 5.15.137 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/8bbf53203badda16f39f6dabe8974acac6f4b3d0dcf96378a434a32c897da379/kernel-5.15.136-90.144.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/920e6b84cc4b3dad00df2d1a77a63242a9338b4a11be7b2e4bfb2b32c92e5cf4/kernel-5.15.137-91.144.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From f07e42b1b34bd10a5be6c074082c1e95db89a3ad Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 23 Nov 2023 09:39:11 +0000 Subject: [PATCH 1150/1356] kernel-6.1: update to 6.1.61 Rebase to Amazon Linux upstream version 6.1.61-85.141.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index ec016baa..61cdebbb 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/7f6b70d0766761e79bb6dae9a840ac4fb6ca95c78dad994ea97abac37dd2a061/kernel-6.1.59-84.139.amzn2023.src.rpm" -sha512 = "9e5c3dab3583742254775c82710007360da8d1a0b252f2acb9096788f6ed33d04599ef61bffc489f78540a4f8194440e79aa3e9ff25ae3be802973ade868bfb1" +url = "https://cdn.amazonlinux.com/al2023/blobstore/64195460250d20bac796e24a69da55beb4bdd09fb3ed41f8d4c9ef984bd35f7c/kernel-6.1.61-85.141.amzn2023.src.rpm" +sha512 = "94871ce78edf9475b3e4ccef44292172dc4a59f7cf00e0c765b5be4688d7c8e46fdd2a6152cc6006c9f191fcbc1377e3a40dd81896dc9971741c814bbf36799f" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index dc2c7025..65f9f6d6 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.59 +Version: 6.1.61 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/7f6b70d0766761e79bb6dae9a840ac4fb6ca95c78dad994ea97abac37dd2a061/kernel-6.1.59-84.139.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/64195460250d20bac796e24a69da55beb4bdd09fb3ed41f8d4c9ef984bd35f7c/kernel-6.1.61-85.141.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From a8e57076be63a1fe64d1659d4a4249f41080449a Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 23 Nov 2023 14:54:32 +0000 Subject: [PATCH 1151/1356] kernel-6.1: drop MOUSE_PS2_VMMOUSE from vmware config With the update to 6.1.59 we picked up some additional mouse PS2 drivers from our upstream Amazon Linux. We do not need these settings on Bottlerocket, so remove the extra driver. 
Signed-off-by: Leonard Foerster --- packages/kernel-6.1/config-bottlerocket-vmware | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/kernel-6.1/config-bottlerocket-vmware b/packages/kernel-6.1/config-bottlerocket-vmware index ec1cc1a5..6f350325 100644 --- a/packages/kernel-6.1/config-bottlerocket-vmware +++ b/packages/kernel-6.1/config-bottlerocket-vmware @@ -14,3 +14,4 @@ CONFIG_MOUSE_PS2=m # CONFIG_MOUSE_PS2_SENTELIC is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_PS2_FOCALTECH is not set +# CONFIG_MOUSE_PS2_VMMOUSE is not set From ba8477f51d677b1209b46a8853456b01fdbd2a50 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 6 Dec 2023 07:25:57 +0000 Subject: [PATCH 1152/1356] kernel-5.10: update to 5.10.201 Rebase to Amazon Linux upstream version 5.10.201-191.748.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 7f07695e..2a37fa30 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/bd5e8c34551ab8c3014f5992f3561dcdf6525a3ded7dddbd6e84028bedb222c6/kernel-5.10.199-190.747.amzn2.src.rpm" -sha512 = "eafa55b9faa750ca594fb5b28a345c6f95f46edec74b17fa389111b8cb7d09d932a8add9fae1a5eb8daf10c27fa858d678bb6da4123ffab139056b92356525ac" +url = "https://cdn.amazonlinux.com/blobstore/d56d799376346afd56ebe3eec4b500b23c6fd3954f66b65aa7c867848d17a950/kernel-5.10.201-191.748.amzn2.src.rpm" +sha512 = "8d8715a1e06dbf7adbc1b5f80e99ec6b5d448c9aefd69f48f77fe6c80bd0e2668a963f85360fefd93ea979e2d2a5323d916301f2f59961e125a5fe6c8b38065f" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index dee1464b..20a3d994 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.199 +Version: 5.10.201 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/bd5e8c34551ab8c3014f5992f3561dcdf6525a3ded7dddbd6e84028bedb222c6/kernel-5.10.199-190.747.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/d56d799376346afd56ebe3eec4b500b23c6fd3954f66b65aa7c867848d17a950/kernel-5.10.201-191.748.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From e4ee47501971bd4371c79a6605ded45a4e2276d0 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 6 Dec 2023 07:26:26 +0000 Subject: [PATCH 1153/1356] kernel-5.15: update to 5.15.139 Rebase to Amazon Linux upstream version 5.15.139-93.147.amzn2. 
Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index ed7513ab..bca97934 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/920e6b84cc4b3dad00df2d1a77a63242a9338b4a11be7b2e4bfb2b32c92e5cf4/kernel-5.15.137-91.144.amzn2.src.rpm" -sha512 = "10339ba4db07782fd0058043afd3b271ad9903755b4a1f3727190681d3fcac4253d7b022d251bd8864a5cb7e66c38fecd1b8092bf7de885b8fc34b28394ebdc1" +url = "https://cdn.amazonlinux.com/blobstore/76d66a34d25e5ebc08dc424d9b03b0ecb44046eb05d95e47459447a5ab582cd2/kernel-5.15.139-93.147.amzn2.src.rpm" +sha512 = "f63269b6466df01b5a6a3387091665cb8931090fa73d848f0d77a6729aa7241d04be77383e34a41bed575cd295bf722bd6668965d1f1147d9ca975ae04b53219" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 18e9b8a4..061d3e20 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.137 +Version: 5.15.139 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/920e6b84cc4b3dad00df2d1a77a63242a9338b4a11be7b2e4bfb2b32c92e5cf4/kernel-5.15.137-91.144.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/76d66a34d25e5ebc08dc424d9b03b0ecb44046eb05d95e47459447a5ab582cd2/kernel-5.15.139-93.147.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 83d550ef1cf9f32a9fb672c52304c0b15cad8197 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 2 Jan 2024 13:28:25 +0000 Subject: [PATCH 1154/1356] kernel-6.1: update to 6.1.66 Rebase to Amazon Linux upstream version 6.1.66-91.160.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 61cdebbb..7ea34ed2 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/64195460250d20bac796e24a69da55beb4bdd09fb3ed41f8d4c9ef984bd35f7c/kernel-6.1.61-85.141.amzn2023.src.rpm" -sha512 = "94871ce78edf9475b3e4ccef44292172dc4a59f7cf00e0c765b5be4688d7c8e46fdd2a6152cc6006c9f191fcbc1377e3a40dd81896dc9971741c814bbf36799f" +url = "https://cdn.amazonlinux.com/al2023/blobstore/5880ce1298c2bb541461845a29b2787036b8d18aff0f0bc308117a5f9990057e/kernel-6.1.66-91.160.amzn2023.src.rpm" +sha512 = "4a2b52e6fc8045a5bdf3f7a4a8080623206352e2921d9cf899e367c9102a806dc1135985863cb5efc43c4a757971404e88abdea9c78dcb55cb64e204f97dc232" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 65f9f6d6..8a4fa68c 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.61 +Version: 6.1.66 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/64195460250d20bac796e24a69da55beb4bdd09fb3ed41f8d4c9ef984bd35f7c/kernel-6.1.61-85.141.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/5880ce1298c2bb541461845a29b2787036b8d18aff0f0bc308117a5f9990057e/kernel-6.1.66-91.160.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From ea092f2aca323f72f16993d8e7b441b3333cdea7 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Thu, 4 Jan 2024 21:24:44 +0000 Subject: [PATCH 1155/1356] actions: ignore changes to eni-max-pods mapping --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f3a7db9b..3ee66c2f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,6 +17,8 @@ on: - '**.tpl' # Sample config files and OpenAPI docs - '**.yaml' + # Other files that don't affect the build + - 'packages/os/eni-max-pods' concurrency: group: ${{ github.ref }} From 9481a9d111b413dd2eff9ed90e20cc9198472cb3 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Thu, 4 Jan 2024 21:26:31 +0000 Subject: [PATCH 1156/1356] README: add additional commentary on data volume --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index e1be85ab..35928373 100644 --- a/README.md +++ b/README.md @@ -589,6 +589,9 @@ Bottlerocket operates with two default storage volumes. * The root device, holds the active and passive [partition sets](#updates-1). It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. * The data device is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). + The operating system does not typically make changes to this volume during regular updates, though changes to upstream software such as containerd or kubelet could result in changes to their stored data. + This device (mounted to `/local` on the host) can be used for application storage for orchestrated workloads; however, we recommend using an additional volume if possible for such cases. 
+ See [this section of the Security Guidance documentation](./SECURITY_GUIDANCE.md#limit-access-to-system-mounts) for more information. On boot Bottlerocket will increase the data partition size to use all of the data device. If you increase the size of the device, you can reboot Bottlerocket to extend the data partition. From 51252da8c2dbacfe87ca8628ff41c037767a68f2 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Fri, 5 Jan 2024 13:12:08 +0100 Subject: [PATCH 1157/1356] kernel-6.1: cherry-pick fix for creating kprobes using unqualified names Commit b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") in the upstream kernel introduced a regression where kprobes cannot be created on functions residing in loadable modules if the probe location is identified by an unqualified function name. The faulty commit was backported e.g. to kernel 6.1.60. The backported fix is on track to be released in kernel 6.1.71. Cherry-pick the fix until 6.1.71 is released and our upstream catches up to it. While the faulty commit was backported to the 5.15 series as well, the backport has not hit our upstream yet. There is nothing to be done for our other kernel packages (but to be vigilant about not picking up a release with the faulty backport in the near future). Signed-off-by: Markus Boehme --- ...dule_kallsyms_on_each_symbol-general.patch | 68 +++++++++++++++++ ...Fix-symbol-counting-logic-by-looking.patch | 75 +++++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 4 + 3 files changed, 147 insertions(+) create mode 100644 packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch create mode 100644 packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch diff --git a/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch b/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch new file mode 100644 index 00000000..cdb8e164 --- /dev/null +++ b/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch @@ -0,0 +1,68 @@ +From beb3f3e7170b7ee99803d941a07a9a249e381fa6 Mon Sep 17 00:00:00 2001 +From: Jiri Olsa +Date: Tue, 25 Oct 2022 15:41:41 +0200 +Subject: [PATCH] kallsyms: Make module_kallsyms_on_each_symbol generally + available + +commit 73feb8d5fa3b755bb51077c0aabfb6aa556fd498 upstream. + +Making module_kallsyms_on_each_symbol generally available, so it +can be used outside CONFIG_LIVEPATCH option in following changes. + +Rather than adding another ifdef option let's make the function +generally available (when CONFIG_KALLSYMS and CONFIG_MODULES +options are defined). 
+ +Cc: Christoph Hellwig +Acked-by: Song Liu +Signed-off-by: Jiri Olsa +Link: https://lore.kernel.org/r/20221025134148.3300700-2-jolsa@kernel.org +Signed-off-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/module.h | 9 +++++++++ + kernel/module/kallsyms.c | 2 -- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/include/linux/module.h b/include/linux/module.h +index ec61fb53979a9..35876e89eb93f 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -879,8 +879,17 @@ static inline bool module_sig_ok(struct module *module) + } + #endif /* CONFIG_MODULE_SIG */ + ++#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS) + int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); ++#else ++static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, ++ struct module *, unsigned long), ++ void *data) ++{ ++ return -EOPNOTSUPP; ++} ++#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ + + #endif /* _LINUX_MODULE_H */ +diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c +index f5c5c9175333d..4523f99b03589 100644 +--- a/kernel/module/kallsyms.c ++++ b/kernel/module/kallsyms.c +@@ -494,7 +494,6 @@ unsigned long module_kallsyms_lookup_name(const char *name) + return ret; + } + +-#ifdef CONFIG_LIVEPATCH + int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data) +@@ -531,4 +530,3 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + mutex_unlock(&module_mutex); + return ret; + } +-#endif /* CONFIG_LIVEPATCH */ +-- +2.25.1 + diff --git a/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch b/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch new file mode 100644 index 00000000..a7dba91f --- /dev/null +++ b/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch @@ -0,0 +1,75 @@ +From abbcb10d6691e6f98838753c216c92ecd560a59a Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Fri, 27 Oct 2023 16:31:26 -0700 +Subject: [PATCH] tracing/kprobes: Fix symbol counting logic by looking at + modules as well + +commit 926fe783c8a64b33997fec405cf1af3e61aed441 upstream. + +Recent changes to count number of matching symbols when creating +a kprobe event failed to take into account kernel modules. As such, it +breaks kprobes on kernel module symbols, by assuming there is no match. + +Fix this my calling module_kallsyms_on_each_symbol() in addition to +kallsyms_on_each_match_symbol() to perform a proper counting. 
+ +Link: https://lore.kernel.org/all/20231027233126.2073148-1-andrii@kernel.org/ + +Cc: Francis Laniel +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Steven Rostedt +Fixes: b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") +Signed-off-by: Andrii Nakryiko +Acked-by: Song Liu +Signed-off-by: Masami Hiramatsu (Google) +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Hao Wei Tee +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace_kprobe.c | 25 +++++++++++++++++++++---- + 1 file changed, 21 insertions(+), 4 deletions(-) + +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index a34a4fcdab7b1..e3993d19687db 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -714,14 +714,31 @@ static int count_symbols(void *data, unsigned long unused) + return 0; + } + ++struct sym_count_ctx { ++ unsigned int count; ++ const char *name; ++}; ++ ++static int count_mod_symbols(void *data, const char *name, ++ struct module *module, unsigned long unused) ++{ ++ struct sym_count_ctx *ctx = data; ++ ++ if (strcmp(name, ctx->name) == 0) ++ ctx->count++; ++ ++ return 0; ++} ++ + static unsigned int number_of_same_symbols(char *func_name) + { +- unsigned int count; ++ struct sym_count_ctx ctx = { .count = 0, .name = func_name }; ++ ++ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); + +- count = 0; +- kallsyms_on_each_match_symbol(count_symbols, func_name, &count); ++ module_kallsyms_on_each_symbol(count_mod_symbols, &ctx); + +- return count; ++ return ctx.count; + } + + static int __trace_kprobe_create(int argc, const char *argv[]) +-- +2.25.1 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 8a4fa68c..bfb192a2 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -25,6 +25,10 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch +# Cherry-pick fix for #3691. Can be dropped after rebasing to 6.1.71 or later. +Patch5001: 5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch +Patch5002: 5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 6a8f32a66e01ad13961079ea732f60c7dc1d1a81 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Wed, 29 Nov 2023 23:50:52 +0000 Subject: [PATCH 1158/1356] Add kubernetes-1.29 package and variants This adds a k8s-1.29 package. Existing 1.28 models are moved to be 1.29, with symlinks created for 1.28 models to point to the new locations. Finally, new variants are defined for aws-k8s-1.29, aws-k8s-1.29-nvidia, metal-k8s-1.29, and vmware-k8s-1.29. 
--- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 35928373..c34bb093 100644 --- a/README.md +++ b/README.md @@ -67,12 +67,14 @@ The following variants support EKS, as described above: * `aws-k8s-1.26` * `aws-k8s-1.27` * `aws-k8s-1.28` +* `aws-k8s-1.29` * `aws-k8s-1.23-nvidia` * `aws-k8s-1.24-nvidia` * `aws-k8s-1.25-nvidia` * `aws-k8s-1.26-nvidia` * `aws-k8s-1.27-nvidia` * `aws-k8s-1.28-nvidia` +* `aws-k8s-1.29-nvidia` The following variants support ECS: @@ -88,6 +90,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: * `vmware-k8s-1.26` * `vmware-k8s-1.27` * `vmware-k8s-1.28` +* `vmware-k8s-1.29` The following variants are designed to be Kubernetes worker nodes on bare metal: @@ -96,6 +99,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: * `metal-k8s-1.26` * `metal-k8s-1.27` * `metal-k8s-1.28` +* `metal-k8s-1.29` The following variants are no longer supported: From b2db50d5f0331f9fd2d2a683b7300bb37911f355 Mon Sep 17 00:00:00 2001 From: Matthew James Briggs Date: Wed, 10 Jan 2024 10:41:33 -0800 Subject: [PATCH 1159/1356] add tgz to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 81553e84..e28b1f51 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /build *.tar.* +*.tgz **/target/ **/vendor/ /.cargo From 44a98a217bf39a94326efb7b79ecf0550a237338 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 Jan 2024 11:01:18 +0000 Subject: [PATCH 1160/1356] kernel-5.10: update to 5.10.205 Rebase to Amazon Linux upstream version 5.10.205-195.804.amzn2. Signed-off-by: Markus Boehme --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 2a37fa30..e86801b2 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/d56d799376346afd56ebe3eec4b500b23c6fd3954f66b65aa7c867848d17a950/kernel-5.10.201-191.748.amzn2.src.rpm" -sha512 = "8d8715a1e06dbf7adbc1b5f80e99ec6b5d448c9aefd69f48f77fe6c80bd0e2668a963f85360fefd93ea979e2d2a5323d916301f2f59961e125a5fe6c8b38065f" +url = "https://cdn.amazonlinux.com/blobstore/9feb6cecf780648ffd492525552ba31ce039667031f7ff32ff8a8802145f4827/kernel-5.10.205-195.804.amzn2.src.rpm" +sha512 = "409ddcfcd2f29ab7e7cff8d38ea00fbbd2f2960e4eeffbb4031997921becc797b7e29a66c3869b70e362530d2ed30b747472dbee229f21f9499e3d90f40a880e" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 20a3d994..7210a0ea 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.201 +Version: 5.10.205 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/d56d799376346afd56ebe3eec4b500b23c6fd3954f66b65aa7c867848d17a950/kernel-5.10.201-191.748.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9feb6cecf780648ffd492525552ba31ce039667031f7ff32ff8a8802145f4827/kernel-5.10.205-195.804.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From d3f4edc44e399c3709df45e2d536db8bca10479a Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 Jan 2024 11:02:27 +0000 Subject: [PATCH 1161/1356] kernel-5.15: update to 5.15.145 Rebase to Amazon Linux upstream version 5.15.145-95.156.amzn2. Signed-off-by: Markus Boehme --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index bca97934..3bc83fdc 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/76d66a34d25e5ebc08dc424d9b03b0ecb44046eb05d95e47459447a5ab582cd2/kernel-5.15.139-93.147.amzn2.src.rpm" -sha512 = "f63269b6466df01b5a6a3387091665cb8931090fa73d848f0d77a6729aa7241d04be77383e34a41bed575cd295bf722bd6668965d1f1147d9ca975ae04b53219" +url = "https://cdn.amazonlinux.com/blobstore/ee035f7e9f6cfe0e00a4c32b3f7d9170e85ca9e9f5cc5026b601b532343b260d/kernel-5.15.145-95.156.amzn2.src.rpm" +sha512 = "372738cd8139c6904a047e9298373ee60dc64a5776a624399615ad13736fc21cd9ab70bccee4a0aa12fcdbbf3fd2b1d01a9d8bf4ad6c4b9063f0404c904db250" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 061d3e20..c3f1dce9 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.139 +Version: 5.15.145 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/76d66a34d25e5ebc08dc424d9b03b0ecb44046eb05d95e47459447a5ab582cd2/kernel-5.15.139-93.147.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/ee035f7e9f6cfe0e00a4c32b3f7d9170e85ca9e9f5cc5026b601b532343b260d/kernel-5.15.145-95.156.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 709c8fef52f88588aa8082176e4b987d293d7f0f Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 Jan 2024 11:03:13 +0000 Subject: [PATCH 1162/1356] kernel-6.1: update to 6.1.66 Rebase to Amazon Linux upstream version 6.1.66-93.164.amzn2023. Signed-off-by: Markus Boehme --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 7ea34ed2..55e36acd 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/5880ce1298c2bb541461845a29b2787036b8d18aff0f0bc308117a5f9990057e/kernel-6.1.66-91.160.amzn2023.src.rpm" -sha512 = "4a2b52e6fc8045a5bdf3f7a4a8080623206352e2921d9cf899e367c9102a806dc1135985863cb5efc43c4a757971404e88abdea9c78dcb55cb64e204f97dc232" +url = "https://cdn.amazonlinux.com/al2023/blobstore/f0517376e35e75defefac98e867091249abdcbad3d0958d58d19c0db26a8d0c7/kernel-6.1.66-93.164.amzn2023.src.rpm" +sha512 = "215330abf659fd459d79bee3cd1c7beb6075a08f3eacd484ce1836169e436ea6b98462e9e5bd6d1ccb79474b1b5c54f21f3e5643fc47fac954ae84701c32a00c" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index bfb192a2..14012289 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/5880ce1298c2bb541461845a29b2787036b8d18aff0f0bc308117a5f9990057e/kernel-6.1.66-91.160.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/f0517376e35e75defefac98e867091249abdcbad3d0958d58d19c0db26a8d0c7/kernel-6.1.66-93.164.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From ea2e7af3bb7042b1a4f6fe521d29f35a212061bd Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 Jan 2024 19:22:14 +0000 Subject: [PATCH 1163/1356] kernel-5.10: disable unused Mellanox network drivers The recent update to the 5.10 kernel picked up a bunch of Mellanox network drivers from the Amazon Linux kernel. We don't need those, so deactivate them again. Signed-off-by: Markus Boehme --- packages/kernel-5.10/config-bottlerocket | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 7a1b1ef0..264e12b6 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -154,6 +154,11 @@ CONFIG_EXT4_USE_FOR_EXT2=y # Disable obsolete NIC drivers # CONFIG_QLGE is not set +# CONFIG_MLX4_CORE is not set +# CONFIG_MLX4_EN is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set # Disable unused qdiscs # - sch_cake targets home routers and residential links @@ -171,3 +176,4 @@ CONFIG_SCSI_SMARTPQI=m # Disable AL port of BBR2 congestion algorithm # CONFIG_TCP_CONG_BBR2 is not set + From 46bda6dda0c3eb22c7869c9565b3703e70f629d8 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Wed, 10 Jan 2024 20:46:42 +0000 Subject: [PATCH 1164/1356] kernel-5.15: backport fix for creating kprobes using unqualified names Commit b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") in the upstream kernel introduced a regression where kprobes cannot be created on functions residing in loadable modules if the probe location is identified by an unqualified function name. The faulty commit was backported to the 5.15, but a backport of the fix is not yet available. Carry the fix here for release preparation and seek resolution upstream as soon as possible. This may mean upstreaming, or reverting the faulty commit entirely (sentiment for the faulty commit has soured, and it has been yanked already from patch queues for older stable series). 
Signed-off-by: Markus Boehme --- ...dule_kallsyms_on_each_symbol-general.patch | 73 +++++++++++++++++++ ...Fix-symbol-counting-logic-by-looking.patch | 49 +++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 4 + 3 files changed, 126 insertions(+) create mode 100644 packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch create mode 100644 packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch diff --git a/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch b/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch new file mode 100644 index 00000000..0b7c54e1 --- /dev/null +++ b/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch @@ -0,0 +1,73 @@ +From 5d5e377de988002f0db05c90ef39010503c61cee Mon Sep 17 00:00:00 2001 +From: Jiri Olsa +Date: Tue, 25 Oct 2022 15:41:41 +0200 +Subject: [PATCH] kallsyms: Make module_kallsyms_on_each_symbol generally + available + +commit 73feb8d5fa3b755bb51077c0aabfb6aa556fd498 upstream. + +Making module_kallsyms_on_each_symbol generally available, so it +can be used outside CONFIG_LIVEPATCH option in following changes. + +Rather than adding another ifdef option let's make the function +generally available (when CONFIG_KALLSYMS and CONFIG_MODULES +options are defined). + +Cc: Christoph Hellwig +Acked-by: Song Liu +Signed-off-by: Jiri Olsa +Link: https://lore.kernel.org/r/20221025134148.3300700-2-jolsa@kernel.org +Signed-off-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +Stable-dep-of: 926fe783c8a6 ("tracing/kprobes: Fix symbol counting logic by looking at modules as well") +Signed-off-by: Markus Boehme +--- + include/linux/module.h | 9 +++++++++ + kernel/module.c | 2 -- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/include/linux/module.h b/include/linux/module.h +index c9f1200b2312..701c150485b2 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -867,8 +867,17 @@ static inline bool module_sig_ok(struct module *module) + } + #endif /* CONFIG_MODULE_SIG */ + ++#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS) + int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); ++#else ++static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, ++ struct module *, unsigned long), ++ void *data) ++{ ++ return -EOPNOTSUPP; ++} ++#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ + + #endif /* _LINUX_MODULE_H */ +diff --git a/kernel/module.c b/kernel/module.c +index 3c90840133c0..ba9f2bb57889 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -4482,7 +4482,6 @@ unsigned long module_kallsyms_lookup_name(const char *name) + return ret; + } + +-#ifdef CONFIG_LIVEPATCH + int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data) +@@ -4514,7 +4513,6 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + mutex_unlock(&module_mutex); + return ret; + } +-#endif /* CONFIG_LIVEPATCH */ + #endif /* CONFIG_KALLSYMS */ + + static void cfi_init(struct module *mod) +-- +2.40.1 + diff --git a/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch b/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch new file mode 100644 index 00000000..d7753b89 --- /dev/null +++ b/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch @@ -0,0 +1,49 @@ 
+From 7bc06ef49649ff064c6bc3825e047c500eea141a Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Fri, 27 Oct 2023 16:31:26 -0700 +Subject: [PATCH] tracing/kprobes: Fix symbol counting logic by looking at + modules as well + +commit 926fe783c8a64b33997fec405cf1af3e61aed441 upstream. + +Recent changes to count number of matching symbols when creating +a kprobe event failed to take into account kernel modules. As such, it +breaks kprobes on kernel module symbols, by assuming there is no match. + +Fix this my calling module_kallsyms_on_each_symbol() in addition to +kallsyms_on_each_match_symbol() to perform a proper counting. + +Link: https://lore.kernel.org/all/20231027233126.2073148-1-andrii@kernel.org/ + +Cc: Francis Laniel +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Steven Rostedt +Fixes: b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") +Signed-off-by: Andrii Nakryiko +Acked-by: Song Liu +Signed-off-by: Masami Hiramatsu (Google) +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Hao Wei Tee +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Markus Boehme +--- + kernel/trace/trace_kprobe.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 1c565db2de7b..21aef22a8489 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -735,6 +735,8 @@ static unsigned int number_of_same_symbols(char *func_name) + + kallsyms_on_each_symbol(count_symbols, &args); + ++ module_kallsyms_on_each_symbol(count_symbols, &args); ++ + return args.count; + } + +-- +2.40.1 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index c3f1dce9..296c15e4 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -22,6 +22,10 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch +# Backport fix for #3691. Needs to be upstreamed to 5.15 stable series. +Patch5001: 5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch +Patch5002: 5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 446cd6e924fcc1a378f7e7b1260b23702b837918 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 24 Jan 2024 10:05:48 +0000 Subject: [PATCH 1165/1356] kernel-6.1: update to 6.1.72 Rebase to Amazon Linux upstream version 6.1.72-96.166.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 55e36acd..18a2a60f 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/f0517376e35e75defefac98e867091249abdcbad3d0958d58d19c0db26a8d0c7/kernel-6.1.66-93.164.amzn2023.src.rpm" -sha512 = "215330abf659fd459d79bee3cd1c7beb6075a08f3eacd484ce1836169e436ea6b98462e9e5bd6d1ccb79474b1b5c54f21f3e5643fc47fac954ae84701c32a00c" +url = "https://cdn.amazonlinux.com/al2023/blobstore/8a4c9c7ba87d627d236ca69292a052edc0724ac289d48523da77fe3f479d27aa/kernel-6.1.72-96.166.amzn2023.src.rpm" +sha512 = "538abdc500a3ce35b8faa9b3f9910639e4c81c2c02ea5f382e89d7f6e364b2fd0c945a6869be2942c48901993560c3a8e45c5e7c2f0a4d5c4add3079b8009d0c" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 14012289..47f813aa 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.66 +Version: 6.1.72 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/f0517376e35e75defefac98e867091249abdcbad3d0958d58d19c0db26a8d0c7/kernel-6.1.66-93.164.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/8a4c9c7ba87d627d236ca69292a052edc0724ac289d48523da77fe3f479d27aa/kernel-6.1.72-96.166.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From d808c88afbbf1e3a71580b19c0a5ad683d411483 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 24 Jan 2024 10:06:17 +0000 Subject: [PATCH 1166/1356] kernel-5.15: update to 5.15.145 Rebase to Amazon Linux upstream version 5.15.145-95.161.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 3bc83fdc..16ad837e 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/ee035f7e9f6cfe0e00a4c32b3f7d9170e85ca9e9f5cc5026b601b532343b260d/kernel-5.15.145-95.156.amzn2.src.rpm" -sha512 = "372738cd8139c6904a047e9298373ee60dc64a5776a624399615ad13736fc21cd9ab70bccee4a0aa12fcdbbf3fd2b1d01a9d8bf4ad6c4b9063f0404c904db250" +url = "https://cdn.amazonlinux.com/blobstore/bc7e2bdcb4414f6d629c0ff58bfdfff1c471e5a4e5033c6d371535991233cec4/kernel-5.15.145-95.161.amzn2.src.rpm" +sha512 = "12df106be137e85b822f10d395b36f7f1edce37fa4704c9ace8e58a4354093f50eaa6f8b121ea85ab21a87eb4ec8108afc999d8a6b8945fe9f04dfe4a7a79f61" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 296c15e4..aee434c5 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/ee035f7e9f6cfe0e00a4c32b3f7d9170e85ca9e9f5cc5026b601b532343b260d/kernel-5.15.145-95.156.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/bc7e2bdcb4414f6d629c0ff58bfdfff1c471e5a4e5033c6d371535991233cec4/kernel-5.15.145-95.161.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From af8c1bd5747559049907dced5cab8147a0720d23 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 24 Jan 2024 10:07:05 +0000 Subject: [PATCH 1167/1356] kernel-5.10: update to 5.10.205 Rebase to Amazon Linux upstream version 5.10.205-195.807.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index e86801b2..98c4d276 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/9feb6cecf780648ffd492525552ba31ce039667031f7ff32ff8a8802145f4827/kernel-5.10.205-195.804.amzn2.src.rpm" -sha512 = "409ddcfcd2f29ab7e7cff8d38ea00fbbd2f2960e4eeffbb4031997921becc797b7e29a66c3869b70e362530d2ed30b747472dbee229f21f9499e3d90f40a880e" +url = "https://cdn.amazonlinux.com/blobstore/44af81a4c59431d8729930480eb04e70c6e3be7d23fccbcbddb17395f16c73ce/kernel-5.10.205-195.807.amzn2.src.rpm" +sha512 = "e49f39ad7ae0ffc602c28d12e649dcc77c0a3fcff65fe98b2a2e56aab97f501f0af35fb0f257f03a9a4da24f83a0c30c7c64a0c502bcf46d56f412021d10f1d2" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 7210a0ea..4741dd30 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/9feb6cecf780648ffd492525552ba31ce039667031f7ff32ff8a8802145f4827/kernel-5.10.205-195.804.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/44af81a4c59431d8729930480eb04e70c6e3be7d23fccbcbddb17395f16c73ce/kernel-5.10.205-195.807.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 0e36cb4ba604f5c3e4ab6a416643a0fc60faf444 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 25 Jan 2024 13:43:12 +0000 Subject: [PATCH 1168/1356] Revert "kernel-5.15: backport fix for creating kprobes using unqualified names" This reverts commit e97ca0c7d17c4e7983bc7907765d7a61c7779f72. These patches have been included in the latest AL releases, so we can drop our downstream variants of these patches. 
Signed-off-by: Leonard Foerster --- ...dule_kallsyms_on_each_symbol-general.patch | 73 ------------------- ...Fix-symbol-counting-logic-by-looking.patch | 49 ------------- packages/kernel-5.15/kernel-5.15.spec | 4 - 3 files changed, 126 deletions(-) delete mode 100644 packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch delete mode 100644 packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch diff --git a/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch b/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch deleted file mode 100644 index 0b7c54e1..00000000 --- a/packages/kernel-5.15/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 5d5e377de988002f0db05c90ef39010503c61cee Mon Sep 17 00:00:00 2001 -From: Jiri Olsa -Date: Tue, 25 Oct 2022 15:41:41 +0200 -Subject: [PATCH] kallsyms: Make module_kallsyms_on_each_symbol generally - available - -commit 73feb8d5fa3b755bb51077c0aabfb6aa556fd498 upstream. - -Making module_kallsyms_on_each_symbol generally available, so it -can be used outside CONFIG_LIVEPATCH option in following changes. - -Rather than adding another ifdef option let's make the function -generally available (when CONFIG_KALLSYMS and CONFIG_MODULES -options are defined). - -Cc: Christoph Hellwig -Acked-by: Song Liu -Signed-off-by: Jiri Olsa -Link: https://lore.kernel.org/r/20221025134148.3300700-2-jolsa@kernel.org -Signed-off-by: Alexei Starovoitov -Signed-off-by: Greg Kroah-Hartman -Stable-dep-of: 926fe783c8a6 ("tracing/kprobes: Fix symbol counting logic by looking at modules as well") -Signed-off-by: Markus Boehme ---- - include/linux/module.h | 9 +++++++++ - kernel/module.c | 2 -- - 2 files changed, 9 insertions(+), 2 deletions(-) - -diff --git a/include/linux/module.h b/include/linux/module.h -index c9f1200b2312..701c150485b2 100644 ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -867,8 +867,17 @@ static inline bool module_sig_ok(struct module *module) - } - #endif /* CONFIG_MODULE_SIG */ - -+#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS) - int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data); -+#else -+static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, -+ struct module *, unsigned long), -+ void *data) -+{ -+ return -EOPNOTSUPP; -+} -+#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ - - #endif /* _LINUX_MODULE_H */ -diff --git a/kernel/module.c b/kernel/module.c -index 3c90840133c0..ba9f2bb57889 100644 ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -4482,7 +4482,6 @@ unsigned long module_kallsyms_lookup_name(const char *name) - return ret; - } - --#ifdef CONFIG_LIVEPATCH - int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data) -@@ -4514,7 +4513,6 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - mutex_unlock(&module_mutex); - return ret; - } --#endif /* CONFIG_LIVEPATCH */ - #endif /* CONFIG_KALLSYMS */ - - static void cfi_init(struct module *mod) --- -2.40.1 - diff --git a/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch b/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch deleted file mode 100644 index d7753b89..00000000 --- a/packages/kernel-5.15/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch +++ /dev/null @@ -1,49 
+0,0 @@ -From 7bc06ef49649ff064c6bc3825e047c500eea141a Mon Sep 17 00:00:00 2001 -From: Andrii Nakryiko -Date: Fri, 27 Oct 2023 16:31:26 -0700 -Subject: [PATCH] tracing/kprobes: Fix symbol counting logic by looking at - modules as well - -commit 926fe783c8a64b33997fec405cf1af3e61aed441 upstream. - -Recent changes to count number of matching symbols when creating -a kprobe event failed to take into account kernel modules. As such, it -breaks kprobes on kernel module symbols, by assuming there is no match. - -Fix this my calling module_kallsyms_on_each_symbol() in addition to -kallsyms_on_each_match_symbol() to perform a proper counting. - -Link: https://lore.kernel.org/all/20231027233126.2073148-1-andrii@kernel.org/ - -Cc: Francis Laniel -Cc: stable@vger.kernel.org -Cc: Masami Hiramatsu -Cc: Steven Rostedt -Fixes: b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") -Signed-off-by: Andrii Nakryiko -Acked-by: Song Liu -Signed-off-by: Masami Hiramatsu (Google) -Signed-off-by: Greg Kroah-Hartman -Signed-off-by: Hao Wei Tee -Signed-off-by: Greg Kroah-Hartman -Signed-off-by: Markus Boehme ---- - kernel/trace/trace_kprobe.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c -index 1c565db2de7b..21aef22a8489 100644 ---- a/kernel/trace/trace_kprobe.c -+++ b/kernel/trace/trace_kprobe.c -@@ -735,6 +735,8 @@ static unsigned int number_of_same_symbols(char *func_name) - - kallsyms_on_each_symbol(count_symbols, &args); - -+ module_kallsyms_on_each_symbol(count_symbols, &args); -+ - return args.count; - } - --- -2.40.1 - diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index aee434c5..f6b2d498 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -22,10 +22,6 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch -# Backport fix for #3691. Needs to be upstreamed to 5.15 stable series. -Patch5001: 5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch -Patch5002: 5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 04ebd6b789380055668b1fe8f246912162df52b8 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 25 Jan 2024 13:49:08 +0000 Subject: [PATCH 1169/1356] Revert "kernel-6.1: cherry-pick fix for creating kprobes using unqualified names" This reverts commit cb6cb40a3b95291536217acb80fe8ec595ad6f68. Drop the downstream ports of kprobe fixes as we have caught up with the upstream including them. 
Signed-off-by: Leonard Foerster --- ...dule_kallsyms_on_each_symbol-general.patch | 68 ----------------- ...Fix-symbol-counting-logic-by-looking.patch | 75 ------------------- packages/kernel-6.1/kernel-6.1.spec | 4 - 3 files changed, 147 deletions(-) delete mode 100644 packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch delete mode 100644 packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch diff --git a/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch b/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch deleted file mode 100644 index cdb8e164..00000000 --- a/packages/kernel-6.1/5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch +++ /dev/null @@ -1,68 +0,0 @@ -From beb3f3e7170b7ee99803d941a07a9a249e381fa6 Mon Sep 17 00:00:00 2001 -From: Jiri Olsa -Date: Tue, 25 Oct 2022 15:41:41 +0200 -Subject: [PATCH] kallsyms: Make module_kallsyms_on_each_symbol generally - available - -commit 73feb8d5fa3b755bb51077c0aabfb6aa556fd498 upstream. - -Making module_kallsyms_on_each_symbol generally available, so it -can be used outside CONFIG_LIVEPATCH option in following changes. - -Rather than adding another ifdef option let's make the function -generally available (when CONFIG_KALLSYMS and CONFIG_MODULES -options are defined). - -Cc: Christoph Hellwig -Acked-by: Song Liu -Signed-off-by: Jiri Olsa -Link: https://lore.kernel.org/r/20221025134148.3300700-2-jolsa@kernel.org -Signed-off-by: Alexei Starovoitov -Signed-off-by: Greg Kroah-Hartman ---- - include/linux/module.h | 9 +++++++++ - kernel/module/kallsyms.c | 2 -- - 2 files changed, 9 insertions(+), 2 deletions(-) - -diff --git a/include/linux/module.h b/include/linux/module.h -index ec61fb53979a9..35876e89eb93f 100644 ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -879,8 +879,17 @@ static inline bool module_sig_ok(struct module *module) - } - #endif /* CONFIG_MODULE_SIG */ - -+#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS) - int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data); -+#else -+static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, -+ struct module *, unsigned long), -+ void *data) -+{ -+ return -EOPNOTSUPP; -+} -+#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ - - #endif /* _LINUX_MODULE_H */ -diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c -index f5c5c9175333d..4523f99b03589 100644 ---- a/kernel/module/kallsyms.c -+++ b/kernel/module/kallsyms.c -@@ -494,7 +494,6 @@ unsigned long module_kallsyms_lookup_name(const char *name) - return ret; - } - --#ifdef CONFIG_LIVEPATCH - int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data) -@@ -531,4 +530,3 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - mutex_unlock(&module_mutex); - return ret; - } --#endif /* CONFIG_LIVEPATCH */ --- -2.25.1 - diff --git a/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch b/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch deleted file mode 100644 index a7dba91f..00000000 --- a/packages/kernel-6.1/5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch +++ /dev/null @@ -1,75 +0,0 @@ -From abbcb10d6691e6f98838753c216c92ecd560a59a Mon Sep 17 00:00:00 2001 -From: Andrii Nakryiko -Date: Fri, 27 Oct 2023 16:31:26 -0700 -Subject: [PATCH] 
tracing/kprobes: Fix symbol counting logic by looking at - modules as well - -commit 926fe783c8a64b33997fec405cf1af3e61aed441 upstream. - -Recent changes to count number of matching symbols when creating -a kprobe event failed to take into account kernel modules. As such, it -breaks kprobes on kernel module symbols, by assuming there is no match. - -Fix this my calling module_kallsyms_on_each_symbol() in addition to -kallsyms_on_each_match_symbol() to perform a proper counting. - -Link: https://lore.kernel.org/all/20231027233126.2073148-1-andrii@kernel.org/ - -Cc: Francis Laniel -Cc: stable@vger.kernel.org -Cc: Masami Hiramatsu -Cc: Steven Rostedt -Fixes: b022f0c7e404 ("tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols") -Signed-off-by: Andrii Nakryiko -Acked-by: Song Liu -Signed-off-by: Masami Hiramatsu (Google) -Signed-off-by: Greg Kroah-Hartman -Signed-off-by: Hao Wei Tee -Signed-off-by: Greg Kroah-Hartman ---- - kernel/trace/trace_kprobe.c | 25 +++++++++++++++++++++---- - 1 file changed, 21 insertions(+), 4 deletions(-) - -diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c -index a34a4fcdab7b1..e3993d19687db 100644 ---- a/kernel/trace/trace_kprobe.c -+++ b/kernel/trace/trace_kprobe.c -@@ -714,14 +714,31 @@ static int count_symbols(void *data, unsigned long unused) - return 0; - } - -+struct sym_count_ctx { -+ unsigned int count; -+ const char *name; -+}; -+ -+static int count_mod_symbols(void *data, const char *name, -+ struct module *module, unsigned long unused) -+{ -+ struct sym_count_ctx *ctx = data; -+ -+ if (strcmp(name, ctx->name) == 0) -+ ctx->count++; -+ -+ return 0; -+} -+ - static unsigned int number_of_same_symbols(char *func_name) - { -- unsigned int count; -+ struct sym_count_ctx ctx = { .count = 0, .name = func_name }; -+ -+ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); - -- count = 0; -- kallsyms_on_each_match_symbol(count_symbols, func_name, &count); -+ module_kallsyms_on_each_symbol(count_mod_symbols, &ctx); - -- return count; -+ return ctx.count; - } - - static int __trace_kprobe_create(int argc, const char *argv[]) --- -2.25.1 - diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 47f813aa..d44df49d 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -25,10 +25,6 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch -# Cherry-pick fix for #3691. Can be dropped after rebasing to 6.1.71 or later. -Patch5001: 5001-kallsyms-Make-module_kallsyms_on_each_symbol-general.patch -Patch5002: 5002-tracing-kprobes-Fix-symbol-counting-logic-by-looking.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From 9b5520414aecf533bd46332daeca312ac044bdae Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Sat, 27 Jan 2024 00:50:49 +0000 Subject: [PATCH 1170/1356] Drop K8s 1.24 metal and VMware variants This removes the metal and VMware 1.24 variants. This version of Kubernetes has gone end-of-life and these variants are no longer supported. 
Signed-off-by: Matthew Yeazel --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index c34bb093..be7222da 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,6 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.24` * `vmware-k8s-1.25` * `vmware-k8s-1.26` * `vmware-k8s-1.27` @@ -94,7 +93,6 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: The following variants are designed to be Kubernetes worker nodes on bare metal: -* `metal-k8s-1.24` * `metal-k8s-1.25` * `metal-k8s-1.26` * `metal-k8s-1.27` @@ -104,6 +102,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: The following variants are no longer supported: * All Kubernetes variants using Kubernetes 1.22 and earlier +* Bare metal and VMware variants using Kubernetes 1.24 and earlier We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). From a96ba2da019e3039b921cda3b666a40b6abab0a3 Mon Sep 17 00:00:00 2001 From: Tianhao Geng Date: Mon, 5 Feb 2024 23:33:40 +0000 Subject: [PATCH 1171/1356] actions-workflow: bump action go version to 1.21 --- .github/workflows/golangci-lint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index b1e5a0b5..5872c576 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.21 - uses: actions/checkout@v4 - name: lint-host-ctr uses: golangci/golangci-lint-action@v3 From f9454ec50911ced5e9d706bcd85f8b9dfd5a3fa1 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 6 Feb 2024 08:05:01 +0000 Subject: [PATCH 1172/1356] kernel-5.15: update to 5.15.148 Rebase to Amazon Linux upstream version 5.15.148-97.158.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 16ad837e..00c6ac05 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/bc7e2bdcb4414f6d629c0ff58bfdfff1c471e5a4e5033c6d371535991233cec4/kernel-5.15.145-95.161.amzn2.src.rpm" -sha512 = "12df106be137e85b822f10d395b36f7f1edce37fa4704c9ace8e58a4354093f50eaa6f8b121ea85ab21a87eb4ec8108afc999d8a6b8945fe9f04dfe4a7a79f61" +url = "https://cdn.amazonlinux.com/blobstore/099e5401bd8b5ffe59330ca1179847c32a481136173f21b7af16d20b780de422/kernel-5.15.148-97.158.amzn2.src.rpm" +sha512 = "b2a1169b1d11ca4a93a4267d4d88fb97a87481a3b8f60702c0715fa2ad73d05a3b4ac74d25dd2cea2410038d58d8b6783a25382b3fb68879821595b1d42b230d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index f6b2d498..df6f8271 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.145 +Version: 5.15.148 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/bc7e2bdcb4414f6d629c0ff58bfdfff1c471e5a4e5033c6d371535991233cec4/kernel-5.15.145-95.161.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/099e5401bd8b5ffe59330ca1179847c32a481136173f21b7af16d20b780de422/kernel-5.15.148-97.158.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 6e6a041ca20bdf45b78a5f6c510a278e989640dd Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Tue, 6 Feb 2024 08:06:00 +0000 Subject: [PATCH 1173/1356] kernel-5.10: update to 5.10.209 Rebase to Amazon Linux upstream version 5.10.209-198.812.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 98c4d276..69c561fa 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/44af81a4c59431d8729930480eb04e70c6e3be7d23fccbcbddb17395f16c73ce/kernel-5.10.205-195.807.amzn2.src.rpm" -sha512 = "e49f39ad7ae0ffc602c28d12e649dcc77c0a3fcff65fe98b2a2e56aab97f501f0af35fb0f257f03a9a4da24f83a0c30c7c64a0c502bcf46d56f412021d10f1d2" +url = "https://cdn.amazonlinux.com/blobstore/3d52b3205bf9d50e870cd4edaece690532e22b3738f95a1ca05a66f98be13aed/kernel-5.10.209-198.812.amzn2.src.rpm" +sha512 = "20075e0f9d0def7f41a0dc79711fccb57e3f5b79079791da2f4cc56b11c9d1f306cc63753b6004bfcc43443bb2c6668c44c69a6d326da45d8a169ffe4a510213" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 4741dd30..a325ac25 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.205 +Version: 5.10.209 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/44af81a4c59431d8729930480eb04e70c6e3be7d23fccbcbddb17395f16c73ce/kernel-5.10.205-195.807.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/3d52b3205bf9d50e870cd4edaece690532e22b3738f95a1ca05a66f98be13aed/kernel-5.10.209-198.812.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 990bab422641450ae2c37ebb28b3ddf0e4a30255 Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Wed, 7 Feb 2024 07:08:28 +0000 Subject: [PATCH 1174/1356] kernel-6.1: update to 6.1.75 Rebase to Amazon Linux upstream version 6.1.75-99.163.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 18a2a60f..39273c36 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/8a4c9c7ba87d627d236ca69292a052edc0724ac289d48523da77fe3f479d27aa/kernel-6.1.72-96.166.amzn2023.src.rpm" -sha512 = "538abdc500a3ce35b8faa9b3f9910639e4c81c2c02ea5f382e89d7f6e364b2fd0c945a6869be2942c48901993560c3a8e45c5e7c2f0a4d5c4add3079b8009d0c" +url = "https://cdn.amazonlinux.com/al2023/blobstore/5d1de5166545475a39271909f57fe0e4e4527f4ce6f891c94fe741b36357d40b/kernel-6.1.75-99.163.amzn2023.src.rpm" +sha512 = "7ddc100d62acca20ec1b0f383c9a24424f9e6eb411250a02daaef70459e56a044e791826b5cc6600eea706c38d036d9264bb11f06bdb5f16ec6a6319b47b38d1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index d44df49d..1dacaaa2 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.72 +Version: 6.1.75 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/8a4c9c7ba87d627d236ca69292a052edc0724ac289d48523da77fe3f479d27aa/kernel-6.1.72-96.166.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/5d1de5166545475a39271909f57fe0e4e4527f4ce6f891c94fe741b36357d40b/kernel-6.1.75-99.163.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 9b2376be57b069ea5042be6ac94b7406c81ab845 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Fri, 9 Feb 2024 22:15:48 +0000 Subject: [PATCH 1175/1356] shim: replace %make_build with make Switches from a parallel make macro to a standard make command to avoid a race condition that can occur during a Bottlerocket build. 
--- packages/shim/shim.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/shim/shim.spec b/packages/shim/shim.spec index f887537c..718382dd 100644 --- a/packages/shim/shim.spec +++ b/packages/shim/shim.spec @@ -35,7 +35,7 @@ mv gnu-efi-shim-%{gnuefiver} gnu-efi truncate -s 4080 empty.cer %global shim_make \ -%make_build\\\ +make\\\ ARCH="%{_cross_arch}"\\\ CROSS_COMPILE="%{_cross_target}-"\\\ COMMIT_ID="%{commit}"\\\ From 4e972625e0f31e3f197fde6501842c5346246c0a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 10 Feb 2024 18:54:59 +0000 Subject: [PATCH 1176/1356] shim: update to 15.8 Switch to the release archive, which bundles the gnu-efi sources. Add `-N` to the PE post-process flags, to disable the NX compat flag by default. GRUB and the Linux kernel are not yet ready for this. Signed-off-by: Ben Cressey --- packages/shim/Cargo.toml | 9 ++------- packages/shim/shim.spec | 12 ++++-------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/packages/shim/Cargo.toml b/packages/shim/Cargo.toml index 40eacb8b..c402f5bf 100644 --- a/packages/shim/Cargo.toml +++ b/packages/shim/Cargo.toml @@ -9,10 +9,5 @@ build = "../build.rs" path = "../packages.rs" [[package.metadata.build-package.external-files]] -url = "https://github.com/rhboot/shim/archive/15.7/shim-15.7.tar.gz" -sha512 = "95ef9c0125269cfa0263a32e4f343d8ccc8813d71fa918a2f54850781e3a2d6a06a719249be355fdb24c935899e0e11370815501ecde1800bdd974a9a79c5612" - -[[package.metadata.build-package.external-files]] -url = "https://github.com/rhboot/gnu-efi/archive/refs/heads/shim-15.6.tar.gz" -path = "gnu-efi-shim-15.6.tar.gz" -sha512 = "d09dbb9e461d60e23294326ed4178301a6ab5959ade912bf559dbeb050362d994c8e63c8e062c19569055a269e5dbb65f0572317da4725177e19aae82e3c6978" +url = "https://github.com/rhboot/shim/releases/download/15.8/shim-15.8.tar.bz2" +sha512 = "30b3390ae935121ea6fe728d8f59d37ded7b918ad81bea06e213464298b4bdabbca881b30817965bd397facc596db1ad0b8462a84c87896ce6c1204b19371cd1" diff --git a/packages/shim/shim.spec b/packages/shim/shim.spec index 718382dd..159e9992 100644 --- a/packages/shim/shim.spec +++ b/packages/shim/shim.spec @@ -7,9 +7,8 @@ %global shim_efi_image shim%{_cross_efi_arch}.efi %global mokm_efi_image mm%{_cross_efi_arch}.efi -%global shimver 15.7 -%global gnuefiver 15.6 -%global commit 11491619f4336fef41c3519877ba242161763580 +%global shimver 15.8 +%global commit 5914984a1ffeab841f482c791426d7ca9935a5e6 Name: %{_cross_os}shim Version: %{shimver} @@ -17,17 +16,13 @@ Release: 1%{?dist} Summary: UEFI shim loader License: BSD-3-Clause URL: https://github.com/rhboot/shim/ -Source0: https://github.com/rhboot/shim/archive/%{shimver}/shim-%{shimver}.tar.gz -Source1: https://github.com/rhboot/gnu-efi/archive/refs/heads/shim-%{gnuefiver}.tar.gz#/gnu-efi-shim-%{gnuefiver}.tar.gz +Source0: https://github.com/rhboot/shim/archive/%{shimver}/shim-%{shimver}.tar.bz2 %description %{summary}. %prep %autosetup -n shim-%{shimver} -p1 -%setup -T -D -n shim-%{shimver} -a 1 -rmdir gnu-efi -mv gnu-efi-shim-%{gnuefiver} gnu-efi # Make sure the `.vendor_cert` section is large enough to cover a replacement # certificate, or `objcopy` may silently retain the existing section. 
@@ -45,6 +40,7 @@ make\\\ DESTDIR="%{buildroot}"\\\ EFIDIR="BOOT"\\\ VENDOR_CERT_FILE="empty.cer"\\\ + POST_PROCESS_PE_FLAGS="-N"\\\ %{nil} %build From 3366e6f5d95939e51da98ad5d583ac65c139d4b4 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 13 Feb 2024 22:39:03 +0000 Subject: [PATCH 1177/1356] grub: update to grub2-2.06-61.amzn2023.0.9 Revert two Amazon Linux patches that aren't needed for Bottlerocket's Secure Boot implementation, which uses shim and verifies the grub.cfg signature. Signed-off-by: Ben Cressey --- ...llback-to-EFI-LoadImage-if-shim_lock.patch | 96 +++++++++++++++++++ ...U-Move-verifiers-after-decompressors.patch | 48 ++++++++++ packages/grub/Cargo.toml | 4 +- packages/grub/grub.spec | 4 +- 4 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 packages/grub/0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch create mode 100644 packages/grub/0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch diff --git a/packages/grub/0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch b/packages/grub/0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch new file mode 100644 index 00000000..217d3895 --- /dev/null +++ b/packages/grub/0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch @@ -0,0 +1,96 @@ +From 2773f01f5d9292c68b08f9392a8ae0bf9c2e3e30 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Tue, 13 Feb 2024 22:20:16 +0000 +Subject: [PATCH] Revert "sb: Add fallback to EFI LoadImage if shim_lock is + absent" + +For Secure Boot in Bottlerocket, we expect that shim_lock will always +be present, and don't need a fallback. + +Signed-off-by: Ben Cressey +--- + grub-core/Makefile.core.def | 1 - + grub-core/kern/efi/sb.c | 43 +++---------------------------------- + 2 files changed, 3 insertions(+), 41 deletions(-) + +diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def +index 5b8728e..3096cd4 100644 +--- a/grub-core/Makefile.core.def ++++ b/grub-core/Makefile.core.def +@@ -214,7 +214,6 @@ kernel = { + efi = kern/efi/sb.c; + efi = kern/lockdown.c; + efi = lib/envblk.c; +- efi = lib/crc.c; + i386_coreboot = kern/i386/pc/acpi.c; + i386_multiboot = kern/i386/pc/acpi.c; + i386_coreboot = kern/acpi.c; +diff --git a/grub-core/kern/efi/sb.c b/grub-core/kern/efi/sb.c +index 70f9d9d..db42c25 100644 +--- a/grub-core/kern/efi/sb.c ++++ b/grub-core/kern/efi/sb.c +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + + static grub_efi_guid_t shim_lock_guid = GRUB_EFI_SHIM_LOCK_GUID; + +@@ -171,50 +170,14 @@ shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)), + } + } + +-static int grub_shim_lock_load_image_fallback(void *data, grub_uint32_t size) +-{ +- grub_efi_memory_mapped_device_path_t *mempath; +- grub_efi_handle_t image_handle = 0; +- grub_efi_boot_services_t *b; +- grub_efi_status_t status; +- int len; +- +- mempath = grub_malloc (2 * sizeof (grub_efi_memory_mapped_device_path_t)); +- if (!mempath) +- return grub_errno; +- +- mempath[0].header.type = GRUB_EFI_HARDWARE_DEVICE_PATH_TYPE; +- mempath[0].header.subtype = GRUB_EFI_MEMORY_MAPPED_DEVICE_PATH_SUBTYPE; +- mempath[0].header.length = grub_cpu_to_le16_compile_time (sizeof (*mempath)); +- mempath[0].memory_type = GRUB_EFI_LOADER_DATA; +- mempath[0].start_address = (grub_addr_t)data; +- mempath[0].end_address = (grub_addr_t)data + size; +- +- mempath[1].header.type = GRUB_EFI_END_DEVICE_PATH_TYPE; +- mempath[1].header.subtype = GRUB_EFI_END_ENTIRE_DEVICE_PATH_SUBTYPE; +- mempath[1].header.length = sizeof (grub_efi_device_path_t); +- 
+- b = grub_efi_system_table->boot_services; +- status = efi_call_6 (b->load_image, 0, grub_efi_image_handle, +- (grub_efi_device_path_t *) mempath, +- data, size, &image_handle); +- if (status != GRUB_EFI_SUCCESS) { +- return grub_error (GRUB_ERR_ACCESS_DENIED, +- "Cannot verify image, EFI err: %ld", (long)status); +- } +- efi_call_1 (b->unload_image, image_handle); +- return GRUB_ERR_NONE; +-} +- + static grub_err_t + shim_lock_verifier_write (void *context __attribute__ ((unused)), void *buf, grub_size_t size) + { + grub_efi_shim_lock_protocol_t *sl = grub_efi_locate_protocol (&shim_lock_guid, 0); + +- if (!sl) { +- grub_dprintf ("secureboot", "shim not available, trying UEFI validation\n"); +- return grub_shim_lock_load_image_fallback(buf, size); +- } ++ if (!sl) ++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("shim_lock protocol not found")); ++ + if (sl->verify (buf, size) != GRUB_EFI_SUCCESS) + return grub_error (GRUB_ERR_BAD_SIGNATURE, N_("bad shim signature")); + +-- +2.43.0 + diff --git a/packages/grub/0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch b/packages/grub/0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch new file mode 100644 index 00000000..d657801b --- /dev/null +++ b/packages/grub/0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch @@ -0,0 +1,48 @@ +From 95cafe6cf7dd2a02bd33ddee624bb9b7c3a931ae Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Tue, 13 Feb 2024 22:21:41 +0000 +Subject: [PATCH] Revert "UBUNTU: Move verifiers after decompressors" + +We use the PGP verifier to validate the signature of grub.cfg, and do +not want to expose the decompressors to untrusted input. + +Signed-off-by: Ben Cressey +--- + include/grub/file.h | 4 ++-- + tests/file_filter/test.cfg | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/include/grub/file.h b/include/grub/file.h +index fa23688..96827a4 100644 +--- a/include/grub/file.h ++++ b/include/grub/file.h +@@ -180,13 +180,13 @@ extern grub_disk_read_hook_t EXPORT_VAR(grub_file_progress_hook); + /* Filters with lower ID are executed first. 
*/ + typedef enum grub_file_filter_id + { ++ GRUB_FILE_FILTER_VERIFY, + GRUB_FILE_FILTER_GZIO, + GRUB_FILE_FILTER_XZIO, + GRUB_FILE_FILTER_LZOPIO, ++ GRUB_FILE_FILTER_MAX, + GRUB_FILE_FILTER_COMPRESSION_FIRST = GRUB_FILE_FILTER_GZIO, + GRUB_FILE_FILTER_COMPRESSION_LAST = GRUB_FILE_FILTER_LZOPIO, +- GRUB_FILE_FILTER_VERIFY, +- GRUB_FILE_FILTER_MAX, + } grub_file_filter_id_t; + + typedef grub_file_t (*grub_file_filter_t) (grub_file_t in, enum grub_file_type type); +diff --git a/tests/file_filter/test.cfg b/tests/file_filter/test.cfg +index 17dc4a8..4308aac 100644 +--- a/tests/file_filter/test.cfg ++++ b/tests/file_filter/test.cfg +@@ -1,5 +1,5 @@ + trust /keys.pub +-set check_signatures= ++set check_signatures=enforce + cat /file.gz + cat /file.xz + cat /file.lzop +-- +2.43.0 + diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index 8ea9177b..c61feae9 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -9,5 +9,5 @@ build = "../build.rs" path = "../packages.rs" [[package.metadata.build-package.external-files]] -url = "https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm" -sha512 = "aac3fbee3ec5e5a28176d338eab85c660c9525ef3b34ccf84f7c837c724c72b089bc2b57207e36b12c09a7cdd2c7d6e658288c98b9a66cb98e8edd650f302ba5" +url = "https://cdn.amazonlinux.com/al2023/blobstore/f4fa28cb4e1586d622925449b1e24748c6ab09ccebe0fd8ddfa20cf5e7ce182a/grub2-2.06-61.amzn2023.0.9.src.rpm" +sha512 = "57886df0580f166bd741126f19109a0e464bc2408aafca38e68def077a2ab1f64c239d85015c44162b88d787da7ec55a623f4e7d2601942391f0996038393f99" diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index c99a4f40..f252efc6 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -15,7 +15,7 @@ Release: 1%{?dist} Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ -Source0: https://cdn.amazonlinux.com/al2023/blobstore/74f9ee6e75b8f89fe91ccda86896243179968a8664ba045bece11dc5aff61f4e/grub2-2.06-61.amzn2023.0.6.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/f4fa28cb4e1586d622925449b1e24748c6ab09ccebe0fd8ddfa20cf5e7ce182a/grub2-2.06-61.amzn2023.0.9.src.rpm Source1: bios.cfg Source2: efi.cfg Source3: sbat.csv.in @@ -64,6 +64,8 @@ Patch0042: 0042-util-mkimage-Bump-EFI-PE-header-size-to-accommodate-.patch Patch0043: 0043-util-mkimage-avoid-adding-section-table-entry-outsid.patch Patch0044: 0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch Patch0045: 0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch +Patch0046: 0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch +Patch0047: 0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch BuildRequires: automake BuildRequires: bison From 11c26698ca0c4984aa053cbefc9dc34e3fc8e14c Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 14 Feb 2024 00:57:14 +0000 Subject: [PATCH 1178/1356] grub: restrict search to root device The private data partition is assumed to be on the same device as the root filesystem. Enforce that assumption by passing `--root-dev-only` in the embedded grub.cfg. 
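As an illustrative sketch of the check the new flag adds (plain shell, not GRUB
code): the root device and each candidate device are both reduced to their disk
name before matching, so only partitions on the same disk as $root are searched.
The device names here are made up for the example:

    root="hd0,gpt12"; candidate="hd1,gpt1"
    [ "${root%%,*}" = "${candidate%%,*}" ] || echo "skip $candidate (other disk)"

The embedded bios.cfg and efi.cfg changes below simply add --root-dev-only to the
existing search for the BOTTLEROCKET-PRIVATE partition label.
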
Signed-off-by: Ben Cressey --- ...048-add-flag-to-only-search-root-dev.patch | 163 ++++++++++++++++++ packages/grub/bios.cfg | 2 +- packages/grub/efi.cfg | 2 +- packages/grub/grub.spec | 1 + 4 files changed, 166 insertions(+), 2 deletions(-) create mode 100644 packages/grub/0048-add-flag-to-only-search-root-dev.patch diff --git a/packages/grub/0048-add-flag-to-only-search-root-dev.patch b/packages/grub/0048-add-flag-to-only-search-root-dev.patch new file mode 100644 index 00000000..a92993b5 --- /dev/null +++ b/packages/grub/0048-add-flag-to-only-search-root-dev.patch @@ -0,0 +1,163 @@ +From 115f44e341b8f204bccaf63686579a66507c2ded Mon Sep 17 00:00:00 2001 +From: Marta Lewandowska +Date: Mon, 9 Oct 2023 08:53:18 +0200 +Subject: [PATCH] add flag to only search root dev + +fixes bz#2223437 + +Signed-off-by: Marta Lewandowska +--- + grub-core/commands/search.c | 36 ++++++++++++++++++++++++++++++++ + grub-core/commands/search_wrap.c | 5 +++++ + grub-core/kern/misc.c | 30 ++++++++++++++++++++++++++ + include/grub/misc.h | 1 + + include/grub/search.h | 3 ++- + 5 files changed, 74 insertions(+), 1 deletion(-) + +diff --git a/grub-core/commands/search.c b/grub-core/commands/search.c +index ec03c75..e0a3b22 100644 +--- a/grub-core/commands/search.c ++++ b/grub-core/commands/search.c +@@ -89,6 +89,42 @@ iterate_device (const char *name, void *data) + grub_device_close (dev); + } + ++ /* Skip it if it's not the root device when requested. */ ++ if (ctx->flags & SEARCH_FLAGS_ROOTDEV_ONLY) ++ { ++ const char *root_dev; ++ root_dev = grub_env_get ("root"); ++ if (root_dev != NULL && *root_dev != '\0') ++ { ++ char *root_disk = grub_malloc (grub_strlen(root_dev) + 1); ++ char *name_disk = grub_malloc (grub_strlen(name) + 1); ++ char *rem_1 = grub_malloc(grub_strlen(root_dev) + 1); ++ char *rem_2 = grub_malloc(grub_strlen(name) + 1); ++ ++ if (root_disk != NULL && name_disk != NULL && ++ rem_1 != NULL && rem_2 != NULL) ++ { ++ /* get just the disk name; partitions will be different. */ ++ grub_str_sep (root_dev, root_disk, ',', rem_1); ++ grub_str_sep (name, name_disk, ',', rem_2); ++ if (root_disk != NULL && *root_disk != '\0' && ++ name_disk != NULL && *name_disk != '\0') ++ if (grub_strcmp(root_disk, name_disk) != 0) ++ { ++ grub_free (root_disk); ++ grub_free (name_disk); ++ grub_free (rem_1); ++ grub_free (rem_2); ++ return 0; ++ } ++ } ++ grub_free (root_disk); ++ grub_free (name_disk); ++ grub_free (rem_1); ++ grub_free (rem_2); ++ } ++ } ++ + #if defined(DO_SEARCH_FS_UUID) || defined(DO_SEARCH_DISK_UUID) + #define compare_fn grub_strcasecmp + #else +diff --git a/grub-core/commands/search_wrap.c b/grub-core/commands/search_wrap.c +index c8152b1..fd77b1c 100644 +--- a/grub-core/commands/search_wrap.c ++++ b/grub-core/commands/search_wrap.c +@@ -47,6 +47,7 @@ static const struct grub_arg_option options[] = + ARG_TYPE_STRING}, + {"no-floppy", 'n', 0, N_("Do not probe any floppy drive."), 0, 0}, + {"efidisk-only", 0, 0, N_("Only probe EFI disks."), 0, 0}, ++ {"root-dev-only", 'r', 0, N_("Only probe root device."), 0, 0}, + {"hint", 'h', GRUB_ARG_OPTION_REPEATABLE, + N_("First try the device HINT. 
If HINT ends in comma, " + "also try subpartitions"), N_("HINT"), ARG_TYPE_STRING}, +@@ -84,6 +85,7 @@ enum options + SEARCH_SET, + SEARCH_NO_FLOPPY, + SEARCH_EFIDISK_ONLY, ++ SEARCH_ROOTDEV_ONLY, + SEARCH_HINT, + SEARCH_HINT_IEEE1275, + SEARCH_HINT_BIOS, +@@ -198,6 +200,9 @@ grub_cmd_search (grub_extcmd_context_t ctxt, int argc, char **args) + if (state[SEARCH_EFIDISK_ONLY].set) + flags |= SEARCH_FLAGS_EFIDISK_ONLY; + ++ if (state[SEARCH_ROOTDEV_ONLY].set) ++ flags |= SEARCH_FLAGS_ROOTDEV_ONLY; ++ + if (state[SEARCH_LABEL].set) + grub_search_label (id, var, flags, hints, nhints); + else if (state[SEARCH_FS_UUID].set) +diff --git a/grub-core/kern/misc.c b/grub-core/kern/misc.c +index 5d2b246..a95d182 100644 +--- a/grub-core/kern/misc.c ++++ b/grub-core/kern/misc.c +@@ -598,6 +598,36 @@ grub_reverse (char *str) + } + } + ++/* Separate string into two parts, broken up by delimiter delim. */ ++void ++grub_str_sep (const char *s, char *p, char delim, char *r) ++{ ++ char* t = grub_strndup(s, grub_strlen(s)); ++ ++ if (t != NULL && *t != '\0') ++ { ++ char* tmp = t; ++ ++ while (((*p = *t) != '\0') && ((*p = *t) != delim)) ++ { ++ p++; ++ t++; ++ } ++ *p = '\0'; ++ ++ if (*t != '\0') ++ { ++ t++; ++ while ((*r++ = *t++) != '\0') ++ ; ++ *r = '\0'; ++ } ++ grub_free (tmp); ++ } ++ else ++ grub_free (t); ++} ++ + /* Divide N by D, return the quotient, and store the remainder in *R. */ + grub_uint64_t + grub_divmod64 (grub_uint64_t n, grub_uint64_t d, grub_uint64_t *r) +diff --git a/include/grub/misc.h b/include/grub/misc.h +index 0a26855..a359b0d 100644 +--- a/include/grub/misc.h ++++ b/include/grub/misc.h +@@ -314,6 +314,7 @@ void *EXPORT_FUNC(grub_memset) (void *s, int c, grub_size_t n); + grub_size_t EXPORT_FUNC(grub_strlen) (const char *s) WARN_UNUSED_RESULT; + int EXPORT_FUNC(grub_printf) (const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 1, 2))); + int EXPORT_FUNC(grub_printf_) (const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 1, 2))); ++void EXPORT_FUNC(grub_str_sep) (const char *s, char *p, char delim, char *r); + + /* Replace all `ch' characters of `input' with `with' and copy the + result into `output'; return EOS address of `output'. */ +diff --git a/include/grub/search.h b/include/grub/search.h +index a5f56b2..8727409 100644 +--- a/include/grub/search.h ++++ b/include/grub/search.h +@@ -22,7 +22,8 @@ + enum search_flags + { + SEARCH_FLAGS_NO_FLOPPY = 1, +- SEARCH_FLAGS_EFIDISK_ONLY = 2 ++ SEARCH_FLAGS_EFIDISK_ONLY = 2, ++ SEARCH_FLAGS_ROOTDEV_ONLY = 4 + }; + + void grub_search_fs_file (const char *key, const char *var, +-- +2.43.0 + diff --git a/packages/grub/bios.cfg b/packages/grub/bios.cfg index ab8b5e14..f3b46aa5 100644 --- a/packages/grub/bios.cfg +++ b/packages/grub/bios.cfg @@ -5,7 +5,7 @@ gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub export boot_uuid -search --no-floppy --set private --part-label BOTTLEROCKET-PRIVATE +search --no-floppy --root-dev-only --set private --part-label BOTTLEROCKET-PRIVATE configfile /grub/grub.cfg echo "boot failed (device ($boot_dev), uuid $boot_uuid)" echo "rebooting in 30 seconds..." 
diff --git a/packages/grub/efi.cfg b/packages/grub/efi.cfg index 5d2fd3e6..fa2e5574 100644 --- a/packages/grub/efi.cfg +++ b/packages/grub/efi.cfg @@ -4,7 +4,7 @@ gptprio.next -d boot_dev -u boot_uuid set root=$boot_dev set prefix=($root)/grub export boot_uuid -search --no-floppy --set private --part-label BOTTLEROCKET-PRIVATE +search --no-floppy --root-dev-only --set private --part-label BOTTLEROCKET-PRIVATE configfile /grub/grub.cfg echo "boot failed (device ($boot_dev), uuid $boot_uuid)" echo "rebooting in 30 seconds..." diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index f252efc6..a4a7e314 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -66,6 +66,7 @@ Patch0044: 0044-efi-return-virtual-size-of-section-found-by-grub_efi.patch Patch0045: 0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch Patch0046: 0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch Patch0047: 0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch +Patch0048: 0048-add-flag-to-only-search-root-dev.patch BuildRequires: automake BuildRequires: bison From 6f1e468b561ffcb6f9fa46ace2c6103b2fcfb9db Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 14 Feb 2024 17:20:23 +0000 Subject: [PATCH 1179/1356] grub: increment SBAT "grub,4" denotes a build that includes fixes for CVE-2023-4692 and CVE-2023-4693, so it is correct even though our build never enabled the vulnerable NTFS module. Signed-off-by: Ben Cressey --- packages/grub/sbat.csv.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/grub/sbat.csv.in b/packages/grub/sbat.csv.in index 78e44b9d..dabc2612 100644 --- a/packages/grub/sbat.csv.in +++ b/packages/grub/sbat.csv.in @@ -1,3 +1,3 @@ sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md -grub,3,Free Software Foundation,grub,__VERSION__,https://www.gnu.org/software/grub/ +grub,4,Free Software Foundation,grub,__VERSION__,https://www.gnu.org/software/grub/ grub.bottlerocket,1,Bottlerocket,grub,__VERSION__,https://github.com/bottlerocket-os/bottlerocket/blob/develop/SECURITY.md From f3f0c6f6f3f5ebac2c363cca54d75274a7a6779a Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 22 Feb 2024 08:53:09 +0000 Subject: [PATCH 1180/1356] kernel-5.10: update to 5.10.209 Rebase to Amazon Linux upstream version 5.10.209-198.858.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 69c561fa..478dba1e 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/3d52b3205bf9d50e870cd4edaece690532e22b3738f95a1ca05a66f98be13aed/kernel-5.10.209-198.812.amzn2.src.rpm" -sha512 = "20075e0f9d0def7f41a0dc79711fccb57e3f5b79079791da2f4cc56b11c9d1f306cc63753b6004bfcc43443bb2c6668c44c69a6d326da45d8a169ffe4a510213" +url = "https://cdn.amazonlinux.com/blobstore/836671087eb8725263480f50a3717b7737dc62ec71b9acc07dbe77d721052145/kernel-5.10.209-198.858.amzn2.src.rpm" +sha512 = "14b219aad20496915ff7a80fee2a7f57eab0cafe2931936b1e0e51da65b8c80d7b464a5df76f8b62d38515088be3d2ebdde4970eb1c625c43e07ccd2eba612b5" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index a325ac25..5fc69759 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/3d52b3205bf9d50e870cd4edaece690532e22b3738f95a1ca05a66f98be13aed/kernel-5.10.209-198.812.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/836671087eb8725263480f50a3717b7737dc62ec71b9acc07dbe77d721052145/kernel-5.10.209-198.858.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 6847189ea0e85a5abdf2f7173bbb31bb66aff1ca Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 22 Feb 2024 08:53:48 +0000 Subject: [PATCH 1181/1356] kernel-5.15: update to 5.15.148 Rebase to Amazon Linux upstream version 5.15.148-97.161.amzn2. Signed-off-by: Leonard Foerster --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 00c6ac05..4faadd6a 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/099e5401bd8b5ffe59330ca1179847c32a481136173f21b7af16d20b780de422/kernel-5.15.148-97.158.amzn2.src.rpm" -sha512 = "b2a1169b1d11ca4a93a4267d4d88fb97a87481a3b8f60702c0715fa2ad73d05a3b4ac74d25dd2cea2410038d58d8b6783a25382b3fb68879821595b1d42b230d" +url = "https://cdn.amazonlinux.com/blobstore/42ac40513bf403555b444c8eb2792a334a4db9983e83106d6a75b335e0ab1a92/kernel-5.15.148-97.161.amzn2.src.rpm" +sha512 = "2c8f6886da223166196a969ef58f4abaf2549cbe3407599ce92e0529954f520f4eb4d7aaa0574eba38cf64e999b1de4e25c7f237a04b484bd810236a57e3679d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index df6f8271..7c44089e 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/099e5401bd8b5ffe59330ca1179847c32a481136173f21b7af16d20b780de422/kernel-5.15.148-97.158.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/42ac40513bf403555b444c8eb2792a334a4db9983e83106d6a75b335e0ab1a92/kernel-5.15.148-97.161.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 988a82b00f3002e6844ffb918eb0006666ad5e7e Mon Sep 17 00:00:00 2001 From: Leonard Foerster Date: Thu, 22 Feb 2024 08:54:29 +0000 Subject: [PATCH 1182/1356] kernel-6.1: update to 6.1.77 Rebase to Amazon Linux upstream version 6.1.77-99.164.amzn2023. Signed-off-by: Leonard Foerster --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 39273c36..b960b89d 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/5d1de5166545475a39271909f57fe0e4e4527f4ce6f891c94fe741b36357d40b/kernel-6.1.75-99.163.amzn2023.src.rpm" -sha512 = "7ddc100d62acca20ec1b0f383c9a24424f9e6eb411250a02daaef70459e56a044e791826b5cc6600eea706c38d036d9264bb11f06bdb5f16ec6a6319b47b38d1" +url = "https://cdn.amazonlinux.com/al2023/blobstore/bb5b0dc5f0e4b3b6c9174c124b0ed7b8a4a9c500b4f2a9ef64a7ac6a44f6c2bc/kernel-6.1.77-99.164.amzn2023.src.rpm" +sha512 = "a504e14b35437ea3455a3a719e54c5da6520a49ab8026126d10e6ad3fbb079944b9be8d52cd966d750fd8cb77efe11fb76509c1c2fe9418e92dff81d227b5af4" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 1dacaaa2..9524416f 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.75 +Version: 6.1.77 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/5d1de5166545475a39271909f57fe0e4e4527f4ce6f891c94fe741b36357d40b/kernel-6.1.75-99.163.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/bb5b0dc5f0e4b3b6c9174c124b0ed7b8a4a9c500b4f2a9ef64a7ac6a44f6c2bc/kernel-6.1.77-99.164.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From a187f36e2529381bcf1fc0dce7286bb42a38a466 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Thu, 22 Feb 2024 19:17:36 +0000 Subject: [PATCH 1183/1356] packages: update kmod-5.10-nvidia to 470.239.06 Signed-off-by: Matthew Yeazel --- packages/kmod-5.10-nvidia/Cargo.toml | 8 ++++---- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index afbc4cb7..fff89483 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-5.10-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.223.02/NVIDIA-Linux-x86_64-470.223.02.run" -sha512 = "66e470343b6f0c04703c81169cd03674be06b5315db738cab64308ec073b5bf5b87508b58ac8b6288d10e95307072d99e874e7884207a323a3dd08887bbc8750" +url = "https://us.download.nvidia.com/tesla/470.239.06/NVIDIA-Linux-x86_64-470.239.06.run" +sha512 = "92bdfb11db405071cd58deed2a0853448932657e256258e0a0bda5069f00485e2b6e49b4a0eeff499a4991be4f884273f3564c164110b1ed1f5d924506f13e2d" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.223.02/NVIDIA-Linux-aarch64-470.223.02.run" -sha512 = "c22eab4ec6aa1868bbe55200ba74187939571ae78645c333fe05d544869c54b84d63e26f5c4f922bbe4e768da1f394d15d0b85cacbd4bbbc2b1dfd5074734a02" +url = "https://us.download.nvidia.com/tesla/470.239.06/NVIDIA-Linux-aarch64-470.239.06.run" +sha512 = "e448c18cf243233387d3bde4fff4d6fa1eaccc743706f18fd3c6431ce73c8f4ac49009a18ff6bd7796456ce719905bb7611548bf68d61259285f5d5f1d061c0f" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 042683fe..0de7ac53 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,4 +1,4 @@ -%global tesla_470 470.223.02 +%global tesla_470 470.239.06 %global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} %global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) From 15bf12fdd45e68a16208c504879ab5da4630f577 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Thu, 22 Feb 2024 19:18:12 +0000 Subject: [PATCH 1184/1356] packages: update kmod-5.15-nvidia to 535.161.07 Signed-off-by: Matthew Yeazel --- packages/kmod-5.15-nvidia/Cargo.toml | 8 ++++---- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 0f86bacc..b8b41905 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-5.15-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" 
[[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-x86_64-535.129.03.run" -sha512 = "3d7142658fe836e1debf7786857bdb293490ef33351e9b7d39face245fe8596b0f46052b86fae08350fcda1e2a9fd68d7309b94e107d1b016bd529d8fc37e31f" +url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" +sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-aarch64-535.129.03.run" -sha512 = "706de7e53b81f909d8bc6a12a39c594754a164c49f5d23c7939dc3abcfc04f5d5b12b7d65762ae574582149a098f06ee5fe95be4f8ad1056a3307a6ce93f3c00" +url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535.161.07.run" +sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 00e06416..6e41533a 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 129 -%global tesla_patch 03 +%global tesla_minor 161 +%global tesla_patch 07 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) From 77d2fe36e1e41d06ee0e50386464327e380691b7 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Thu, 22 Feb 2024 19:19:22 +0000 Subject: [PATCH 1185/1356] packages: update kmod-6.1-nvidia to 535.161.07 Signed-off-by: Matthew Yeazel --- packages/kmod-6.1-nvidia/Cargo.toml | 8 ++++---- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 77365343..0fd3c461 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -13,13 +13,13 @@ package-name = "kmod-6.1-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-x86_64-535.129.03.run" -sha512 = "3d7142658fe836e1debf7786857bdb293490ef33351e9b7d39face245fe8596b0f46052b86fae08350fcda1e2a9fd68d7309b94e107d1b016bd529d8fc37e31f" +url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" +sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.129.03/NVIDIA-Linux-aarch64-535.129.03.run" -sha512 = "706de7e53b81f909d8bc6a12a39c594754a164c49f5d23c7939dc3abcfc04f5d5b12b7d65762ae574582149a098f06ee5fe95be4f8ad1056a3307a6ce93f3c00" +url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535.161.07.run" +sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" force-upstream = true [build-dependencies] 
diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 6173a23b..e54e300c 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 129 -%global tesla_patch 03 +%global tesla_minor 161 +%global tesla_patch 07 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) From 72e2cddf29ea960a06ae6f0a5a48a19eb711beb2 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 26 Feb 2024 22:24:25 +0000 Subject: [PATCH 1186/1356] Update github checkout and cache actions to v4 Github has deprecated node 16, so we should be using the @v4 versions of the checkout and cache actions. --- .github/actions/list-variants/action.yml | 2 +- .github/actions/setup-node/action.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/list-variants/action.yml b/.github/actions/list-variants/action.yml index a6022a0d..d2bc6c0b 100644 --- a/.github/actions/list-variants/action.yml +++ b/.github/actions/list-variants/action.yml @@ -10,7 +10,7 @@ outputs: runs: using: "composite" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - id: get-variants name: Determine variants shell: bash diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index d8f9f192..3a16aff2 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -13,13 +13,13 @@ runs: echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@v4 # Cache `cargo-make`, `cargo-cache` with: path: | ~/.cargo key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - - uses: actions/cache@v3 + - uses: actions/cache@v4 # Cache first-party code dependencies with: path: | From b290b3fde2013d49973a35539b159294d970471b Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Fri, 8 Mar 2024 17:37:56 +0000 Subject: [PATCH 1187/1356] kernel-5.10: update to 5.10.210 Rebase to Amazon Linux upstream version 5.10.210-201.852.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 478dba1e..5934641a 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/836671087eb8725263480f50a3717b7737dc62ec71b9acc07dbe77d721052145/kernel-5.10.209-198.858.amzn2.src.rpm" -sha512 = "14b219aad20496915ff7a80fee2a7f57eab0cafe2931936b1e0e51da65b8c80d7b464a5df76f8b62d38515088be3d2ebdde4970eb1c625c43e07ccd2eba612b5" +url = "https://cdn.amazonlinux.com/blobstore/b1e8ee5486de775eb34fe9d96ae2e1dcbb8484d2c657a11db84a52738669af3f/kernel-5.10.210-201.852.amzn2.src.rpm" +sha512 = "cf944517e594f30140d99b91df7f673eb280af28a459b09c3a4735ae0aa888c0b0cf3a4515c0a50c1ef7840583ef005144941156af0cb13b5ec1967c054e86c0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 5fc69759..9b47496d 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.209 +Version: 5.10.210 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/836671087eb8725263480f50a3717b7737dc62ec71b9acc07dbe77d721052145/kernel-5.10.209-198.858.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/b1e8ee5486de775eb34fe9d96ae2e1dcbb8484d2c657a11db84a52738669af3f/kernel-5.10.210-201.852.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 4a4545110ec7112ddb3cddb3482f58f2db11ece0 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Fri, 8 Mar 2024 17:38:18 +0000 Subject: [PATCH 1188/1356] kernel-5.15: update to 5.15.149 Rebase to Amazon Linux upstream version 5.15.149-99.161.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 4faadd6a..405e0de3 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/42ac40513bf403555b444c8eb2792a334a4db9983e83106d6a75b335e0ab1a92/kernel-5.15.148-97.161.amzn2.src.rpm" -sha512 = "2c8f6886da223166196a969ef58f4abaf2549cbe3407599ce92e0529954f520f4eb4d7aaa0574eba38cf64e999b1de4e25c7f237a04b484bd810236a57e3679d" +url = "https://cdn.amazonlinux.com/blobstore/c9a6f101b5d843eb394fcb0a400dc397ddeb0682a170fa606855688a6364d63e/kernel-5.15.149-99.161.amzn2.src.rpm" +sha512 = "148ed4c3b84719e69b0a5f1c89ecb548b93593e1c849aa3ab245c93241236443f7056d2af99695eadfee3fb834654398ab363b06712f9bc291be991867eed34d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 7c44089e..b6b88b2c 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.148 +Version: 5.15.149 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/42ac40513bf403555b444c8eb2792a334a4db9983e83106d6a75b335e0ab1a92/kernel-5.15.148-97.161.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c9a6f101b5d843eb394fcb0a400dc397ddeb0682a170fa606855688a6364d63e/kernel-5.15.149-99.161.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 2bc31eb4fa4c0da6e02bab9cba6e8565b1a5fa17 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Fri, 8 Mar 2024 17:38:43 +0000 Subject: [PATCH 1189/1356] kernel-6.1: update to 6.1.79 Rebase to Amazon Linux upstream version 6.1.79-99.164.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index b960b89d..e289ff82 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/bb5b0dc5f0e4b3b6c9174c124b0ed7b8a4a9c500b4f2a9ef64a7ac6a44f6c2bc/kernel-6.1.77-99.164.amzn2023.src.rpm" -sha512 = "a504e14b35437ea3455a3a719e54c5da6520a49ab8026126d10e6ad3fbb079944b9be8d52cd966d750fd8cb77efe11fb76509c1c2fe9418e92dff81d227b5af4" +url = "https://cdn.amazonlinux.com/al2023/blobstore/d8f882b99ae44db57c86fba424e3be17f0c29d1eb669933169c985eac3cb7c9e/kernel-6.1.79-99.164.amzn2023.src.rpm" +sha512 = "54efad66a0d7b6db4111b041de8a3463e7feae847a51c795ae072a79d802a2879a7adebf1f880b5d227ce04f577f0b2247a81caaa2d34b890e1d56b99108f22c" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 9524416f..184cd6ee 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.77 +Version: 6.1.79 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/bb5b0dc5f0e4b3b6c9174c124b0ed7b8a4a9c500b4f2a9ef64a7ac6a44f6c2bc/kernel-6.1.77-99.164.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/d8f882b99ae44db57c86fba424e3be17f0c29d1eb669933169c985eac3cb7c9e/kernel-6.1.79-99.164.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From d490accecc58f286ed93854aa24cd4b78fce6cc3 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 25 Mar 2024 15:21:39 +0000 Subject: [PATCH 1190/1356] kernel-5.10: update to 5.10.210 Rebase to Amazon Linux upstream version 5.10.210-201.855.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 5934641a..8cc61f6d 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/b1e8ee5486de775eb34fe9d96ae2e1dcbb8484d2c657a11db84a52738669af3f/kernel-5.10.210-201.852.amzn2.src.rpm" -sha512 = "cf944517e594f30140d99b91df7f673eb280af28a459b09c3a4735ae0aa888c0b0cf3a4515c0a50c1ef7840583ef005144941156af0cb13b5ec1967c054e86c0" +url = "https://cdn.amazonlinux.com/blobstore/5dc866850c576c78dc05635db2b2cba76b11a08ad012d2a90d7fceac3a41ef0a/kernel-5.10.210-201.855.amzn2.src.rpm" +sha512 = "6a30c999fb4851b84c580c907ec749f77edc8f424bdc37d10d1325132fac1cf97991918872634ab9fa3493430123a9a637e6dd0f19a67e2a62cf7efe7162adf2" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 9b47496d..441e39da 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/b1e8ee5486de775eb34fe9d96ae2e1dcbb8484d2c657a11db84a52738669af3f/kernel-5.10.210-201.852.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/5dc866850c576c78dc05635db2b2cba76b11a08ad012d2a90d7fceac3a41ef0a/kernel-5.10.210-201.855.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 57032db2526c939880c4a912aea4376ad69e7fcc Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 25 Mar 2024 15:22:03 +0000 Subject: [PATCH 1191/1356] kernel-5.15: update to 5.15.149 Rebase to Amazon Linux upstream version 5.15.149-99.162.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 405e0de3..bd8b063e 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/c9a6f101b5d843eb394fcb0a400dc397ddeb0682a170fa606855688a6364d63e/kernel-5.15.149-99.161.amzn2.src.rpm" -sha512 = "148ed4c3b84719e69b0a5f1c89ecb548b93593e1c849aa3ab245c93241236443f7056d2af99695eadfee3fb834654398ab363b06712f9bc291be991867eed34d" +url = "https://cdn.amazonlinux.com/blobstore/19610ac0e9db4f43b411af72588acd9a0b4edc3103d72c075a233982bf18f5a5/kernel-5.15.149-99.162.amzn2.src.rpm" +sha512 = "a51577d353eb3fe639eef06b1db411ddbc23e5f1819995ff5dde146b943533bb09b42bb9c915d0f7d7ee9c71730a45149b335490222934fffbbe22c68bf93a13" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index b6b88b2c..3da6bddf 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/c9a6f101b5d843eb394fcb0a400dc397ddeb0682a170fa606855688a6364d63e/kernel-5.15.149-99.161.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/19610ac0e9db4f43b411af72588acd9a0b4edc3103d72c075a233982bf18f5a5/kernel-5.15.149-99.162.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 5386b04f23e753e1ee9423deee8a996f3e8a372a Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 25 Mar 2024 15:22:21 +0000 Subject: [PATCH 1192/1356] kernel-6.1: update to 6.1.79 Rebase to Amazon Linux upstream version 6.1.79-99.167.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index e289ff82..5ed5e33f 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/d8f882b99ae44db57c86fba424e3be17f0c29d1eb669933169c985eac3cb7c9e/kernel-6.1.79-99.164.amzn2023.src.rpm" -sha512 = "54efad66a0d7b6db4111b041de8a3463e7feae847a51c795ae072a79d802a2879a7adebf1f880b5d227ce04f577f0b2247a81caaa2d34b890e1d56b99108f22c" +url = "https://cdn.amazonlinux.com/al2023/blobstore/c26f813e14f0867fda99398c0bae01ae7990746bf3340bb22a375d16a358b4e7/kernel-6.1.79-99.167.amzn2023.src.rpm" +sha512 = "8151b4982dc283c508d3448488ddabc22b16366155e798705b8b162d679cb795486cb521af713193fc0bab84ef520dcab37bad02dc7d08d88bfd7cc4931c1439" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 184cd6ee..9bf1a5d3 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/d8f882b99ae44db57c86fba424e3be17f0c29d1eb669933169c985eac3cb7c9e/kernel-6.1.79-99.164.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/c26f813e14f0867fda99398c0bae01ae7990746bf3340bb22a375d16a358b4e7/kernel-6.1.79-99.167.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 19ddd07e30bcdb8762677b4e65452de8d7512a83 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 29 Mar 2024 17:12:59 +0000 Subject: [PATCH 1193/1356] kernel: compress modules with gzip Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 4 ++-- packages/kernel-5.15/config-bottlerocket | 4 ++-- packages/kernel-6.1/config-bottlerocket | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 264e12b6..48a9d61a 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -91,9 +91,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable xz modules compression +# Enable gz modules compression CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_XZ=y +CONFIG_MODULE_COMPRESS_GZIP=y # Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for # them before mounting the root device. diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 3e963c6e..17c21158 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -115,9 +115,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable xz modules compression +# Enable gz modules compression # CONFIG_MODULE_COMPRESS_NONE is not set -CONFIG_MODULE_COMPRESS_XZ=y +CONFIG_MODULE_COMPRESS_GZIP=y # Add virtio drivers for development setups running as guests in qemu CONFIG_VIRTIO_CONSOLE=m diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 000efd66..812bf19c 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -120,9 +120,9 @@ CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_DECOMPRESS_ZSTD=y -# Enable xz modules compression +# Enable gz modules compression # CONFIG_MODULE_COMPRESS_NONE is not set -CONFIG_MODULE_COMPRESS_XZ=y +CONFIG_MODULE_COMPRESS_GZIP=y # Add virtio drivers for development setups running as guests in qemu CONFIG_VIRTIO_CONSOLE=m From 45110a04b63026561913725dfccd9e385e43246a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 31 Mar 2024 15:01:52 +0000 Subject: [PATCH 1194/1356] actions: prefer nvidia flavor of aws variants The "aws-*-nvidia" variants have the same settings model as their unflavored counterparts, and build a superset of the packages. For CI purposes, they provide enough coverage that there's no need to build the non-nvidia ones. 
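The list-variants filter below drops an unflavored variant whenever its "-nvidia"
sibling appears in the sorted list. A quick local sanity check of the awk
one-liner (sample variant names, not the full list):

    printf '%s\n' aws-ecs-2 aws-k8s-1.29 aws-k8s-1.29-nvidia metal-dev | sort |
      awk '$0 != x "-nvidia" && NR>1 {print x} {x=$0} END {print}'
    # prints aws-ecs-2, aws-k8s-1.29-nvidia, and metal-dev; aws-k8s-1.29 is dropped
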
Signed-off-by: Ben Cressey --- .github/actions/list-variants/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/list-variants/action.yml b/.github/actions/list-variants/action.yml index d2bc6c0b..a6e800c7 100644 --- a/.github/actions/list-variants/action.yml +++ b/.github/actions/list-variants/action.yml @@ -16,7 +16,7 @@ runs: shell: bash run: | cd variants - output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | jq -R -s -c 'split("\n")[:-1]')" + output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | sort | awk '$0 != x "-nvidia" && NR>1 {print x} {x=$0} END {print}' | jq -R -s -c 'split("\n")[:-1]')" echo $output echo $output >> $GITHUB_OUTPUT output="aarch-enemies=$(ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]')" From 9bf65e7d8925dc0f8b6fda49e4c0ca93048501cb Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sun, 31 Mar 2024 15:09:07 +0000 Subject: [PATCH 1195/1356] actions: unset upstream source fallback for nvidia The "kmod-*-nvidia" packages have `force-upstream = true` set in the build metadata. All of the other packages are expected to be in the lookaside cache. Signed-off-by: Ben Cressey --- .github/workflows/build.yml | 1 - .github/workflows/weekly.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3ee66c2f..2cffc553 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -80,5 +80,4 @@ jobs: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ -e BUILDSYS_ARCH=${{ matrix.arch }} \ -e BUILDSYS_JOBS=12 \ - -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="${{ contains(matrix.variant, 'nvidia') }}" \ -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 97f33522..125e554f 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -62,5 +62,4 @@ jobs: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ -e BUILDSYS_ARCH=${{ matrix.arch }} \ -e BUILDSYS_JOBS=12 \ - -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="${{ contains(matrix.variant, 'nvidia') }}" \ -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" From 17ab8de6adeef641ebdc9238b9150031189efbe7 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 2 Apr 2024 22:22:21 +0000 Subject: [PATCH 1196/1356] kernel-5.10: update to 5.10.213 Rebase to Amazon Linux upstream version 5.10.213-201.855.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 8cc61f6d..a19773c6 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/5dc866850c576c78dc05635db2b2cba76b11a08ad012d2a90d7fceac3a41ef0a/kernel-5.10.210-201.855.amzn2.src.rpm" -sha512 = "6a30c999fb4851b84c580c907ec749f77edc8f424bdc37d10d1325132fac1cf97991918872634ab9fa3493430123a9a637e6dd0f19a67e2a62cf7efe7162adf2" +url = "https://cdn.amazonlinux.com/blobstore/f2f4a85aff9b0efec71d75bc29454ce8ab73974486a2a8ba541343cee1c7a622/kernel-5.10.213-201.855.amzn2.src.rpm" +sha512 = "9e61a292106ab4872ff8bd89aa0c32613c7e78f3d6776ada31ba1d63e26f923a5b08e4fb2e5c927459cc057476d0e8e45c2f125ee226a6d402ba0c4025d78cde" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 441e39da..d616a599 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.210 +Version: 5.10.213 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/5dc866850c576c78dc05635db2b2cba76b11a08ad012d2a90d7fceac3a41ef0a/kernel-5.10.210-201.855.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/f2f4a85aff9b0efec71d75bc29454ce8ab73974486a2a8ba541343cee1c7a622/kernel-5.10.213-201.855.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 4093b80371a1ab7e3e01b32693130fbdc1323dc8 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 2 Apr 2024 22:22:44 +0000 Subject: [PATCH 1197/1356] kernel-5.15: update to 5.15.152 Rebase to Amazon Linux upstream version 5.15.152-100.162.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index bd8b063e..a4ef8ad3 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/19610ac0e9db4f43b411af72588acd9a0b4edc3103d72c075a233982bf18f5a5/kernel-5.15.149-99.162.amzn2.src.rpm" -sha512 = "a51577d353eb3fe639eef06b1db411ddbc23e5f1819995ff5dde146b943533bb09b42bb9c915d0f7d7ee9c71730a45149b335490222934fffbbe22c68bf93a13" +url = "https://cdn.amazonlinux.com/blobstore/29a1d43caffcebd032ece82a974ba5db68b1354f508a35f6df62d8e1f6106ee8/kernel-5.15.152-100.162.amzn2.src.rpm" +sha512 = "3d0ea5442f26d315d2d96968c4c1b8a5b2a2bd1a12ac0892351df9ef837efe2bae90cc9d4f3687acf8a5eddb96971d805407fac9dcdcb1d24d7cfef304eda77a" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 3da6bddf..26dad62e 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.149 +Version: 5.15.152 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/19610ac0e9db4f43b411af72588acd9a0b4edc3103d72c075a233982bf18f5a5/kernel-5.15.149-99.162.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/29a1d43caffcebd032ece82a974ba5db68b1354f508a35f6df62d8e1f6106ee8/kernel-5.15.152-100.162.amzn2.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From 6a4e4ecbb27f7a14a08cda5220a9c64c1fc07c10 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 2 Apr 2024 22:23:02 +0000 Subject: [PATCH 1198/1356] kernel-6.1: update to 6.1.82 Rebase to Amazon Linux upstream version 6.1.82-99.168.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 5ed5e33f..cc0ced88 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/c26f813e14f0867fda99398c0bae01ae7990746bf3340bb22a375d16a358b4e7/kernel-6.1.79-99.167.amzn2023.src.rpm" -sha512 = "8151b4982dc283c508d3448488ddabc22b16366155e798705b8b162d679cb795486cb521af713193fc0bab84ef520dcab37bad02dc7d08d88bfd7cc4931c1439" +url = "https://cdn.amazonlinux.com/al2023/blobstore/4004a1fe6830de6cabbf60ae7345aef54260400b86ac4973fd29cc6a31d9bf9c/kernel-6.1.82-99.168.amzn2023.src.rpm" +sha512 = "249f3b440248062cc1b67fe89c0bfc75d2b6f6cdac63c539884c7257d334ef7bdaeb2f87fffbfd12d4a5389cc65627b1a64d7c6b3a32b7247e222811dc06f6bc" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 9bf1a5d3..1ddb005a 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.79 +Version: 6.1.82 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/c26f813e14f0867fda99398c0bae01ae7990746bf3340bb22a375d16a358b4e7/kernel-6.1.79-99.167.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/4004a1fe6830de6cabbf60ae7345aef54260400b86ac4973fd29cc6a31d9bf9c/kernel-6.1.82-99.168.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From fc8536b8b4e25355562381ddd366a7c574ce3493 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 10 Apr 2024 16:16:52 +0000 Subject: [PATCH 1199/1356] Update linter, fix new warnings. Update setup-go from v3 to v5, and golangci-lint-action from v3 to v4. Update .golangci.yaml for the new version of golangci-lint, and fix the three unused-parameter lints it finds in host-ctr. 
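For anyone reproducing these findings locally, the workflow's lint steps can be approximated from a developer machine; a minimal sketch, assuming golangci-lint is installed and the commands are run from the repository root:

    # run the same linter the workflow invokes, once per Go module
    (cd sources/host-ctr && golangci-lint run)
    (cd sources/ecs-gpu-init && golangci-lint run)

The unused-parameter findings themselves are conventionally addressed by renaming the unused argument to `_`, which silences the lint without changing the function signature.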
--- .github/workflows/golangci-lint.yaml | 6 +++--- .golangci.yaml | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 5872c576..6adbf290 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -14,17 +14,17 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v5 with: go-version: 1.21 - uses: actions/checkout@v4 - name: lint-host-ctr - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: latest working-directory: sources/host-ctr - name: lint-ecs-gpu-init - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: latest working-directory: sources/ecs-gpu-init diff --git a/.golangci.yaml b/.golangci.yaml index 7ebf24ee..964e117a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -12,6 +12,7 @@ linters: run: timeout: 3m - skip-dirs: +issues: + exclude-dirs: - vendor - .gomodcache From 050ee44fa793629c222afdcbafd49dd4ec054c43 Mon Sep 17 00:00:00 2001 From: monirul Date: Fri, 5 Apr 2024 19:28:47 +0000 Subject: [PATCH 1200/1356] Add Fabric Manager Support to Bottlerocket Signed-off-by: monirul --- packages/kmod-5.15-nvidia/Cargo.toml | 10 +++ .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 61 ++++++++++++++++++- .../kmod-5.15-nvidia/nvidia-fabricmanager.cfg | 34 +++++++++++ .../nvidia-fabricmanager.service | 16 +++++ .../kmod-5.15-nvidia/nvidia-tmpfiles.conf.in | 2 + packages/kmod-6.1-nvidia/Cargo.toml | 10 +++ packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 61 ++++++++++++++++++- .../kmod-6.1-nvidia/nvidia-fabricmanager.cfg | 34 +++++++++++ .../nvidia-fabricmanager.service | 16 +++++ .../kmod-6.1-nvidia/nvidia-tmpfiles.conf.in | 2 + 10 files changed, 242 insertions(+), 4 deletions(-) create mode 100644 packages/kmod-5.15-nvidia/nvidia-fabricmanager.cfg create mode 100644 packages/kmod-5.15-nvidia/nvidia-fabricmanager.service create mode 100644 packages/kmod-6.1-nvidia/nvidia-fabricmanager.cfg create mode 100644 packages/kmod-6.1-nvidia/nvidia-fabricmanager.service diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index b8b41905..20d2c316 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -22,6 +22,16 @@ url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535. 
sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" force-upstream = true +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-535.161.07-archive.tar.xz" +sha512 = "868b35d567e4c6dccbff0f7e8f74bc55781c8d71db995fd9e471829afec0b44fd430caba964377052678e244d18ea999133487f9a3c50c7289f381480b24c55d" +force-upstream = true + +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-535.161.07-archive.tar.xz" +sha512 = "f37f7a24e31dd6ed184d1041616abb8cfcb0ddaec79778930db79bbef8b23b3d468daaa9c156a6cf7a7f2ffc0507e78e2bb6215f70bc39d11bb0ee16c5ef4c82" +force-upstream = true + [build-dependencies] glibc = { path = "../glibc" } kernel-5_15 = { path = "../kernel-5.15" } diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 6e41533a..e0df743c 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -2,6 +2,12 @@ %global tesla_minor 161 %global tesla_patch 07 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} +%if "%{?_cross_arch}" == "aarch64" +%global fm_arch sbsa +%else +%global fm_arch %{_cross_arch} +%endif + %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -21,13 +27,20 @@ Summary: NVIDIA drivers for the 5.15 kernel License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ -# NVIDIA .run scripts from 0 to 199 +# NVIDIA archives from 0 to 199 +# NVIDIA .run scripts for kernel and userspace drivers Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run +# fabricmanager for NVSwitch +Source10: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz +Source11: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-%{tesla_ver}-archive.tar.xz + # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf +Source203: nvidia-fabricmanager.service +Source204: nvidia-fabricmanager.cfg # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf @@ -41,11 +54,20 @@ BuildRequires: %{_cross_os}kernel-5.15-archive %description %{summary}. +%package fabricmanager +Summary: NVIDIA fabricmanager config and service files +Requires: %{name}-tesla(fabricmanager) + +%description fabricmanager +%{summary}. + %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} License: %{spdx_id} Requires: %{name} +Requires: %{name}-fabricmanager +Provides: %{name}-tesla(fabricmanager) %description tesla-%{tesla_major} %{summary} @@ -55,6 +77,10 @@ Requires: %{name} # the driver in the current run sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x +# Extract fabricmanager archive. Use `tar` rather than `%%setup` since the +# correct source is architecture-dependent. 
+tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz + %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -101,6 +127,11 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf +# NVIDIA fabric manager service unit and config +install -p -m 0644 %{S:203} %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia +install -p -m 0644 %{S:204} %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg + # Begin NVIDIA tesla driver pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions @@ -178,6 +209,18 @@ install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/ popd +# Begin NVIDIA fabric manager binaries and topologies +pushd fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +install -p -m 0755 bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin + +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch +for t in share/nvidia/nvswitch/*_topology ; do + install -p -m 0644 "${t}" %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch +done + +popd + %files %{_cross_attribution_file} %dir %{_cross_libexecdir}/nvidia @@ -185,12 +228,13 @@ popd %dir %{_cross_datadir}/nvidia %dir %{_cross_libdir}/modules-load.d %dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%dir %{_cross_factorydir}%{_cross_sysconfdir}/nvidia %{_cross_tmpfilesdir}/nvidia.conf -%{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf %files tesla-%{tesla_major} %license %{license_file} +%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{_cross_libdir}/nvidia/tesla @@ -201,6 +245,15 @@ popd # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager +%{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit + +# nvswitch topologies +%dir %{_cross_datadir}/nvidia/tesla/nvswitch +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxa100_hgxa100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgx2_hgx2_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh100_hgxh100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh800_hgxh800_topology # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml @@ -332,3 +385,7 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} + +%files fabricmanager +%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg +%{_cross_unitdir}/nvidia-fabricmanager.service diff --git a/packages/kmod-5.15-nvidia/nvidia-fabricmanager.cfg b/packages/kmod-5.15-nvidia/nvidia-fabricmanager.cfg new file mode 100644 index 00000000..f8dc08ea --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-fabricmanager.cfg @@ -0,0 +1,34 @@ +# Modern, systemd-aware settings: +# - Log to journal via stderr +# - Keep running in the 
foreground +LOG_LEVEL=4 +LOG_FILE_NAME= +DAEMONIZE=0 + +# Use Unix domain sockets instead of localhost ports. +UNIX_SOCKET_PATH=/run/nvidia/fabricmanager.sock +FM_CMD_UNIX_SOCKET_PATH=/run/nvidia/fabricmanager-cmd.sock + +# Start Fabric Manager in bare metal or full pass through virtualization mode. +FABRIC_MODE=0 +FABRIC_MODE_RESTART=0 + +# Terminate on NVSwitch and GPU config failure. +FM_STAY_RESIDENT_ON_FAILURES=0 + +# When there is a GPU to NVSwitch NVLink failure, remove the GPU with the failure +# from NVLink P2P capability. +ACCESS_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch to NVSwitch NVLink failure, exit Fabric Manager. +TRUNK_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch failure or an NVSwitch is excluded, abort Fabric Manager. +NVSWITCH_FAILURE_MODE=0 + +# When Fabric Manager service is stopped or terminated, abort all running CUDA jobs. +ABORT_CUDA_JOBS_ON_FM_EXIT=1 + +# Path to topology and database files. +TOPOLOGY_FILE_PATH=/usr/share/nvidia/tesla/nvswitch +DATABASE_PATH=/usr/share/nvidia/tesla/nvswitch diff --git a/packages/kmod-5.15-nvidia/nvidia-fabricmanager.service b/packages/kmod-5.15-nvidia/nvidia-fabricmanager.service new file mode 100644 index 00000000..62ae1368 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-fabricmanager.service @@ -0,0 +1,16 @@ +[Unit] +Description=NVIDIA fabric manager service + +[Service] +ExecStart=/usr/libexec/nvidia/tesla/bin/nv-fabricmanager -c /etc/nvidia/fabricmanager.cfg +Type=simple +TimeoutSec=0 +RestartSec=5 +Restart=always +RemainAfterExit=true +StandardError=journal+console +SuccessExitStatus=255 +LimitCORE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in index d4763f28..2bee2471 100644 --- a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in @@ -1,2 +1,4 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - +C /etc/nvidia/fabricmanager.cfg - - - - +d /run/nvidia 0700 root root - diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 0fd3c461..d1ab1a3c 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -22,6 +22,16 @@ url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535. 
sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" force-upstream = true +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-535.161.07-archive.tar.xz" +sha512 = "868b35d567e4c6dccbff0f7e8f74bc55781c8d71db995fd9e471829afec0b44fd430caba964377052678e244d18ea999133487f9a3c50c7289f381480b24c55d" +force-upstream = true + +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-535.161.07-archive.tar.xz" +sha512 = "f37f7a24e31dd6ed184d1041616abb8cfcb0ddaec79778930db79bbef8b23b3d468daaa9c156a6cf7a7f2ffc0507e78e2bb6215f70bc39d11bb0ee16c5ef4c82" +force-upstream = true + [build-dependencies] glibc = { path = "../glibc" } kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index e54e300c..5b08cd60 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -2,6 +2,12 @@ %global tesla_minor 161 %global tesla_patch 07 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} +%if "%{?_cross_arch}" == "aarch64" +%global fm_arch sbsa +%else +%global fm_arch %{_cross_arch} +%endif + %global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) %global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) @@ -21,13 +27,20 @@ Summary: NVIDIA drivers for the 6.1 kernel License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ -# NVIDIA .run scripts from 0 to 199 +# NVIDIA archives from 0 to 199 +# NVIDIA .run scripts for kernel and userspace drivers Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run +# fabricmanager for NVSwitch +Source10: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz +Source11: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-%{tesla_ver}-archive.tar.xz + # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf +Source203: nvidia-fabricmanager.service +Source204: nvidia-fabricmanager.cfg # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf @@ -41,11 +54,20 @@ BuildRequires: %{_cross_os}kernel-6.1-archive %description %{summary}. +%package fabricmanager +Summary: NVIDIA fabricmanager config and service files +Requires: %{name}-tesla(fabricmanager) + +%description fabricmanager +%{summary}. + %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} License: %{spdx_id} Requires: %{name} +Requires: %{name}-fabricmanager +Provides: %{name}-tesla(fabricmanager) %description tesla-%{tesla_major} %{summary} @@ -55,6 +77,10 @@ Requires: %{name} # the driver in the current run sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x +# Extract fabricmanager archive. Use `tar` rather than `%%setup` since the +# correct source is architecture-dependent. 
+tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz + %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -101,6 +127,11 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf +# NVIDIA fabric manager service unit and config +install -p -m 0644 %{S:203} %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia +install -p -m 0644 %{S:204} %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg + # Begin NVIDIA tesla driver pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} # We install bins and libs in a versioned directory to prevent collisions with future drivers versions @@ -178,6 +209,18 @@ install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/ popd +# Begin NVIDIA fabric manager binaries and topologies +pushd fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +install -p -m 0755 bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin + +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch +for t in share/nvidia/nvswitch/*_topology ; do + install -p -m 0644 "${t}" %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch +done + +popd + %files %{_cross_attribution_file} %dir %{_cross_libexecdir}/nvidia @@ -185,12 +228,13 @@ popd %dir %{_cross_datadir}/nvidia %dir %{_cross_libdir}/modules-load.d %dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%dir %{_cross_factorydir}%{_cross_sysconfdir}/nvidia %{_cross_tmpfilesdir}/nvidia.conf -%{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf %files tesla-%{tesla_major} %license %{license_file} +%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{_cross_libdir}/nvidia/tesla @@ -201,6 +245,15 @@ popd # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager +%{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit + +# nvswitch topologies +%dir %{_cross_datadir}/nvidia/tesla/nvswitch +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxa100_hgxa100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgx2_hgx2_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh100_hgxh100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh800_hgxh800_topology # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml @@ -332,3 +385,7 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} + +%files fabricmanager +%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg +%{_cross_unitdir}/nvidia-fabricmanager.service diff --git a/packages/kmod-6.1-nvidia/nvidia-fabricmanager.cfg b/packages/kmod-6.1-nvidia/nvidia-fabricmanager.cfg new file mode 100644 index 00000000..f8dc08ea --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-fabricmanager.cfg @@ -0,0 +1,34 @@ +# Modern, systemd-aware settings: +# - Log to journal via stderr +# - Keep running in the foreground 
+LOG_LEVEL=4 +LOG_FILE_NAME= +DAEMONIZE=0 + +# Use Unix domain sockets instead of localhost ports. +UNIX_SOCKET_PATH=/run/nvidia/fabricmanager.sock +FM_CMD_UNIX_SOCKET_PATH=/run/nvidia/fabricmanager-cmd.sock + +# Start Fabric Manager in bare metal or full pass through virtualization mode. +FABRIC_MODE=0 +FABRIC_MODE_RESTART=0 + +# Terminate on NVSwitch and GPU config failure. +FM_STAY_RESIDENT_ON_FAILURES=0 + +# When there is a GPU to NVSwitch NVLink failure, remove the GPU with the failure +# from NVLink P2P capability. +ACCESS_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch to NVSwitch NVLink failure, exit Fabric Manager. +TRUNK_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch failure or an NVSwitch is excluded, abort Fabric Manager. +NVSWITCH_FAILURE_MODE=0 + +# When Fabric Manager service is stopped or terminated, abort all running CUDA jobs. +ABORT_CUDA_JOBS_ON_FM_EXIT=1 + +# Path to topology and database files. +TOPOLOGY_FILE_PATH=/usr/share/nvidia/tesla/nvswitch +DATABASE_PATH=/usr/share/nvidia/tesla/nvswitch diff --git a/packages/kmod-6.1-nvidia/nvidia-fabricmanager.service b/packages/kmod-6.1-nvidia/nvidia-fabricmanager.service new file mode 100644 index 00000000..62ae1368 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-fabricmanager.service @@ -0,0 +1,16 @@ +[Unit] +Description=NVIDIA fabric manager service + +[Service] +ExecStart=/usr/libexec/nvidia/tesla/bin/nv-fabricmanager -c /etc/nvidia/fabricmanager.cfg +Type=simple +TimeoutSec=0 +RestartSec=5 +Restart=always +RemainAfterExit=true +StandardError=journal+console +SuccessExitStatus=255 +LimitCORE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in index d4763f28..2bee2471 100644 --- a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in @@ -1,2 +1,4 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - +C /etc/nvidia/fabricmanager.cfg - - - - +d /run/nvidia 0700 root root - From 9becfff4f668889b98e5d206591f52177ab75796 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Fri, 12 Apr 2024 22:58:12 +0000 Subject: [PATCH 1201/1356] Drop k8s 1.25 metal and VMware variants This removes the metal and VMware 1.25 variants. This version of Kubernetes has gone end-of-life and these variants are no longer supported. 
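Nodes running the removed variants should be replaced with a supported release; a hedged example of building a current bare metal variant with the existing tooling (the variant and architecture shown are placeholders, pick whatever matches the cluster):

    # build a supported replacement for metal-k8s-1.25
    cargo make -e BUILDSYS_VARIANT=metal-k8s-1.28 \
               -e BUILDSYS_ARCH=x86_64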
Signed-off-by: Gavin Inglis --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index be7222da..8e865b8a 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,6 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.25` * `vmware-k8s-1.26` * `vmware-k8s-1.27` * `vmware-k8s-1.28` @@ -93,7 +92,6 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: The following variants are designed to be Kubernetes worker nodes on bare metal: -* `metal-k8s-1.25` * `metal-k8s-1.26` * `metal-k8s-1.27` * `metal-k8s-1.28` @@ -102,7 +100,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: The following variants are no longer supported: * All Kubernetes variants using Kubernetes 1.22 and earlier -* Bare metal and VMware variants using Kubernetes 1.24 and earlier +* Bare metal and VMware variants using Kubernetes 1.25 and earlier We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). From 9261123ae4148e8ead993e0ddb0a0a9e3706716b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 12 Apr 2024 22:48:26 +0000 Subject: [PATCH 1202/1356] kernel-5.10: remove variant sensitivity Now that the only in-tree users of the 5.10 kernel are aws variants, merge the aws-specific config into the main config and remove the variant sensitive marker from the package. Signed-off-by: Ben Cressey --- packages/kernel-5.10/Cargo.toml | 1 - packages/kernel-5.10/config-bottlerocket | 13 ++ packages/kernel-5.10/config-bottlerocket-aws | 13 -- .../kernel-5.10/config-bottlerocket-metal | 115 ------------------ .../kernel-5.10/config-bottlerocket-vmware | 0 packages/kernel-5.10/kernel-5.10.spec | 6 +- 6 files changed, 14 insertions(+), 134 deletions(-) delete mode 100644 packages/kernel-5.10/config-bottlerocket-aws delete mode 100644 packages/kernel-5.10/config-bottlerocket-metal delete mode 100644 packages/kernel-5.10/config-bottlerocket-vmware diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index a19773c6..700364ed 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -6,7 +6,6 @@ publish = false build = "../build.rs" [package.metadata.build-package] -variant-sensitive = "platform" package-name = "kernel-5.10" [lib] diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index 48a9d61a..b7b4baa0 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -177,3 +177,16 @@ CONFIG_SCSI_SMARTPQI=m # Disable AL port of BBR2 congestion algorithm # CONFIG_TCP_CONG_BBR2 is not set +# Support boot from IDE disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m diff --git a/packages/kernel-5.10/config-bottlerocket-aws b/packages/kernel-5.10/config-bottlerocket-aws deleted file mode 100644 index 6b4ed404..00000000 --- a/packages/kernel-5.10/config-bottlerocket-aws +++ /dev/null @@ -1,13 +0,0 @@ -# Support boot from IDE disks -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y - -# Mellanox network support -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE=m -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_INFINIBAND=m -CONFIG_MLXFW=m 
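With the platform-specific fragments folded into config-bottlerocket, the kconfig merge in the spec (adjusted later in this patch) no longer needs a per-platform file; a rough sketch of the resulting invocation on x86_64, with paths simplified for illustration:

    # merge the remaining shared fragments into the kernel config
    scripts/kconfig/merge_config.sh ../config-microcode config-bottlerocket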
diff --git a/packages/kernel-5.10/config-bottlerocket-metal b/packages/kernel-5.10/config-bottlerocket-metal deleted file mode 100644 index f6a0a5a1..00000000 --- a/packages/kernel-5.10/config-bottlerocket-metal +++ /dev/null @@ -1,115 +0,0 @@ -# This file holds all the settings that are specific to hardware enablement -# we do for the metal variants. - -# SATA support -CONFIG_BLK_DEV_SD=y -CONFIG_SATA_AHCI=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y - -# AMD network support -CONFIG_NET_VENDOR_AMD=y -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set - -# Broadcom network support -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_TIGON3_HWMON=y -CONFIG_TIGON3=m -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -CONFIG_BNXT=m - -# Chelsio network support -CONFIG_NET_VENDOR_CHELSIO=y -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF=m -# CONFIG_CHELSIO_T4_DCB is not set -# CONFIG_CHELSIO_INLINE_CRYPTO is not set -# CONFIG_INFINIBAND_CXGB4 is not set -# CONFIG_ISCSI_TARGET_CXGB4 is not set - -# Cisco UCS network support -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -CONFIG_INFINIBAND_USNIC=m - -# Emulex network support -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_BE2=y -CONFIG_BE2NET_BE3=y -CONFIG_BE2NET_HWMON=y -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y - -# Huawei network support -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m - -# Intel network support -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m - -# Intel 10G network support -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -# CONFIG_INFINIBAND_I40IW is not set -CONFIG_ICE=m -CONFIG_PLDMFW=y -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_FM10K=m - -# Mellanox network support -CONFIG_MLXFW=m -CONFIG_MLX5_CORE=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE_EN=y -CONFIG_NET_SWITCHDEV=y - -# Myricom network support -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y - -# Pensando network support -CONFIG_NET_VENDOR_PENSANDO=y -CONFIG_IONIC=m - -# Solarflare network support -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_SRIOV=y -# CONFIG_SFC_MCDI_LOGGING is not set -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_FALCON=m - -# Cisco UCS HBA support -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_SNIC=m - -# LSI Logic's SAS based RAID controllers -CONFIG_MEGARAID_SAS=y - -# Microsemi PQI controllers -CONFIG_SCSI_SMARTPQI=y - -# Support for virtio scsi boot devices for other cloud providers -CONFIG_SCSI_VIRTIO=y - -# Intel Volume Management Device driver, to support boot disks in a separate -# PCI domain. -CONFIG_VMD=y - -# Support handling of compressed firmware -CONFIG_FW_LOADER_COMPRESS=y diff --git a/packages/kernel-5.10/config-bottlerocket-vmware b/packages/kernel-5.10/config-bottlerocket-vmware deleted file mode 100644 index e69de29b..00000000 diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index d616a599..ec405edb 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -9,9 +9,6 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/f2f4a85aff9b0efec71d75bc29454ce8ab73974486a2a8ba541343cee1c7a622/kernel-5.10.213-201.855.amzn2.src.rpm Source100: config-bottlerocket -Source101: config-bottlerocket-aws -Source102: config-bottlerocket-metal -Source103: config-bottlerocket-vmware # Help out-of-tree module builds run `make prepare` automatically. 
Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -103,8 +100,7 @@ scripts/kconfig/merge_config.sh \ %if "%{_cross_arch}" == "x86_64" ../config-microcode \ %endif - %{SOURCE100} \ - %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} + %{SOURCE100} rm -f ../config-* ../*.patch From cd199bc69206f3f33756034f6950f986f781fca7 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 22 Apr 2024 18:37:02 +0000 Subject: [PATCH 1203/1356] kernel-5.15: remove variant sensitivity Now that the only in-tree users of the 5.15 kernel are aws variants, merge the aws-specific config into the main config and remove the variant sensitive marker from the package. Signed-off-by: Ben Cressey --- packages/kernel-5.15/Cargo.toml | 1 - packages/kernel-5.15/config-bottlerocket | 18 +++ packages/kernel-5.15/config-bottlerocket-aws | 17 -- .../kernel-5.15/config-bottlerocket-metal | 152 ------------------ .../kernel-5.15/config-bottlerocket-vmware | 16 -- packages/kernel-5.15/kernel-5.15.spec | 6 +- 6 files changed, 19 insertions(+), 191 deletions(-) delete mode 100644 packages/kernel-5.15/config-bottlerocket-aws delete mode 100644 packages/kernel-5.15/config-bottlerocket-metal delete mode 100644 packages/kernel-5.15/config-bottlerocket-vmware diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index a4ef8ad3..bf0d1d9e 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -6,7 +6,6 @@ publish = false build = "../build.rs" [package.metadata.build-package] -variant-sensitive = "platform" package-name = "kernel-5.15" [lib] diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 17c21158..1d36e8ee 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -186,3 +186,21 @@ CONFIG_ISCSI_TARGET=m # Disable edac driver for intel 10nm memory controllers # CONFIG_EDAC_I10NM is not set + +# Support boot from IDE disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m +# CONFIG_MLX5_FPGA is not set +# CONFIG_MLX5_IPSEC is not set +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_SF is not set diff --git a/packages/kernel-5.15/config-bottlerocket-aws b/packages/kernel-5.15/config-bottlerocket-aws deleted file mode 100644 index 1bfa27e1..00000000 --- a/packages/kernel-5.15/config-bottlerocket-aws +++ /dev/null @@ -1,17 +0,0 @@ -# Support boot from IDE disks -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y - -# Mellanox network support -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE=m -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_INFINIBAND=m -CONFIG_MLXFW=m -# CONFIG_MLX5_FPGA is not set -# CONFIG_MLX5_IPSEC is not set -# CONFIG_MLX5_CORE_IPOIB is not set -# CONFIG_MLX5_SF is not set diff --git a/packages/kernel-5.15/config-bottlerocket-metal b/packages/kernel-5.15/config-bottlerocket-metal deleted file mode 100644 index 6e38510c..00000000 --- a/packages/kernel-5.15/config-bottlerocket-metal +++ /dev/null @@ -1,152 +0,0 @@ -# This file holds all the settings that are specific to hardware enablement -# we do for the metal variants. 
- -# SATA support -CONFIG_BLK_DEV_SD=y -CONFIG_SATA_AHCI=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y - -# AMD network support -CONFIG_NET_VENDOR_AMD=y -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set - -# Broadcom network support -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_TIGON3_HWMON=y -CONFIG_TIGON3=m -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -CONFIG_BNXT=m - -# Chelsio network support -CONFIG_NET_VENDOR_CHELSIO=y -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF=m -# CONFIG_CHELSIO_T4_DCB is not set -# CONFIG_CHELSIO_INLINE_CRYPTO is not set -# CONFIG_INFINIBAND_CXGB4 is not set -# CONFIG_ISCSI_TARGET_CXGB4 is not set - -# Cisco UCS network support -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -CONFIG_INFINIBAND_USNIC=m - -# Emulex network support -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_BE2=y -CONFIG_BE2NET_BE3=y -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -CONFIG_BE2NET_HWMON=y - -# Huawei network support -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m - -# Intel network support -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m - -# Intel 10G network support -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_ICE=m -# CONFIG_INFINIBAND_IRDMA is not set -CONFIG_PLDMFW=y -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_FM10K=m - -# Mellanox network support -CONFIG_MLXFW=m -CONFIG_MLX5_CORE=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE_EN=y -CONFIG_NET_SWITCHDEV=y -# CONFIG_MLX5_FPGA is not set -# CONFIG_MLX5_IPSEC is not set -# CONFIG_MLX5_CORE_IPOIB is not set -# CONFIG_MLX5_SF is not set - -# Myricom network support -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y - -# Pensando network support -CONFIG_NET_VENDOR_PENSANDO=y -CONFIG_IONIC=m - -# Solarflare network support -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_SRIOV=y -# CONFIG_SFC_MCDI_LOGGING is not set -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_FALCON=m - -# QLogic network support -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QED=m -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -# CONFIG_INFINIBAND_QEDR is not set -# CONFIG_QEDF is not set -# CONFIG_QEDI is not set -# CONFIG_QLA3XXX is not set -CONFIG_QLCNIC=m -CONFIG_QLCNIC_SRIOV=y -# CONFIG_QLCNIC_DCB is not set -# CONFIG_QLCNIC_HWMON is not set -# CONFIG_NETXEN_NIC is not set - -# Cisco UCS HBA support -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_SNIC=m - -# LSI Logic's SAS based RAID controllers -CONFIG_SCSI_MPT3SAS=y -CONFIG_MEGARAID_SAS=y - -# Microsemi PQI controllers -CONFIG_SCSI_SMARTPQI=y - -# Support for virtio scsi boot devices for other cloud providers -CONFIG_SCSI_VIRTIO=y - -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. -CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set - -# Intel Volume Management Device driver, to support boot disks in a separate -# PCI domain. 
-CONFIG_VMD=y - -# Support handling of compressed firmware -CONFIG_FW_LOADER_COMPRESS=y diff --git a/packages/kernel-5.15/config-bottlerocket-vmware b/packages/kernel-5.15/config-bottlerocket-vmware deleted file mode 100644 index ec1cc1a5..00000000 --- a/packages/kernel-5.15/config-bottlerocket-vmware +++ /dev/null @@ -1,16 +0,0 @@ -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. -CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 26dad62e..0a9ca083 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -9,9 +9,6 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/29a1d43caffcebd032ece82a974ba5db68b1354f508a35f6df62d8e1f6106ee8/kernel-5.15.152-100.162.amzn2.src.rpm Source100: config-bottlerocket -Source101: config-bottlerocket-aws -Source102: config-bottlerocket-metal -Source103: config-bottlerocket-vmware # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch @@ -98,8 +95,7 @@ scripts/kconfig/merge_config.sh \ %if "%{_cross_arch}" == "x86_64" ../config-microcode \ %endif - %{SOURCE100} \ - %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} + %{SOURCE100} rm -f ../config-* ../*.patch From 747d55a2280e62ae248191c4eb29c893e1593259 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 22 Apr 2024 17:17:54 +0000 Subject: [PATCH 1204/1356] kernel-5.10: update to 5.10.214 Rebase to Amazon Linux upstream version 5.10.214-202.855.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 700364ed..88268418 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/f2f4a85aff9b0efec71d75bc29454ce8ab73974486a2a8ba541343cee1c7a622/kernel-5.10.213-201.855.amzn2.src.rpm" -sha512 = "9e61a292106ab4872ff8bd89aa0c32613c7e78f3d6776ada31ba1d63e26f923a5b08e4fb2e5c927459cc057476d0e8e45c2f125ee226a6d402ba0c4025d78cde" +url = "https://cdn.amazonlinux.com/blobstore/6ed4450682e3cd4bb4a66245eff09d376f623d8bb7646a386814fbf6d8e55691/kernel-5.10.214-202.855.amzn2.src.rpm" +sha512 = "7f201e8e747ebf3b1d93ad39ced49436d276e8446fc84a56cf971beb1422f8f727c5250750dfee8451f377189ac884d0bf7a156fcaa4d63732a0ee8c0e671394" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index ec405edb..02132fee 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.213 +Version: 5.10.214 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/f2f4a85aff9b0efec71d75bc29454ce8ab73974486a2a8ba541343cee1c7a622/kernel-5.10.213-201.855.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/6ed4450682e3cd4bb4a66245eff09d376f623d8bb7646a386814fbf6d8e55691/kernel-5.10.214-202.855.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 4db1bc339adb276f0a7fae3cb18b1c8407ebc1f6 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 22 Apr 2024 17:18:27 +0000 Subject: [PATCH 1205/1356] kernel-5.15: update to 5.15.153 Rebase to Amazon Linux upstream version 5.15.153-100.162.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index bf0d1d9e..8e757bb6 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/29a1d43caffcebd032ece82a974ba5db68b1354f508a35f6df62d8e1f6106ee8/kernel-5.15.152-100.162.amzn2.src.rpm" -sha512 = "3d0ea5442f26d315d2d96968c4c1b8a5b2a2bd1a12ac0892351df9ef837efe2bae90cc9d4f3687acf8a5eddb96971d805407fac9dcdcb1d24d7cfef304eda77a" +url = "https://cdn.amazonlinux.com/blobstore/b0b83af53711690ad1bbc3de7e01e03f6d93582a3fc506cf79a063c4937833aa/kernel-5.15.153-100.162.amzn2.src.rpm" +sha512 = "3d84822f9401d8902b6ecf84cca8d0546be67c9516a51a22e4e0036741f74210a8398159a44e6aff6ee9481dbfd44a297f56b84ef4da4336af2c2e9efcaca680" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 0a9ca083..09116405 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.152 +Version: 5.15.153 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/29a1d43caffcebd032ece82a974ba5db68b1354f508a35f6df62d8e1f6106ee8/kernel-5.15.152-100.162.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/b0b83af53711690ad1bbc3de7e01e03f6d93582a3fc506cf79a063c4937833aa/kernel-5.15.153-100.162.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From ace61c5510f029b0012a820ec46637cf1304d339 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 22 Apr 2024 17:18:53 +0000 Subject: [PATCH 1206/1356] kernel-6.1: update to 6.1.84 Rebase to Amazon Linux upstream version 6.1.84-99.169.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index cc0ced88..3f7bc2aa 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -14,8 +14,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/4004a1fe6830de6cabbf60ae7345aef54260400b86ac4973fd29cc6a31d9bf9c/kernel-6.1.82-99.168.amzn2023.src.rpm" -sha512 = "249f3b440248062cc1b67fe89c0bfc75d2b6f6cdac63c539884c7257d334ef7bdaeb2f87fffbfd12d4a5389cc65627b1a64d7c6b3a32b7247e222811dc06f6bc" +url = "https://cdn.amazonlinux.com/al2023/blobstore/bdca6b79db0d3d5ad549b61951208fbf474daebe38ca619f8c706070dc252239/kernel-6.1.84-99.169.amzn2023.src.rpm" +sha512 = "3e1b219fc89e5c051b321088ad464db1e1278bc9e8ca90ffa2b17853a9db23310f7a7f370e3253671c7cf74e492cde4effb38c38500da3b391c7295027b134e1" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 1ddb005a..196e6442 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.82 +Version: 6.1.84 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/4004a1fe6830de6cabbf60ae7345aef54260400b86ac4973fd29cc6a31d9bf9c/kernel-6.1.82-99.168.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/bdca6b79db0d3d5ad549b61951208fbf474daebe38ca619f8c706070dc252239/kernel-6.1.84-99.169.amzn2023.src.rpm Source100: config-bottlerocket Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal From d2fe8600791240273f553ede502418a105ffb452 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 22 Apr 2024 17:22:43 +0000 Subject: [PATCH 1207/1356] Enable CONFIG_FS_ENCRYPTION in kernel 6.1 Enable encryption by removing the bottlerocket override (upstream already enables CONFIG_FS_ENCRYPTION). --- packages/kernel-6.1/config-bottlerocket | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 812bf19c..6cb6147a 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -1,11 +1,6 @@ # Because Bottlerocket does not have an initramfs, modules required to mount # the root filesystem must be set to y. -# disable filesystem encryption support as it may lock users into certain -# filesystems inadvertantly. 
For now EBS volume encryption or dm-crypt seems -# to be the more universal choice. -# CONFIG_FS_ENCRYPTION is not set - # The root filesystem is ext4 CONFIG_EXT4_FS=y From 803804bfbe5e3a4879ee3448d6494beef2f0af1d Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 23 Apr 2024 22:34:48 +0000 Subject: [PATCH 1208/1356] Enable CONFIG_FS_ENCRYPTION in kernel 5.15 --- packages/kernel-5.15/config-bottlerocket | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 1d36e8ee..11cde58f 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -204,3 +204,6 @@ CONFIG_MLXFW=m # CONFIG_MLX5_IPSEC is not set # CONFIG_MLX5_CORE_IPOIB is not set # CONFIG_MLX5_SF is not set + +# Support filesystem encryption for ext4 +CONFIG_FS_ENCRYPTION=y From d202d4e4a36f920263e8aa1b81adec60aeacc25a Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 24 Apr 2024 22:17:10 +0000 Subject: [PATCH 1209/1356] Enable CONFIG_FS_ENCRYPTION_INLINE_CRYPT --- packages/kernel-5.15/config-bottlerocket | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 11cde58f..454d1666 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -207,3 +207,4 @@ CONFIG_MLXFW=m # Support filesystem encryption for ext4 CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y From ea112046a709aec7c01d8ba3e7731b7353ac641d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 6 Apr 2024 18:23:47 +0000 Subject: [PATCH 1210/1356] packages: add libkcapi The `sha512hmac` program is required to check kernel integrity when running in FIPS mode. Signed-off-by: Ben Cressey --- packages/libkcapi/Cargo.toml | 19 ++++++++ packages/libkcapi/libkcapi.spec | 79 +++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 packages/libkcapi/Cargo.toml create mode 100644 packages/libkcapi/libkcapi.spec diff --git a/packages/libkcapi/Cargo.toml b/packages/libkcapi/Cargo.toml new file mode 100644 index 00000000..cd7ad6a2 --- /dev/null +++ b/packages/libkcapi/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "libkcapi" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[lib] +path = "../packages.rs" + +[package.metadata.build-package] +releases-url = "https://github.com/smuellerDD/libkcapi/releases" + +[[package.metadata.build-package.external-files]] +url = "https://github.com/smuellerDD/libkcapi/archive/v1.5.0/libkcapi-1.5.0.tar.gz" +sha512 = "510d0606cdc9479a77ed07bd3ac59b07c3996402a85cee012e6836d0a31cb06f5b7f715cdb76f3745784aab3154595caec4537b4c774236a139ebfe6e1a8be9b" + +[build-dependencies] +glibc = { path = "../glibc" } diff --git a/packages/libkcapi/libkcapi.spec b/packages/libkcapi/libkcapi.spec new file mode 100644 index 00000000..f68fc293 --- /dev/null +++ b/packages/libkcapi/libkcapi.spec @@ -0,0 +1,79 @@ +# libkcapi since 85bce6035b (1.5.0) uses sha512hmac with the same key for all +# self-checks. Earlier versions used sha256hmac with a different key to check +# the shared library. +%global openssl_sha512_hmac openssl sha512 -hmac FIPS-FTW-RHT2009 -hex + +# We need to compute the HMAC after the binaries have been stripped. 
+%define __spec_install_post\ +%{?__debug_package:%{__debug_install_post}}\ +%{__arch_install_post}\ +%{__os_install_post}\ +cd %{buildroot}/%{_cross_bindir}\ +%openssl_sha512_hmac kcapi-hasher\\\ + | awk '{ print $2 }' > .kcapi-hasher.hmac\ +ln -s .kcapi-hasher.hmac .sha512hmac.hmac\ +cd %{buildroot}/%{_cross_libdir}\ +%openssl_sha512_hmac libkcapi.so.%{version}\\\ + | awk '{ print $2 }' > .libkcapi.so.%{version}.hmac\ +ln -s .libkcapi.so.%{version}.hmac .libkcapi.so.1.hmac\ +%{nil} + +Name: %{_cross_os}libkcapi +Version: 1.5.0 +Release: 1%{?dist} +Summary: Library for kernel crypto API +License: BSD-3-Clause OR GPL-2.0-only +URL: https://www.chronox.de/libkcapi/html/index.html +Source0: https://github.com/smuellerDD/libkcapi/archive/v%{version}/libkcapi-%{version}.tar.gz +BuildRequires: %{_cross_os}glibc-devel + +%description +%{summary}. + +%package devel +Summary: Files for development using the library for kernel crypto API +Requires: %{name} + +%description devel +%{summary}. + +%prep +%autosetup -n libkcapi-%{version} -p1 + +%build +autoreconf -fi +%cross_configure \ + --enable-static \ + --enable-shared \ + --enable-kcapi-hasher \ + +%force_disable_rpath + +%make_build + +%install +%make_install + +ln -s kcapi-hasher %{buildroot}%{_cross_bindir}/sha512hmac +find %{buildroot} -type f -name '*.hmac' -delete + +%files +%license COPYING COPYING.bsd COPYING.gplv2 +%{_cross_attribution_file} +%{_cross_libdir}/*.so.* +%{_cross_libdir}/.*.so.*.hmac +%{_cross_bindir}/kcapi-hasher +%{_cross_bindir}/.kcapi-hasher.hmac +%{_cross_bindir}/sha512hmac +%{_cross_bindir}/.sha512hmac.hmac + +%exclude %{_cross_libexecdir}/libkcapi +%exclude %{_cross_mandir} + +%files devel +%{_cross_libdir}/*.a +%{_cross_libdir}/*.so +%{_cross_includedir}/kcapi.h +%{_cross_pkgconfigdir}/*.pc + +%changelog From 6931dcc80550dba8218d1c9482573c0eefa09fa3 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 6 Apr 2024 19:01:43 +0000 Subject: [PATCH 1211/1356] kernel-5.10, -5.15: conflict with FIPS image feature No FIPS certification is planned for the upstream Amazon Linux 5.10 and 5.15 kernels, so prevent them from being installed in the image when the FIPS image feature flag is enabled. Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 3 +++ packages/kernel-5.15/kernel-5.15.spec | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 02132fee..75dfd7ed 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -42,6 +42,9 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# The 5.10 kernel is not FIPS certified. +Conflicts: %{_cross_os}image-feature(fips) + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 09116405..55d32704 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -37,6 +37,9 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# The 5.15 kernel is not FIPS certified. 
+Conflicts: %{_cross_os}image-feature(fips) + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} From 4369d0741268791b4a1a44b37d65194825c0a0fa Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 6 Apr 2024 19:04:55 +0000 Subject: [PATCH 1212/1356] kernel-6.1: add FIPS modules for crypto self-test Signed-off-by: Ben Cressey --- .../check-fips-modules.drop-in.conf.in | 3 ++ packages/kernel-6.1/fipsmodules-aarch64 | 52 +++++++++++++++++++ packages/kernel-6.1/fipsmodules-x86_64 | 44 ++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 34 ++++++++++++ 4 files changed, 133 insertions(+) create mode 100644 packages/kernel-6.1/check-fips-modules.drop-in.conf.in create mode 100644 packages/kernel-6.1/fipsmodules-aarch64 create mode 100644 packages/kernel-6.1/fipsmodules-x86_64 diff --git a/packages/kernel-6.1/check-fips-modules.drop-in.conf.in b/packages/kernel-6.1/check-fips-modules.drop-in.conf.in new file mode 100644 index 00000000..c5585479 --- /dev/null +++ b/packages/kernel-6.1/check-fips-modules.drop-in.conf.in @@ -0,0 +1,3 @@ +[Unit] +Requires=fips-modprobe@__FIPS_MODULE__.service +After=fips-modprobe@__FIPS_MODULE__.service diff --git a/packages/kernel-6.1/fipsmodules-aarch64 b/packages/kernel-6.1/fipsmodules-aarch64 new file mode 100644 index 00000000..16831394 --- /dev/null +++ b/packages/kernel-6.1/fipsmodules-aarch64 @@ -0,0 +1,52 @@ +sha1 +sha224 +sha256 +sha384 +sha512 +sha3-224 +sha3-256 +sha3-384 +sha3-512 +crc32c +crct10dif +ghash +xxhash64 +ghash-ce +sha1-ce +sha2-ce +sha256-arm64 +sha3-ce +sha512-arm64 +sha512-ce +cipher_null +des3_ede +aes +cfb +dh +ecdh +aes-arm64 +aes-ce-blk +aes-ce-ccm +aes-ce-cipher +aes-neon-blk +aes-neon-bs +ecb +cbc +ctr +xts +gcm +ccm +authenc +hmac +cmac +ofb +cts +lzo +essiv +seqiv +drbg +aead +cryptomgr +tcrypt +crypto_user +rsa diff --git a/packages/kernel-6.1/fipsmodules-x86_64 b/packages/kernel-6.1/fipsmodules-x86_64 new file mode 100644 index 00000000..a674fe57 --- /dev/null +++ b/packages/kernel-6.1/fipsmodules-x86_64 @@ -0,0 +1,44 @@ +sha1 +sha224 +sha256 +sha384 +sha512 +sha3-224 +sha3-256 +sha3-384 +sha3-512 +crc32c +crct10dif +ghash +xxhash64 +ghash_clmulni_intel +sha1-ssse3 +sha256-ssse3 +sha512-ssse3 +cipher_null +des3_ede +aes +cfb +dh +ecdh +aesni-intel +ecb +cbc +ctr +xts +gcm +ccm +authenc +hmac +cmac +ofb +cts +lzo +essiv +seqiv +drbg +aead +cryptomgr +tcrypt +crypto_user +rsa diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 196e6442..8e4fd580 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -13,6 +13,12 @@ Source101: config-bottlerocket-aws Source102: config-bottlerocket-metal Source103: config-bottlerocket-vmware +# This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs +# after placing AL2023 in FIPS mode. +Source200: check-fips-modules.drop-in.conf.in +Source201: fipsmodules-x86_64 +Source202: fipsmodules-aarch64 + # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Expose tools/* targets for out-of-tree module builds. @@ -43,6 +49,9 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# Pull in FIPS-related files if needed. 
+Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} @@ -73,6 +82,14 @@ Summary: Header files for the Linux kernel for use by glibc %description headers %{summary}. +%package fips +Summary: FIPS related configuration for the Linux kernel +Requires: (%{_cross_os}image-feature(fips) and %{name}) +Conflicts: %{_cross_os}image-feature(no-fips) + +%description fips +%{summary}. + %prep rpm2cpio %{SOURCE0} | cpio -iu linux-%{version}.tar config-%{_cross_arch} "*.patch" tar -xof linux-%{version}.tar; rm linux-%{version}.tar @@ -233,6 +250,20 @@ rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source +# Ensure that each required FIPS module is loaded as a dependency of the +# check-fips-module.service. The list of FIPS modules is different across +# kernels but the check is consistent: it loads the "tcrypt" module after +# the other modules are loaded. +mkdir -p %{buildroot}%{_cross_unitdir}/check-fips-modules.service.d +i=0 +for fipsmod in $(cat %{_sourcedir}/fipsmodules-%{_cross_arch}) ; do + [ "${fipsmod}" == "tcrypt" ] && continue + drop_in="$(printf "%03d\n" "${i}")-${fipsmod}.conf" + sed -e "s|__FIPS_MODULE__|${fipsmod}|g" %{S:200} \ + > %{buildroot}%{_cross_unitdir}/check-fips-modules.service.d/"${drop_in}" + (( i+=1 )) +done + %files %license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note %{_cross_attribution_file} @@ -274,4 +305,7 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %files archive %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz +%files fips +%{_cross_unitdir}/check-fips-modules.service.d/*.conf + %changelog From 9fb0ba093c5ede4f75bc0a9ace6f31b42d10c10f Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 23 Apr 2024 05:24:48 +0000 Subject: [PATCH 1213/1356] linux-firmware: switch to zstd compression Support for zstd-compressed firmware was added to the 5.19 kernel. Now that all `metal-*` variants are on the 6.1 kernel, switch from xz to zstd as part of the larger effort to eliminate xz usage. Signed-off-by: Ben Cressey --- packages/linux-firmware/linux-firmware.spec | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/linux-firmware/linux-firmware.spec b/packages/linux-firmware/linux-firmware.spec index dd8b3024..a70d43ab 100644 --- a/packages/linux-firmware/linux-firmware.spec +++ b/packages/linux-firmware/linux-firmware.spec @@ -48,9 +48,9 @@ Patch0010: 0010-linux-firmware-amd-ucode-Remove-amd-microcode.patch mkdir -p %{buildroot}/%{fwdir} mkdir -p %{buildroot}/%{fwdir}/updates -# Use xz compression for firmware files to reduce size on disk. This relies on -# kernel support through FW_LOADER_COMPRESS (and FW_LOADER_COMPRESS_XZ for kernels >=5.19) -make DESTDIR=%{buildroot}/ FIRMWAREDIR=%{fwdir} install-xz +# Use zstd compression for firmware files to reduce size on disk. 
This relies on +# kernel support through FW_LOADER_COMPRESS (and FW_LOADER_COMPRESS_ZSTD for kernels >=5.19) +make DESTDIR=%{buildroot}/ FIRMWAREDIR=%{fwdir} install-zst %files %dir %{fwdir} From d674c00d9744d79f36afab7cf47acba9d7598fdc Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 30 Apr 2024 00:07:54 +0000 Subject: [PATCH 1214/1356] kernel-6.1: merge platform configs Remove the need for conditional compilation by merging the platform- specific config snippets together. Storage drivers that were set to "Y" in any platform config are now "Y" for all platforms. Network drivers that were set to "M" in any platform config are likewise now "M" in the merged config. The simple DRM driver was disabled for `metal` and `vmware` but enabled for `aws`; it is now enabled for all platforms. The i8042 driver was disabled for `aws` but enabled for `metal` and `vmware`; it is also now enabled for all platforms. Switch to a full list of all packaged modules to clarify the full set of available modules. This also paves the way to move certain modules into platform-specific subpackages in a subsequent change. Signed-off-by: Ben Cressey --- packages/kernel-6.1/config-bottlerocket | 176 ++- packages/kernel-6.1/config-bottlerocket-aws | 20 - packages/kernel-6.1/config-bottlerocket-metal | 152 --- .../kernel-6.1/config-bottlerocket-vmware | 17 - packages/kernel-6.1/kernel-6.1.spec | 1009 ++++++++++++++++- 5 files changed, 1157 insertions(+), 217 deletions(-) delete mode 100644 packages/kernel-6.1/config-bottlerocket-aws delete mode 100644 packages/kernel-6.1/config-bottlerocket-metal delete mode 100644 packages/kernel-6.1/config-bottlerocket-vmware diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 6cb6147a..a88a6fc0 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -37,6 +37,30 @@ CONFIG_VIRTIO=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_PCI=y +# Support for virtio scsi boot devices for other cloud providers +CONFIG_SCSI_VIRTIO=y + +# IDE, SCSI, and SATA disks +CONFIG_ATA=y +CONFIG_ATA_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_SATA_AHCI=y + +# Disable specific SCSI drivers. +# CONFIG_SCSI_MPI3MR is not set + +# LSI Logic's SAS based RAID controllers +CONFIG_SCSI_MPT3SAS=y +CONFIG_MEGARAID_SAS=y + +# Microsemi PQI controllers +CONFIG_SCSI_SMARTPQI=y + +# Intel Volume Management Device driver, to support boot disks in a separate +# PCI domain. +CONFIG_VMD=y + # dm-verity and enabling it on the kernel command line CONFIG_BLK_DEV_DM=y CONFIG_DAX=y @@ -119,6 +143,10 @@ CONFIG_DECOMPRESS_ZSTD=y # CONFIG_MODULE_COMPRESS_NONE is not set CONFIG_MODULE_COMPRESS_GZIP=y +# Support handling of compressed firmware +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y + # Add virtio drivers for development setups running as guests in qemu CONFIG_VIRTIO_CONSOLE=m CONFIG_HW_RANDOM_VIRTIO=m @@ -170,13 +198,6 @@ CONFIG_EXT4_USE_FOR_EXT2=y # - sch_cake targets home routers and residential links # CONFIG_NET_SCH_CAKE is not set -# Disable specific SCSI drivers for the generic case. We have enabled necessary -# drivers on metal specifically -# CONFIG_SCSI_MPI3MR is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_SMARTPQI is not set -# CONFIG_SCSI_SAS_ATTRS is not set - # Provide minimal iSCSI via TCP support for initiator and target mode # initiator side CONFIG_ISCSI_TCP=m @@ -189,13 +210,144 @@ CONFIG_ISCSI_TARGET=m # Disable DAMON subsystem. We currently do not have a good use-case for DAMON. 
# CONFIG_DAMON is not set -# Disable unnecessary keyboard and mouse drivers. -# CONFIG_MOUSE_PS2 is not set -# CONFIG_SERIO is not set -# CONFIG_KEYBOARD_ATKBD is not set +# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for +# them before mounting the root device. +CONFIG_SERIO_I8042=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set +# CONFIG_MOUSE_PS2_LIFEBOOK is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_VMMOUSE is not set # Disable unnecessary framebuffer/drm drivers # CONFIG_DRM_BOCHS is not set -# CONFIG_DRM_SIMPLEDRM is not set # CONFIG_SYSFB_SIMPLEFB is not set +# With 6.1 some of the functionalities used by the nvidia driver have moved behind +# some extra config options CONFIG_DRM_KMS_HELPER and CONFIG_DRM_DISPLAY_HELPER. +# These config options can not be selected individually, but are selected by certain +# drivers. Enable the SIMPLEDRM driver, which is a minimal drm driver enabling +# those helpers for platform provided framebuffers. +CONFIG_DRM_SIMPLEDRM=m + +# =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + +# Enable various network vendors for bare metal usage. + +# Mellanox network support +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_INFINIBAND=m +CONFIG_MLXFW=m + +# AMD network support +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set + +# Broadcom network support +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_TIGON3_HWMON=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +CONFIG_BNXT=m + +# Chelsio network support +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +# CONFIG_CHELSIO_T4_DCB is not set +# CONFIG_CHELSIO_INLINE_CRYPTO is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_ISCSI_TARGET_CXGB4 is not set + +# Cisco UCS network support +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_INFINIBAND_USNIC=m + +# Emulex network support +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_BE2NET_HWMON=y + +# Huawei network support +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m + +# Intel network support +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m + +# Intel 10G network support +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_ICE=m +# CONFIG_ICE_HWTS is not set +# CONFIG_ICE_SWITCHDEV is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_PLDMFW=y +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_FM10K=m + +# Myricom network support +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y + +# Pensando network support +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m + +# Solarflare network support +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_SRIOV=y +# CONFIG_SFC_MCDI_LOGGING is not set +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_FALCON=m + +# QLogic network support +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QED=m +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +# 
CONFIG_INFINIBAND_QEDR is not set +# CONFIG_QEDF is not set +# CONFIG_QEDI is not set +# CONFIG_QLA3XXX is not set +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +# CONFIG_QLCNIC_DCB is not set +# CONFIG_QLCNIC_HWMON is not set +# CONFIG_NETXEN_NIC is not set + +# Cisco UCS HBA support +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m diff --git a/packages/kernel-6.1/config-bottlerocket-aws b/packages/kernel-6.1/config-bottlerocket-aws deleted file mode 100644 index 27237b59..00000000 --- a/packages/kernel-6.1/config-bottlerocket-aws +++ /dev/null @@ -1,20 +0,0 @@ -# Support boot from IDE disks -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y - -# Mellanox network support -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE=m -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_INFINIBAND=m -CONFIG_MLXFW=m - -# With 6.1 some of the functionalities used by the nvidia driver have moved behind -# some extra config options CONFIG_DRM_KMS_HELPER and CONFIG_DRM_DISPLAY_HELPER. -# These config options can not be selected individually, but are selected by certain -# drivers. Enable the SIMPLEDRM driver, which is a minimal drm driver enabling -# those helpers for platform provided framebuffers. -CONFIG_DRM_SIMPLEDRM=m diff --git a/packages/kernel-6.1/config-bottlerocket-metal b/packages/kernel-6.1/config-bottlerocket-metal deleted file mode 100644 index 72f4a63d..00000000 --- a/packages/kernel-6.1/config-bottlerocket-metal +++ /dev/null @@ -1,152 +0,0 @@ -# This file holds all the settings that are specific to hardware enablement -# we do for the metal variants. - -# SATA support -CONFIG_BLK_DEV_SD=y -CONFIG_SATA_AHCI=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y - -# AMD network support -CONFIG_NET_VENDOR_AMD=y -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set - -# Broadcom network support -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_TIGON3_HWMON=y -CONFIG_TIGON3=m -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -CONFIG_BNXT=m - -# Chelsio network support -CONFIG_NET_VENDOR_CHELSIO=y -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF=m -# CONFIG_CHELSIO_T4_DCB is not set -# CONFIG_CHELSIO_INLINE_CRYPTO is not set -# CONFIG_INFINIBAND_CXGB4 is not set -# CONFIG_ISCSI_TARGET_CXGB4 is not set - -# Cisco UCS network support -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -CONFIG_INFINIBAND_USNIC=m - -# Emulex network support -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_BE2=y -CONFIG_BE2NET_BE3=y -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -CONFIG_BE2NET_HWMON=y - -# Huawei network support -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m - -# Intel network support -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m - -# Intel 10G network support -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_ICE=m -# CONFIG_ICE_HWTS is not set -# CONFIG_ICE_SWITCHDEV is not set -# CONFIG_INFINIBAND_IRDMA is not set -CONFIG_PLDMFW=y -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_FM10K=m - -# Mellanox network support -CONFIG_MLXFW=m -CONFIG_MLX5_CORE=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX5_CORE_EN=y -CONFIG_NET_SWITCHDEV=y - -# Myricom network support -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y - -# Pensando network support -CONFIG_NET_VENDOR_PENSANDO=y -CONFIG_IONIC=m - -# Solarflare network support -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_SRIOV=y -# CONFIG_SFC_MCDI_LOGGING is not set -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_FALCON=m - -# 
QLogic network support -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QED=m -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -# CONFIG_INFINIBAND_QEDR is not set -# CONFIG_QEDF is not set -# CONFIG_QEDI is not set -# CONFIG_QLA3XXX is not set -CONFIG_QLCNIC=m -CONFIG_QLCNIC_SRIOV=y -# CONFIG_QLCNIC_DCB is not set -# CONFIG_QLCNIC_HWMON is not set -# CONFIG_NETXEN_NIC is not set - -# Cisco UCS HBA support -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_SNIC=m - -# LSI Logic's SAS based RAID controllers -CONFIG_SCSI_MPT3SAS=y -CONFIG_MEGARAID_SAS=y - -# Microsemi PQI controllers -CONFIG_SCSI_SMARTPQI=y - -# Support for virtio scsi boot devices for other cloud providers -CONFIG_SCSI_VIRTIO=y - -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. -CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set -# CONFIG_MOUSE_PS2_VMMOUSE is not set - -# Intel Volume Management Device driver, to support boot disks in a separate -# PCI domain. -CONFIG_VMD=y - -# Support handling of compressed firmware -CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_XZ=y diff --git a/packages/kernel-6.1/config-bottlerocket-vmware b/packages/kernel-6.1/config-bottlerocket-vmware deleted file mode 100644 index 6f350325..00000000 --- a/packages/kernel-6.1/config-bottlerocket-vmware +++ /dev/null @@ -1,17 +0,0 @@ -# Load i8042 controller, keyboard, and mouse as modules, to avoid waiting for -# them before mounting the root device. -CONFIG_SERIO_I8042=m -CONFIG_KEYBOARD_ATKBD=m -CONFIG_MOUSE_PS2=m -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set -# CONFIG_MOUSE_PS2_VMMOUSE is not set diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 8e4fd580..41a4222d 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -9,9 +9,6 @@ URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/al2023/blobstore/bdca6b79db0d3d5ad549b61951208fbf474daebe38ca619f8c706070dc252239/kernel-6.1.84-99.169.amzn2023.src.rpm Source100: config-bottlerocket -Source101: config-bottlerocket-aws -Source102: config-bottlerocket-metal -Source103: config-bottlerocket-vmware # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs # after placing AL2023 in FIPS mode. @@ -52,8 +49,8 @@ Requires: %{name}-devel = %{version}-%{release} # Pull in FIPS-related files if needed. 
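A short note ahead of the hunk just below, which renames the kernel_sourcedir and kernel_libdir globals to %{_cross_ksrcdir} and %{_cross_kmoddir}: the long %files list later in this patch is written entirely in terms of %{_cross_kmoddir}, so it can help to see how such a macro expands. rpm --eval works for this outside of any build; the _cross_libdir value below is only an illustrative stand-in for the cross sysroot path:

    # Evaluate the module directory path with a hand-supplied _cross_libdir;
    # the real value comes from Bottlerocket's cross-compilation macros.
    rpm --define '_cross_libdir /x86_64-bottlerocket-linux-gnu/sys-root/usr/lib' \
        --eval '%{_cross_libdir}/modules/6.1.84'
    # prints: /x86_64-bottlerocket-linux-gnu/sys-root/usr/lib/modules/6.1.84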
Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) -%global kernel_sourcedir %{_cross_usrsrc}/kernels -%global kernel_libdir %{_cross_libdir}/modules/%{version} +%global _cross_ksrcdir %{_cross_usrsrc}/kernels +%global _cross_kmoddir %{_cross_libdir}/modules/%{version} %description %{summary}. @@ -118,8 +115,7 @@ scripts/kconfig/merge_config.sh \ %if "%{_cross_arch}" == "x86_64" ../config-microcode \ %endif - %{SOURCE100} \ - %{_sourcedir}/config-bottlerocket-%{_cross_variant_platform} + %{SOURCE100} rm -f ../config-* ../*.patch @@ -241,14 +237,14 @@ xz -T0 kernel-devel.tar install -D kernel-devel.squashfs %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.squashfs install -D kernel-devel.tar.xz %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -install -d %{buildroot}%{kernel_sourcedir} +install -d %{buildroot}%{_cross_ksrcdir} # Replace the incorrect links from modules_install. These will be bound # into a host container (and unused in the host) so they must not point # to %{_cross_usrsrc} (eg. /x86_64-bottlerocket-linux-gnu/sys-root/...) -rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source -ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build -ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source +rm -f %{buildroot}%{_cross_kmoddir}/build %{buildroot}%{_cross_kmoddir}/source +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{_cross_kmoddir}/build +ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{_cross_kmoddir}/source # Ensure that each required FIPS module is loaded as a dependency of the # check-fips-module.service. The list of FIPS modules is different across @@ -270,10 +266,6 @@ done /boot/vmlinuz /boot/config -%files modules -%dir %{_cross_libdir}/modules -%{_cross_libdir}/modules/* - %files headers %dir %{_cross_includedir}/asm %dir %{_cross_includedir}/asm-generic @@ -299,8 +291,10 @@ done %{_cross_includedir}/xen/* %files devel -%dir %{kernel_sourcedir} +%dir %{_cross_ksrcdir} %{_cross_datadir}/bottlerocket/kernel-devel.squashfs +%{_cross_kmoddir}/source +%{_cross_kmoddir}/build %files archive %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -308,4 +302,987 @@ done %files fips %{_cross_unitdir}/check-fips-modules.service.d/*.conf +%files modules +%dir %{_cross_libdir}/modules +%dir %{_cross_kmoddir} +%{_cross_kmoddir}/modules.alias +%{_cross_kmoddir}/modules.alias.bin +%{_cross_kmoddir}/modules.builtin +%{_cross_kmoddir}/modules.builtin.alias.bin +%{_cross_kmoddir}/modules.builtin.bin +%{_cross_kmoddir}/modules.builtin.modinfo +%{_cross_kmoddir}/modules.dep +%{_cross_kmoddir}/modules.dep.bin +%{_cross_kmoddir}/modules.devname +%{_cross_kmoddir}/modules.order +%{_cross_kmoddir}/modules.softdep +%{_cross_kmoddir}/modules.symbols +%{_cross_kmoddir}/modules.symbols.bin + +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/arch/x86/crypto/aesni-intel.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/blowfish-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/camellia-aesni-avx2.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/camellia-aesni-avx-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/camellia-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/cast5-avx-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/cast6-avx-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/chacha-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/crc32c-intel.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/crc32-pclmul.ko.* 
+%{_cross_kmoddir}/kernel/arch/x86/crypto/curve25519-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/des3_ede-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/ghash-clmulni-intel.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/poly1305-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/serpent-avx2.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/serpent-avx-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/serpent-sse2-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/twofish-avx-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/twofish-x86_64-3way.ko.* +%{_cross_kmoddir}/kernel/arch/x86/crypto/twofish-x86_64.ko.* +%{_cross_kmoddir}/kernel/arch/x86/kvm/kvm-amd.ko.* +%{_cross_kmoddir}/kernel/arch/x86/kvm/kvm-intel.ko.* +%{_cross_kmoddir}/kernel/arch/x86/kvm/kvm.ko.* +%{_cross_kmoddir}/kernel/arch/x86/platform/intel/iosf_mbi.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-arm64.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-ce-blk.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-ce-ccm.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-ce-cipher.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-neon-blk.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/aes-neon-bs.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/chacha-neon.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/ghash-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/poly1305-neon.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha1-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha256-arm64.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha2-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha3-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha512-arm64.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sha512-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sm3-ce.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/crypto/sm4-ce-cipher.ko.* +%{_cross_kmoddir}/kernel/arch/arm64/lib/xor-neon.ko.* +%endif +%{_cross_kmoddir}/kernel/crypto/af_alg.ko.* +%{_cross_kmoddir}/kernel/crypto/algif_aead.ko.* +%{_cross_kmoddir}/kernel/crypto/algif_hash.ko.* +%{_cross_kmoddir}/kernel/crypto/algif_rng.ko.* +%{_cross_kmoddir}/kernel/crypto/algif_skcipher.ko.* +%{_cross_kmoddir}/kernel/crypto/ansi_cprng.ko.* +%{_cross_kmoddir}/kernel/crypto/anubis.ko.* +%{_cross_kmoddir}/kernel/crypto/arc4.ko.* +%{_cross_kmoddir}/kernel/crypto/asymmetric_keys/pkcs7_test_key.ko.* +%{_cross_kmoddir}/kernel/crypto/async_tx/async_memcpy.ko.* +%{_cross_kmoddir}/kernel/crypto/async_tx/async_pq.ko.* +%{_cross_kmoddir}/kernel/crypto/async_tx/async_raid6_recov.ko.* +%{_cross_kmoddir}/kernel/crypto/async_tx/async_tx.ko.* +%{_cross_kmoddir}/kernel/crypto/async_tx/async_xor.ko.* +%{_cross_kmoddir}/kernel/crypto/authencesn.ko.* +%{_cross_kmoddir}/kernel/crypto/authenc.ko.* +%{_cross_kmoddir}/kernel/crypto/blake2b_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/blowfish_common.ko.* +%{_cross_kmoddir}/kernel/crypto/blowfish_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/camellia_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/cast5_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/cast6_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/cast_common.ko.* +%{_cross_kmoddir}/kernel/crypto/cbc.ko.* +%{_cross_kmoddir}/kernel/crypto/ccm.ko.* +%{_cross_kmoddir}/kernel/crypto/cfb.ko.* +%{_cross_kmoddir}/kernel/crypto/chacha20poly1305.ko.* +%{_cross_kmoddir}/kernel/crypto/chacha_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/cmac.ko.* +%{_cross_kmoddir}/kernel/crypto/crc32_generic.ko.* 
+%{_cross_kmoddir}/kernel/crypto/cryptd.ko.* +%{_cross_kmoddir}/kernel/crypto/crypto_user.ko.* +%{_cross_kmoddir}/kernel/crypto/cts.ko.* +%{_cross_kmoddir}/kernel/crypto/des_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/ecb.ko.* +%{_cross_kmoddir}/kernel/crypto/echainiv.ko.* +%{_cross_kmoddir}/kernel/crypto/essiv.ko.* +%{_cross_kmoddir}/kernel/crypto/fcrypt.ko.* +%{_cross_kmoddir}/kernel/crypto/gcm.ko.* +%{_cross_kmoddir}/kernel/crypto/keywrap.ko.* +%{_cross_kmoddir}/kernel/crypto/khazad.ko.* +%{_cross_kmoddir}/kernel/crypto/lrw.ko.* +%{_cross_kmoddir}/kernel/crypto/lz4hc.ko.* +%{_cross_kmoddir}/kernel/crypto/lz4.ko.* +%{_cross_kmoddir}/kernel/crypto/md4.ko.* +%{_cross_kmoddir}/kernel/crypto/michael_mic.ko.* +%{_cross_kmoddir}/kernel/crypto/ofb.ko.* +%{_cross_kmoddir}/kernel/crypto/pcbc.ko.* +%{_cross_kmoddir}/kernel/crypto/pcrypt.ko.* +%{_cross_kmoddir}/kernel/crypto/poly1305_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/rmd160.ko.* +%{_cross_kmoddir}/kernel/crypto/seed.ko.* +%{_cross_kmoddir}/kernel/crypto/serpent_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/tcrypt.ko.* +%{_cross_kmoddir}/kernel/crypto/tea.ko.* +%{_cross_kmoddir}/kernel/crypto/twofish_common.ko.* +%{_cross_kmoddir}/kernel/crypto/twofish_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/vmac.ko.* +%{_cross_kmoddir}/kernel/crypto/wp512.ko.* +%{_cross_kmoddir}/kernel/crypto/xcbc.ko.* +%{_cross_kmoddir}/kernel/crypto/xor.ko.* +%{_cross_kmoddir}/kernel/crypto/xts.ko.* +%{_cross_kmoddir}/kernel/crypto/xxhash_generic.ko.* +%{_cross_kmoddir}/kernel/crypto/zstd.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/crypto/crypto_simd.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/crypto/sm3.ko.* +%{_cross_kmoddir}/kernel/crypto/sm4.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/acpi/ac.ko.* +%{_cross_kmoddir}/kernel/drivers/acpi/button.ko.* +%{_cross_kmoddir}/kernel/drivers/acpi/thermal.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/acpi/acpi_extlog.ko.* +%{_cross_kmoddir}/kernel/drivers/acpi/acpi_pad.ko.* +%{_cross_kmoddir}/kernel/drivers/acpi/video.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/amazon/net/efa/efa.ko.* +%{_cross_kmoddir}/kernel/drivers/amazon/net/ena/ena.ko.* +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/ata/ahci_platform.ko.* +%{_cross_kmoddir}/kernel/drivers/ata/libahci_platform.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/block/brd.ko.* +%{_cross_kmoddir}/kernel/drivers/block/drbd/drbd.ko.* +%{_cross_kmoddir}/kernel/drivers/block/loop.ko.* +%{_cross_kmoddir}/kernel/drivers/block/nbd.ko.* +%{_cross_kmoddir}/kernel/drivers/block/null_blk/null_blk.ko.* +%{_cross_kmoddir}/kernel/drivers/block/pktcdvd.ko.* +%{_cross_kmoddir}/kernel/drivers/block/rbd.ko.* +%{_cross_kmoddir}/kernel/drivers/block/zram/zram.ko.* +%{_cross_kmoddir}/kernel/drivers/cdrom/cdrom.ko.* +%{_cross_kmoddir}/kernel/drivers/char/ipmi/ipmi_msghandler.ko.* +%{_cross_kmoddir}/kernel/drivers/char/virtio_console.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/char/agp/intel-gtt.ko.* +%{_cross_kmoddir}/kernel/drivers/char/hangcheck-timer.ko.* +%{_cross_kmoddir}/kernel/drivers/char/nvram.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/char/hw_random/rng-core.ko.* +%{_cross_kmoddir}/kernel/drivers/char/hw_random/virtio-rng.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/char/hw_random/amd-rng.ko.* +%{_cross_kmoddir}/kernel/drivers/char/hw_random/intel-rng.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" 
+%{_cross_kmoddir}/kernel/drivers/char/hw_random/arm_smccc_trng.ko.* +%{_cross_kmoddir}/kernel/drivers/char/hw_random/cn10k-rng.ko.* +%{_cross_kmoddir}/kernel/drivers/char/hw_random/graviton-rng.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/cpufreq/cpufreq_conservative.ko.* +%{_cross_kmoddir}/kernel/drivers/cpufreq/cpufreq_ondemand.ko.* +%{_cross_kmoddir}/kernel/drivers/cpufreq/cpufreq_powersave.ko.* +%{_cross_kmoddir}/kernel/drivers/cpufreq/cpufreq_userspace.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/cpufreq/acpi-cpufreq.ko.* +%{_cross_kmoddir}/kernel/drivers/cpufreq/pcc-cpufreq.ko.* +%{_cross_kmoddir}/kernel/drivers/dca/dca.ko.* +%{_cross_kmoddir}/kernel/drivers/dma/ioat/ioatdma.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/amd64_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/e752x_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i3000_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i3200_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i5000_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i5100_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i5400_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i7300_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i7core_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/i82975x_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/ie31200_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/pnd2_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/sb_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/skx_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/x38_edac.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/firmware/dmi-sysfs.ko.* +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/firmware/arm_scpi.ko.* +%{_cross_kmoddir}/kernel/drivers/firmware/scpi_pm_domain.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/gpu/drm/drm_kms_helper.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/drm.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/drm_shmem_helper.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/tiny/simpledrm.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/gpu/drm/drm_buddy.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/display/drm_display_helper.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/i915/i915.ko.* +%{_cross_kmoddir}/kernel/drivers/gpu/drm/ttm/ttm.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/hid/hid-generic.ko.* +%{_cross_kmoddir}/kernel/drivers/hid/hid-multitouch.ko.* +%{_cross_kmoddir}/kernel/drivers/hid/uhid.ko.* +%{_cross_kmoddir}/kernel/drivers/hid/usbhid/usbhid.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/hid/hid-hyperv.ko.* +%{_cross_kmoddir}/kernel/drivers/hv/hv_balloon.ko.* +%{_cross_kmoddir}/kernel/drivers/hv/hv_utils.ko.* +%{_cross_kmoddir}/kernel/drivers/hv/hv_vmbus.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/hwmon/acpi_power_meter.ko.* +%{_cross_kmoddir}/kernel/drivers/hwmon/hwmon.ko.* +%{_cross_kmoddir}/kernel/drivers/i2c/algos/i2c-algo-bit.ko.* +%{_cross_kmoddir}/kernel/drivers/i2c/i2c-core.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/ib_cm.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/ib_core.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/ib_uverbs.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/iw_cm.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/rdma_cm.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/core/rdma_ucm.ko.* +%{_cross_kmoddir}/kernel/drivers/infiniband/hw/mlx5/mlx5_ib.ko.* +%{_cross_kmoddir}/kernel/drivers/input/misc/uinput.ko.* +%{_cross_kmoddir}/kernel/drivers/input/mousedev.ko.* 
+%{_cross_kmoddir}/kernel/drivers/input/keyboard/atkbd.ko.* +%{_cross_kmoddir}/kernel/drivers/input/mouse/psmouse.ko.* +%{_cross_kmoddir}/kernel/drivers/input/serio/libps2.ko.* +%{_cross_kmoddir}/kernel/drivers/input/serio/serio.ko.* +%{_cross_kmoddir}/kernel/drivers/input/serio/serport.ko.* +%{_cross_kmoddir}/kernel/drivers/input/sparse-keymap.ko.* +%{_cross_kmoddir}/kernel/drivers/input/vivaldi-fmap.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/input/serio/hyperv-keyboard.ko.* +%{_cross_kmoddir}/kernel/drivers/input/serio/i8042.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/iommu/virtio-iommu.ko.* +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/mailbox/arm_mhu_db.ko.* +%{_cross_kmoddir}/kernel/drivers/mailbox/arm_mhu.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/md/bcache/bcache.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-bio-prison.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-cache.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-cache-smq.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-crypt.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-delay.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-dust.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-flakey.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-integrity.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-log.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-log-userspace.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-log-writes.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-mirror.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-multipath.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-queue-length.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-raid.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-region-hash.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-round-robin.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-service-time.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-snapshot.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-thin-pool.ko.* +%{_cross_kmoddir}/kernel/drivers/md/dm-zero.ko.* +%{_cross_kmoddir}/kernel/drivers/md/faulty.ko.* +%{_cross_kmoddir}/kernel/drivers/md/linear.ko.* +%{_cross_kmoddir}/kernel/drivers/md/persistent-data/dm-persistent-data.ko.* +%{_cross_kmoddir}/kernel/drivers/md/raid0.ko.* +%{_cross_kmoddir}/kernel/drivers/md/raid10.ko.* +%{_cross_kmoddir}/kernel/drivers/md/raid1.ko.* +%{_cross_kmoddir}/kernel/drivers/md/raid456.ko.* +%{_cross_kmoddir}/kernel/drivers/mfd/lpc_ich.ko.* +%{_cross_kmoddir}/kernel/drivers/mfd/lpc_sch.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/mfd/mfd-core.ko.* +%{_cross_kmoddir}/kernel/drivers/misc/vmw_balloon.ko.* +%{_cross_kmoddir}/kernel/drivers/misc/vmw_vmci/vmw_vmci.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/net/bonding/bonding.ko.* +%{_cross_kmoddir}/kernel/drivers/net/dummy.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/e1000/e1000.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/e1000e/e1000e.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/igb/igb.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/mellanox/mlxfw/mlxfw.ko.* +%{_cross_kmoddir}/kernel/drivers/net/geneve.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/net/hyperv/hv_netvsc.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/net/ifb.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ipvlan/ipvlan.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ipvlan/ipvtap.ko.* 
+%{_cross_kmoddir}/kernel/drivers/net/macvlan.ko.* +%{_cross_kmoddir}/kernel/drivers/net/macvtap.ko.* +%{_cross_kmoddir}/kernel/drivers/net/mdio/acpi_mdio.ko.* +%{_cross_kmoddir}/kernel/drivers/net/mdio/fwnode_mdio.ko.* +%{_cross_kmoddir}/kernel/drivers/net/netdevsim/netdevsim.ko.* +%{_cross_kmoddir}/kernel/drivers/net/net_failover.ko.* +%{_cross_kmoddir}/kernel/drivers/net/nlmon.ko.* +%{_cross_kmoddir}/kernel/drivers/net/phy/fixed_phy.ko.* +%{_cross_kmoddir}/kernel/drivers/net/phy/libphy.ko.* +%{_cross_kmoddir}/kernel/drivers/net/phy/mdio_devres.ko.* +%{_cross_kmoddir}/kernel/drivers/net/tap.ko.* +%{_cross_kmoddir}/kernel/drivers/net/team/team.ko.* +%{_cross_kmoddir}/kernel/drivers/net/team/team_mode_activebackup.ko.* +%{_cross_kmoddir}/kernel/drivers/net/team/team_mode_broadcast.ko.* +%{_cross_kmoddir}/kernel/drivers/net/team/team_mode_loadbalance.ko.* +%{_cross_kmoddir}/kernel/drivers/net/team/team_mode_roundrobin.ko.* +%{_cross_kmoddir}/kernel/drivers/net/tun.ko.* +%{_cross_kmoddir}/kernel/drivers/net/veth.ko.* +%{_cross_kmoddir}/kernel/drivers/net/virtio_net.ko.* +%{_cross_kmoddir}/kernel/drivers/net/vmxnet3/vmxnet3.ko.* +%{_cross_kmoddir}/kernel/drivers/net/vrf.ko.* +%{_cross_kmoddir}/kernel/drivers/net/vxlan/vxlan.ko.* +%{_cross_kmoddir}/kernel/drivers/net/wireguard/wireguard.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/net/xen-netback/xen-netback.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/net/mdio/of_mdio.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/nvme/host/nvme-fabrics.ko.* +%{_cross_kmoddir}/kernel/drivers/nvme/host/nvme-tcp.ko.* +%{_cross_kmoddir}/kernel/drivers/pci/hotplug/acpiphp_ibm.ko.* +%{_cross_kmoddir}/kernel/drivers/pci/pci-stub.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/pci/hotplug/cpcihp_generic.ko.* +%{_cross_kmoddir}/kernel/drivers/platform/x86/wmi-bmof.ko.* +%{_cross_kmoddir}/kernel/drivers/platform/x86/wmi.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/perf/arm-cmn.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/pps/clients/pps-gpio.ko.* +%{_cross_kmoddir}/kernel/drivers/pps/clients/pps-ldisc.ko.* +%{_cross_kmoddir}/kernel/drivers/pps/pps_core.ko.* +%{_cross_kmoddir}/kernel/drivers/ptp/ptp.ko.* +%{_cross_kmoddir}/kernel/drivers/ptp/ptp_kvm.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/ch.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/scsi/hv_storvsc.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/scsi/iscsi_boot_sysfs.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/iscsi_tcp.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/libiscsi.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/libiscsi_tcp.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/scsi_transport_iscsi.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/sg.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/sr_mod.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/st.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/scsi/vmw_pvscsi.ko.* +%{_cross_kmoddir}/kernel/drivers/scsi/xen-scsifront.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/libcfs/libcfs/libcfs.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lnet/klnds/o2iblnd/ko2iblnd.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lnet/klnds/socklnd/ksocklnd.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lnet/lnet/lnet.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lnet/selftest/lnet_selftest.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/fid/fid.ko.* 
+%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/fld/fld.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/llite/lustre.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/lmv/lmv.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/lov/lov.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/mdc/mdc.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/mgc/mgc.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/obdclass/obdclass.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/obdecho/obdecho.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/osc/osc.ko.* +%{_cross_kmoddir}/kernel/drivers/staging/lustrefsx/lustre/ptlrpc/ptlrpc.ko.* +%{_cross_kmoddir}/kernel/drivers/target/iscsi/iscsi_target_mod.ko.* +%{_cross_kmoddir}/kernel/drivers/target/loopback/tcm_loop.ko.* +%{_cross_kmoddir}/kernel/drivers/target/target_core_file.ko.* +%{_cross_kmoddir}/kernel/drivers/target/target_core_iblock.ko.* +%{_cross_kmoddir}/kernel/drivers/target/target_core_mod.ko.* +%{_cross_kmoddir}/kernel/drivers/target/target_core_user.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/thermal/intel/x86_pkg_temp_thermal.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/tty/serial/8250/8250_exar.ko.* +%{_cross_kmoddir}/kernel/drivers/uio/uio_dmem_genirq.ko.* +%{_cross_kmoddir}/kernel/drivers/uio/uio.ko.* +%{_cross_kmoddir}/kernel/drivers/uio/uio_pci_generic.ko.* +%{_cross_kmoddir}/kernel/drivers/uio/uio_pdrv_genirq.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/uio/uio_hv_generic.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/usb/class/cdc-acm.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/common/usb-common.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/core/usbcore.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ehci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ehci-pci.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ehci-platform.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ohci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ohci-pci.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/ohci-platform.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/uhci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/xhci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/xhci-pci.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/host/xhci-plat-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/mon/usbmon.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/serial/cp210x.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/serial/ftdi_sio.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/serial/usbserial.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/storage/uas.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/storage/usb-storage.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/usbip/usbip-core.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/usbip/usbip-host.ko.* +%{_cross_kmoddir}/kernel/drivers/usb/usbip/vhci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/pci/vfio-pci-core.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/pci/vfio-pci.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/vfio_iommu_type1.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/vfio.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/vfio_virqfd.ko.* +%{_cross_kmoddir}/kernel/drivers/vhost/vhost_iotlb.ko.* +%{_cross_kmoddir}/kernel/drivers/vhost/vhost.ko.* +%{_cross_kmoddir}/kernel/drivers/vhost/vhost_net.ko.* +%{_cross_kmoddir}/kernel/drivers/vhost/vhost_vsock.ko.* +%{_cross_kmoddir}/kernel/drivers/video/backlight/backlight.ko.* +%{_cross_kmoddir}/kernel/drivers/video/backlight/lcd.ko.* 
+%{_cross_kmoddir}/kernel/drivers/video/fbdev/core/fb_sys_fops.ko.* +%{_cross_kmoddir}/kernel/drivers/video/fbdev/core/syscopyarea.ko.* +%{_cross_kmoddir}/kernel/drivers/video/fbdev/core/sysfillrect.ko.* +%{_cross_kmoddir}/kernel/drivers/video/fbdev/core/sysimgblt.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/virt/coco/sev-guest/sev-guest.ko.* +%{_cross_kmoddir}/kernel/drivers/virt/vboxguest/vboxguest.ko.* +%endif +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/virt/nitro_enclaves/nitro_enclaves.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/virtio/virtio_balloon.ko.* +%{_cross_kmoddir}/kernel/drivers/virtio/virtio_mmio.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/virtio/virtio_mem.ko.* +%endif +%{_cross_kmoddir}/kernel/drivers/watchdog/softdog.ko.* +%if "%{_cross_arch}" == "aarch64" +%{_cross_kmoddir}/kernel/drivers/watchdog/gpio_wdt.ko.* +%{_cross_kmoddir}/kernel/drivers/watchdog/sbsa_gwdt.ko.* +%{_cross_kmoddir}/kernel/drivers/watchdog/sp805_wdt.ko.* +%endif +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/xen/xen-evtchn.ko.* +%{_cross_kmoddir}/kernel/drivers/xen/xenfs/xenfs.ko.* +%{_cross_kmoddir}/kernel/drivers/xen/xen-gntalloc.ko.* +%{_cross_kmoddir}/kernel/drivers/xen/xen-gntdev.ko.* +%{_cross_kmoddir}/kernel/drivers/xen/xen-pciback/xen-pciback.ko.* +%{_cross_kmoddir}/kernel/drivers/xen/xen-privcmd.ko.* +%endif +%{_cross_kmoddir}/kernel/fs/binfmt_misc.ko.* +%{_cross_kmoddir}/kernel/fs/btrfs/btrfs.ko.* +%{_cross_kmoddir}/kernel/fs/cachefiles/cachefiles.ko.* +%{_cross_kmoddir}/kernel/fs/ceph/ceph.ko.* +%{_cross_kmoddir}/kernel/fs/configfs/configfs.ko.* +%{_cross_kmoddir}/kernel/fs/efivarfs/efivarfs.ko.* +%{_cross_kmoddir}/kernel/fs/exfat/exfat.ko.* +%{_cross_kmoddir}/kernel/fs/fat/fat.ko.* +%{_cross_kmoddir}/kernel/fs/fat/msdos.ko.* +%{_cross_kmoddir}/kernel/fs/fat/vfat.ko.* +%{_cross_kmoddir}/kernel/fs/fscache/fscache.ko.* +%{_cross_kmoddir}/kernel/fs/fuse/cuse.ko.* +%{_cross_kmoddir}/kernel/fs/fuse/fuse.ko.* +%{_cross_kmoddir}/kernel/fs/fuse/virtiofs.ko.* +%{_cross_kmoddir}/kernel/fs/isofs/isofs.ko.* +%{_cross_kmoddir}/kernel/fs/lockd/lockd.ko.* +%{_cross_kmoddir}/kernel/fs/netfs/netfs.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/blocklayout/blocklayoutdriver.ko.* +%{_cross_kmoddir}/kernel/fs/nfs_common/grace.ko.* +%{_cross_kmoddir}/kernel/fs/nfs_common/nfs_acl.ko.* +%{_cross_kmoddir}/kernel/fs/nfsd/nfsd.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/filelayout/nfs_layout_nfsv41_files.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/flexfilelayout/nfs_layout_flexfiles.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/nfs.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/nfsv3.ko.* +%{_cross_kmoddir}/kernel/fs/nfs/nfsv4.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-celtic.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-centeuro.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-croatian.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-cyrillic.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-gaelic.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-greek.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-iceland.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-inuit.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-romanian.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-roman.ko.* +%{_cross_kmoddir}/kernel/fs/nls/mac-turkish.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_ascii.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp1250.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp1251.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp1255.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp437.ko.* 
+%{_cross_kmoddir}/kernel/fs/nls/nls_cp737.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp775.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp850.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp852.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp855.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp857.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp860.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp861.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp862.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp863.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp864.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp865.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp866.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp869.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp874.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp932.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp936.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp949.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_cp950.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_euc-jp.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-13.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-14.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-15.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-1.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-2.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-3.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-4.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-5.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-6.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-7.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_iso8859-9.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_koi8-r.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_koi8-ru.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_koi8-u.ko.* +%{_cross_kmoddir}/kernel/fs/nls/nls_utf8.ko.* +%{_cross_kmoddir}/kernel/fs/overlayfs/overlay.ko.* +%{_cross_kmoddir}/kernel/fs/pstore/ramoops.ko.* +%{_cross_kmoddir}/kernel/fs/quota/quota_tree.ko.* +%{_cross_kmoddir}/kernel/fs/quota/quota_v2.ko.* +%{_cross_kmoddir}/kernel/fs/smb/client/cifs.ko.* +%{_cross_kmoddir}/kernel/fs/smb/common/cifs_arc4.ko.* +%{_cross_kmoddir}/kernel/fs/smb/common/cifs_md4.ko.* +%{_cross_kmoddir}/kernel/fs/squashfs/squashfs.ko.* +%{_cross_kmoddir}/kernel/fs/udf/udf.ko.* +%{_cross_kmoddir}/kernel/kernel/bpf/preload/bpf_preload.ko.* +%{_cross_kmoddir}/kernel/lib/asn1_encoder.ko.* +%{_cross_kmoddir}/kernel/lib/crc4.ko.* +%{_cross_kmoddir}/kernel/lib/crc7.ko.* +%{_cross_kmoddir}/kernel/lib/crc8.ko.* +%{_cross_kmoddir}/kernel/lib/crc-itu-t.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libarc4.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libchacha20poly1305.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libchacha.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libcurve25519-generic.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libcurve25519.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libdes.ko.* +%{_cross_kmoddir}/kernel/lib/crypto/libpoly1305.ko.* +%{_cross_kmoddir}/kernel/lib/lru_cache.ko.* +%{_cross_kmoddir}/kernel/lib/lz4/lz4_compress.ko.* +%{_cross_kmoddir}/kernel/lib/lz4/lz4_decompress.ko.* +%{_cross_kmoddir}/kernel/lib/lz4/lz4hc_compress.ko.* +%{_cross_kmoddir}/kernel/lib/raid6/raid6_pq.ko.* +%{_cross_kmoddir}/kernel/lib/reed_solomon/reed_solomon.ko.* +%{_cross_kmoddir}/kernel/lib/test_lockup.ko.* +%{_cross_kmoddir}/kernel/lib/ts_bm.ko.* +%{_cross_kmoddir}/kernel/lib/ts_fsm.ko.* +%{_cross_kmoddir}/kernel/lib/ts_kmp.ko.* +%{_cross_kmoddir}/kernel/lib/zstd/zstd_compress.ko.* +%{_cross_kmoddir}/kernel/mm/z3fold.ko.* +%{_cross_kmoddir}/kernel/mm/zsmalloc.ko.* +%{_cross_kmoddir}/kernel/net/8021q/8021q.ko.* 
+%{_cross_kmoddir}/kernel/net/802/garp.ko.* +%{_cross_kmoddir}/kernel/net/802/mrp.ko.* +%{_cross_kmoddir}/kernel/net/802/p8022.ko.* +%{_cross_kmoddir}/kernel/net/802/psnap.ko.* +%{_cross_kmoddir}/kernel/net/802/stp.ko.* +%{_cross_kmoddir}/kernel/net/bridge/bridge.ko.* +%{_cross_kmoddir}/kernel/net/bridge/br_netfilter.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_802_3.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebtable_broute.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebtable_filter.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebtable_nat.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebtables.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_among.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_arp.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_arpreply.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_dnat.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_ip6.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_ip.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_limit.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_log.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_mark.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_mark_m.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_nflog.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_pkttype.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_redirect.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_snat.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_stp.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/ebt_vlan.ko.* +%{_cross_kmoddir}/kernel/net/bridge/netfilter/nft_reject_bridge.ko.* +%{_cross_kmoddir}/kernel/net/ceph/libceph.ko.* +%{_cross_kmoddir}/kernel/net/core/failover.ko.* +%{_cross_kmoddir}/kernel/net/core/selftests.ko.* +%{_cross_kmoddir}/kernel/net/dns_resolver/dns_resolver.ko.* +%{_cross_kmoddir}/kernel/net/ife/ife.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ah4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/esp4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/esp4_offload.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/fou.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/gre.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/inet_diag.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ipcomp.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ip_gre.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ipip.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ip_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/ip_vti.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/arptable_filter.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/arp_tables.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/arpt_mangle.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/iptable_filter.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/iptable_mangle.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/iptable_nat.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/iptable_raw.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/iptable_security.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_ah.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_CLUSTERIP.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_ECN.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_REJECT.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_rpfilter.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/ipt_SYNPROXY.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_defrag_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_dup_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_nat_h323.ko.* 
+%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_nat_pptp.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_nat_snmp_basic.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_reject_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_socket_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nft_dup_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nft_fib_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nf_tproxy_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/netfilter/nft_reject_ipv4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/raw_diag.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_bbr.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_bic.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_dctcp.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_diag.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_highspeed.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_htcp.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_hybla.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_illinois.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_lp.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_scalable.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_vegas.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_veno.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_westwood.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tcp_yeah.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/tunnel4.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/udp_diag.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/udp_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/ipv4/xfrm4_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ah6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/esp6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/esp6_offload.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/fou6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ila/ila.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ip6_gre.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ip6_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ip6_udp_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ip6_vti.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/ipcomp6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/mip6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6table_filter.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6table_mangle.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6table_nat.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6table_raw.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6table_security.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_ah.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_eui64.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_frag.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_hbh.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_ipv6header.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_mh.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_REJECT.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_rpfilter.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_rt.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_srh.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/ip6t_SYNPROXY.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nf_defrag_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nf_dup_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nf_reject_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nf_socket_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nft_dup_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nft_fib_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nf_tproxy_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/netfilter/nft_reject_ipv6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/sit.ko.* 
+%{_cross_kmoddir}/kernel/net/ipv6/tunnel6.ko.* +%{_cross_kmoddir}/kernel/net/ipv6/xfrm6_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/key/af_key.ko.* +%{_cross_kmoddir}/kernel/net/llc/llc.ko.* +%{_cross_kmoddir}/kernel/net/mpls/mpls_gso.ko.* +%{_cross_kmoddir}/kernel/net/mpls/mpls_iptunnel.ko.* +%{_cross_kmoddir}/kernel/net/mpls/mpls_router.ko.* +%{_cross_kmoddir}/kernel/net/mptcp/mptcp_diag.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_bitmap_ip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_bitmap_ipmac.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_bitmap_port.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ipmac.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ipmark.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ipportip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ipport.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_ipportnet.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_mac.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_netiface.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_net.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_netnet.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_netport.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_hash_netportnet.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipset/ip_set_list_set.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_dh.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_fo.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_ftp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_lblc.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_lblcr.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_lc.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_mh.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_nq.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_ovf.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_pe_sip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_rr.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_sed.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_sh.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_wlc.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/ipvs/ip_vs_wrr.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conncount.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_amanda.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_broadcast.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_ftp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_h323.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_irc.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_netbios_ns.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_netlink.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_pptp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_sane.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_sip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_snmp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_conntrack_tftp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_dup_netdev.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_flow_table_inet.ko.* 
+%{_cross_kmoddir}/kernel/net/netfilter/nf_flow_table.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_log_syslog.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat_amanda.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat_ftp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat_irc.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat_sip.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_nat_tftp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_acct.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_cthelper.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_cttimeout.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_log.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_osf.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nfnetlink_queue.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_synproxy_core.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nf_tables.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_chain_nat.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_compat.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_connlimit.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_ct.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_dup_netdev.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_fib_inet.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_fib.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_fib_netdev.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_flow_offload.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_fwd_netdev.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_hash.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_limit.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_log.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_masq.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_nat.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_numgen.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_objref.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_osf.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_queue.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_quota.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_redir.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_reject_inet.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_reject.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_socket.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_synproxy.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_tproxy.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_tunnel.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/nft_xfrm.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_addrtype.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_AUDIT.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_bpf.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_cgroup.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_CHECKSUM.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_CLASSIFY.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_cluster.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_comment.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_connbytes.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_connlabel.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_connlimit.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_connmark.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_CONNSECMARK.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_conntrack.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_cpu.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_CT.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_devgroup.ko.* 
+%{_cross_kmoddir}/kernel/net/netfilter/xt_dscp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_DSCP.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_ecn.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_esp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_hashlimit.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_helper.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_hl.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_HL.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_HMARK.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_IDLETIMER.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_ipcomp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_iprange.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_ipvs.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_l2tp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_length.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_limit.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_LOG.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_mac.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_mark.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_MASQUERADE.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_multiport.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_nat.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_NETMAP.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_nfacct.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_NFLOG.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_NFQUEUE.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_osf.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_owner.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_physdev.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_pkttype.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_policy.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_quota.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_rateest.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_RATEEST.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_realm.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_recent.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_REDIRECT.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_sctp.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_SECMARK.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_set.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_socket.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_state.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_statistic.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_string.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_tcpmss.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_TCPMSS.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_TCPOPTSTRIP.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_TEE.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_time.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_TPROXY.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_TRACE.ko.* +%{_cross_kmoddir}/kernel/net/netfilter/xt_u32.ko.* +%{_cross_kmoddir}/kernel/net/nsh/nsh.ko.* +%{_cross_kmoddir}/kernel/net/openvswitch/openvswitch.ko.* +%{_cross_kmoddir}/kernel/net/openvswitch/vport-geneve.ko.* +%{_cross_kmoddir}/kernel/net/openvswitch/vport-gre.ko.* +%{_cross_kmoddir}/kernel/net/openvswitch/vport-vxlan.ko.* +%{_cross_kmoddir}/kernel/net/packet/af_packet_diag.ko.* +%{_cross_kmoddir}/kernel/net/psample/psample.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_bpf.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_connmark.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_csum.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_gact.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_ipt.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_mirred.ko.* 
+%{_cross_kmoddir}/kernel/net/sched/act_nat.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_pedit.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_police.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_sample.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_simple.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_skbedit.ko.* +%{_cross_kmoddir}/kernel/net/sched/act_vlan.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_basic.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_bpf.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_cgroup.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_flower.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_flow.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_fw.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_route.ko.* +%{_cross_kmoddir}/kernel/net/sched/cls_u32.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_cmp.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_ipset.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_ipt.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_meta.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_nbyte.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_text.ko.* +%{_cross_kmoddir}/kernel/net/sched/em_u32.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_cbs.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_choke.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_codel.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_drr.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_fq_codel.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_fq.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_gred.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_hfsc.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_hhf.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_htb.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_ingress.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_mqprio.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_multiq.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_netem.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_pie.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_plug.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_prio.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_qfq.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_red.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_sfb.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_sfq.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_tbf.ko.* +%{_cross_kmoddir}/kernel/net/sched/sch_teql.ko.* +%{_cross_kmoddir}/kernel/net/sctp/sctp_diag.ko.* +%{_cross_kmoddir}/kernel/net/sctp/sctp.ko.* +%{_cross_kmoddir}/kernel/net/sunrpc/auth_gss/auth_rpcgss.ko.* +%{_cross_kmoddir}/kernel/net/sunrpc/auth_gss/rpcsec_gss_krb5.ko.* +%{_cross_kmoddir}/kernel/net/sunrpc/sunrpc.ko.* +%{_cross_kmoddir}/kernel/net/tls/tls.ko.* +%{_cross_kmoddir}/kernel/net/unix/unix_diag.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vmw_vsock_virtio_transport_common.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vmw_vsock_virtio_transport.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vsock_diag.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vsock.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vsock_loopback.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/net/vmw_vsock/hv_sock.ko.* +%{_cross_kmoddir}/kernel/net/vmw_vsock/vmw_vsock_vmci_transport.ko.* +%endif +%{_cross_kmoddir}/kernel/net/xfrm/xfrm_algo.ko.* +%{_cross_kmoddir}/kernel/net/xfrm/xfrm_ipcomp.ko.* +%{_cross_kmoddir}/kernel/net/xfrm/xfrm_user.ko.* +%{_cross_kmoddir}/kernel/security/keys/encrypted-keys/encrypted-keys.ko.* +%{_cross_kmoddir}/kernel/security/keys/trusted-keys/trusted.ko.* +%if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/virt/lib/irqbypass.ko.* +%endif + +%if "%{_cross_arch}" == "x86_64" 
+%{_cross_kmoddir}/kernel/drivers/infiniband/hw/usnic/usnic_verbs.ko.gz +%endif +%{_cross_kmoddir}/kernel/drivers/net/ethernet/amd/xgbe/amd-xgbe.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_en.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/broadcom/tg3.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/cisco/enic/enic.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/emulex/benet/be2net.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/huawei/hinic/hinic.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/fm10k/fm10k.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/ice/ice.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/igbvf/igbvf.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/ixgb/ixgb.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/myricom/myri10ge/myri10ge.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/pensando/ionic/ionic.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/qlogic/qed/qed.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/qlogic/qede/qede.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/sfc/falcon/sfc-falcon.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/ethernet/sfc/sfc.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/mdio.ko.gz +%{_cross_kmoddir}/kernel/drivers/scsi/snic/snic.ko.gz + %changelog From 06755ad4fdf0573060320909aad81d0e70012efa Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 30 Apr 2024 00:14:11 +0000 Subject: [PATCH 1215/1356] kernel-6.1: support zstd compressed firmware Add support for zstd-compressed firmware now that the linux-firmware package uses that compression format. Signed-off-by: Ben Cressey --- packages/kernel-6.1/config-bottlerocket | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index a88a6fc0..c4ae543b 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -145,7 +145,8 @@ CONFIG_MODULE_COMPRESS_GZIP=y # Support handling of compressed firmware CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_XZ is not set +CONFIG_FW_LOADER_COMPRESS_ZSTD=y # Add virtio drivers for development setups running as guests in qemu CONFIG_VIRTIO_CONSOLE=m From 4b32f2cdca78b35a3573599649904ea2a61e1319 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 30 Apr 2024 00:09:33 +0000 Subject: [PATCH 1216/1356] kernel-6.1: move metal modules to subpackage Move the modules that were previously only for "metal" into a new package that's installed by default for `metal-*` variants. This cuts down on the installed footprint for `aws-*` and `vmware-*` variants where the corresponding hardware is not supported. 
Signed-off-by: Ben Cressey --- packages/kernel-6.1/kernel-6.1.spec | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 41a4222d..461e8c02 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -46,6 +46,9 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# Pull in platform-dependent modules. +Requires: (%{name}-modules-metal if %{_cross_os}variant-platform(metal)) + # Pull in FIPS-related files if needed. Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) @@ -73,6 +76,12 @@ Summary: Modules for the Linux kernel %description modules %{summary}. +%package modules-metal +Summary: Modules for the Linux kernel on bare metal + +%description modules-metal +%{summary}. + %package headers Summary: Header files for the Linux kernel for use by glibc @@ -246,6 +255,9 @@ rm -f %{buildroot}%{_cross_kmoddir}/build %{buildroot}%{_cross_kmoddir}/source ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{_cross_kmoddir}/build ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{_cross_kmoddir}/source +# Install a copy of System.map so that module dependencies can be regenerated. +install -p -m 0600 System.map %{buildroot}%{_cross_kmoddir} + # Ensure that each required FIPS module is loaded as a dependency of the # check-fips-module.service. The list of FIPS modules is different across # kernels but the check is consistent: it loads the "tcrypt" module after @@ -318,6 +330,7 @@ done %{_cross_kmoddir}/modules.softdep %{_cross_kmoddir}/modules.symbols %{_cross_kmoddir}/modules.symbols.bin +%{_cross_kmoddir}/System.map %if "%{_cross_arch}" == "x86_64" %{_cross_kmoddir}/kernel/arch/x86/crypto/aesni-intel.ko.* @@ -1257,6 +1270,7 @@ done %{_cross_kmoddir}/kernel/virt/lib/irqbypass.ko.* %endif +%files modules-metal %if "%{_cross_arch}" == "x86_64" %{_cross_kmoddir}/kernel/drivers/infiniband/hw/usnic/usnic_verbs.ko.gz %endif From d64c86aee9fd8643fe975dc708da3746065aa348 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 29 Apr 2024 18:38:15 +0000 Subject: [PATCH 1217/1356] kernel-6.1: add platform bootconfig snippets Block the storage device drivers that were previously only built into the "metal" kernel from loading on `aws-*` and `vmware-*` variants by default. Block the i8042 driver from loading on "aws-*" variants by default, to approximate the effect of disabling the module at build time. These blocks are implemented through bootconfig snippets, where the resulting settings can be changed at runtime if necessary. 
Signed-off-by: Ben Cressey
---
 packages/kernel-6.1/bootconfig-aws.conf    |  2 +
 packages/kernel-6.1/bootconfig-metal.conf  |  0
 packages/kernel-6.1/bootconfig-vmware.conf |  1 +
 packages/kernel-6.1/kernel-6.1.spec        | 43 ++++++++++++++++++++++
 4 files changed, 46 insertions(+)
 create mode 100644 packages/kernel-6.1/bootconfig-aws.conf
 create mode 100644 packages/kernel-6.1/bootconfig-metal.conf
 create mode 100644 packages/kernel-6.1/bootconfig-vmware.conf

diff --git a/packages/kernel-6.1/bootconfig-aws.conf b/packages/kernel-6.1/bootconfig-aws.conf
new file mode 100644
index 00000000..90bd5c25
--- /dev/null
+++ b/packages/kernel-6.1/bootconfig-aws.conf
@@ -0,0 +1,2 @@
+kernel.initcall_blacklist = vmd_drv_init, megasas_init, mpt3sas_init, pqi_init
+kernel.module_blacklist = i8042
diff --git a/packages/kernel-6.1/bootconfig-metal.conf b/packages/kernel-6.1/bootconfig-metal.conf
new file mode 100644
index 00000000..e69de29b
diff --git a/packages/kernel-6.1/bootconfig-vmware.conf b/packages/kernel-6.1/bootconfig-vmware.conf
new file mode 100644
index 00000000..8d7ae016
--- /dev/null
+++ b/packages/kernel-6.1/bootconfig-vmware.conf
@@ -0,0 +1 @@
+kernel.initcall_blacklist = vmd_drv_init, megasas_init, mpt3sas_init, pqi_init
diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec
index 461e8c02..c476b251 100644
--- a/packages/kernel-6.1/kernel-6.1.spec
+++ b/packages/kernel-6.1/kernel-6.1.spec
@@ -16,6 +16,11 @@ Source200: check-fips-modules.drop-in.conf.in
 Source201: fipsmodules-x86_64
 Source202: fipsmodules-aarch64
 
+# Bootconfig snippets to adjust the default kernel command line for the platform.
+Source300: bootconfig-aws.conf
+Source301: bootconfig-vmware.conf
+Source302: bootconfig-metal.conf
+
 # Help out-of-tree module builds run `make prepare` automatically.
 Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch
 # Expose tools/* targets for out-of-tree module builds.
@@ -46,6 +51,11 @@ Requires: %{_cross_os}microcode-licenses
 Requires: %{name}-modules = %{version}-%{release}
 Requires: %{name}-devel = %{version}-%{release}
 
+# Pull in platform-dependent boot config snippets.
+Requires: (%{name}-bootconfig-aws if %{_cross_os}variant-platform(aws))
+Requires: (%{name}-bootconfig-vmware if %{_cross_os}variant-platform(vmware))
+Requires: (%{name}-bootconfig-metal if %{_cross_os}variant-platform(metal))
+
 # Pull in platform-dependent modules.
 Requires: (%{name}-modules-metal if %{_cross_os}variant-platform(metal))
 
@@ -70,6 +80,24 @@ Summary: Archived Linux kernel source for module building
 %description archive
 %{summary}.
 
+%package bootconfig-aws
+Summary: Boot config snippet for the Linux kernel on AWS
+
+%description bootconfig-aws
+%{summary}.
+
+%package bootconfig-vmware
+Summary: Boot config snippet for the Linux kernel on VMware
+
+%description bootconfig-vmware
+%{summary}.
+
+%package bootconfig-metal
+Summary: Boot config snippet for the Linux kernel on bare metal
+
+%description bootconfig-metal
+%{summary}.
+
 %package modules
 Summary: Modules for the Linux kernel
 
@@ -272,6 +300,12 @@ for fipsmod in $(cat %{_sourcedir}/fipsmodules-%{_cross_arch}) ; do
   (( i+=1 ))
 done
 
+# Install platform-specific bootconfig snippets.
+install -d %{buildroot}%{_cross_bootconfigdir} +install -p -m 0644 %{S:300} %{buildroot}%{_cross_bootconfigdir}/05-aws.conf +install -p -m 0644 %{S:301} %{buildroot}%{_cross_bootconfigdir}/05-vmware.conf +install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf + %files %license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note %{_cross_attribution_file} @@ -314,6 +348,15 @@ done %files fips %{_cross_unitdir}/check-fips-modules.service.d/*.conf +%files bootconfig-aws +%{_cross_bootconfigdir}/05-aws.conf + +%files bootconfig-vmware +%{_cross_bootconfigdir}/05-vmware.conf + +%files bootconfig-metal +%{_cross_bootconfigdir}/05-metal.conf + %files modules %dir %{_cross_libdir}/modules %dir %{_cross_kmoddir} From 79582e0fbee7588963ad182e62570ad610962030 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 23 Apr 2024 05:52:14 +0000 Subject: [PATCH 1218/1356] kernel-6.1: remove variant sensitivity Now that the same config is used for all platforms, drop the variant sensitive marker from the package. Signed-off-by: Ben Cressey --- packages/kernel-6.1/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 3f7bc2aa..8290071a 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -6,7 +6,6 @@ publish = false build = "../build.rs" [package.metadata.build-package] -variant-sensitive = "platform" package-name = "kernel-6.1" [lib] From 0f000526810d64ad11b8179af890a24c59ff714d Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 1 May 2024 17:21:59 +0000 Subject: [PATCH 1219/1356] kernel-5.10: update to 5.10.215 Rebase to Amazon Linux upstream version 5.10.215-203.850.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 88268418..3c4e1b4f 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/6ed4450682e3cd4bb4a66245eff09d376f623d8bb7646a386814fbf6d8e55691/kernel-5.10.214-202.855.amzn2.src.rpm" -sha512 = "7f201e8e747ebf3b1d93ad39ced49436d276e8446fc84a56cf971beb1422f8f727c5250750dfee8451f377189ac884d0bf7a156fcaa4d63732a0ee8c0e671394" +url = "https://cdn.amazonlinux.com/blobstore/962957e692b6ca23e981930d7a3dc644768e89d2ac321745883eaa99bc55e67a/kernel-5.10.215-203.850.amzn2.src.rpm" +sha512 = "4abdbacaf18bf224c56d4648803a5cad0b1c6a6fab7ffbdff9d38ac6deeb485abb2d4dea4daa61ffa271a848e634969e94bd4438dd48529213518e0671000455" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 75dfd7ed..dde777e4 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.214 +Version: 5.10.215 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/6ed4450682e3cd4bb4a66245eff09d376f623d8bb7646a386814fbf6d8e55691/kernel-5.10.214-202.855.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/962957e692b6ca23e981930d7a3dc644768e89d2ac321745883eaa99bc55e67a/kernel-5.10.215-203.850.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From aa94f23dcab92e5eeb80d8387da4fa644ef184b2 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 1 May 2024 17:22:34 +0000 Subject: [PATCH 1220/1356] kernel-5.15: update to 5.15.156 Rebase to Amazon Linux upstream version 5.15.156-102.160.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 8e757bb6..2dd4f289 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/b0b83af53711690ad1bbc3de7e01e03f6d93582a3fc506cf79a063c4937833aa/kernel-5.15.153-100.162.amzn2.src.rpm" -sha512 = "3d84822f9401d8902b6ecf84cca8d0546be67c9516a51a22e4e0036741f74210a8398159a44e6aff6ee9481dbfd44a297f56b84ef4da4336af2c2e9efcaca680" +url = "https://cdn.amazonlinux.com/blobstore/72726a4adc0c205ce087f838249744029fc8e51b85ea0e3d395cb10ac99d4864/kernel-5.15.156-102.160.amzn2.src.rpm" +sha512 = "0964d79ecb44e23273633a831022cd4d43ca9eeb2f029f994d5f52dc1aceed11b985a2b8acf978524033d738e3a4670ea87566e87bfb4c3a6d7bd37b5a6d8e22" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 55d32704..d514cbd5 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.153 +Version: 5.15.156 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/b0b83af53711690ad1bbc3de7e01e03f6d93582a3fc506cf79a063c4937833aa/kernel-5.15.153-100.162.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/72726a4adc0c205ce087f838249744029fc8e51b85ea0e3d395cb10ac99d4864/kernel-5.15.156-102.160.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From f4447778b4ddd269570f14fd35476f289525233c Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 30 Apr 2024 19:10:50 +0000 Subject: [PATCH 1221/1356] kernel-6.1: update to 6.1.87 Rebase to Amazon Linux upstream version 6.1.87-99.174.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 8290071a..31597d7c 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/bdca6b79db0d3d5ad549b61951208fbf474daebe38ca619f8c706070dc252239/kernel-6.1.84-99.169.amzn2023.src.rpm" -sha512 = "3e1b219fc89e5c051b321088ad464db1e1278bc9e8ca90ffa2b17853a9db23310f7a7f370e3253671c7cf74e492cde4effb38c38500da3b391c7295027b134e1" +url = "https://cdn.amazonlinux.com/al2023/blobstore/1abc503d6f7da124bf9e7c306ab8119b4b85c9207c943fb2c5a617d0d1716362/kernel-6.1.87-99.174.amzn2023.src.rpm" +sha512 = "2aaab825ca6bed61366fcdbc67d954191190a6010a9b083c824c9adeeb308000fa7af5b867b450d4bf5c6a9be4234e909ae5210a7157af5e7edcc4ff291a2f61" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index c476b251..d2068ec5 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.84 +Version: 6.1.87 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/bdca6b79db0d3d5ad549b61951208fbf474daebe38ca619f8c706070dc252239/kernel-6.1.84-99.169.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/1abc503d6f7da124bf9e7c306ab8119b4b85c9207c943fb2c5a617d0d1716362/kernel-6.1.87-99.174.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From f842b1ee06f8ddb9419c498eb97df0a34981015c Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 20 May 2024 22:07:45 +0000 Subject: [PATCH 1222/1356] kernel-5.10: update to 5.10.216 Rebase to Amazon Linux upstream version 5.10.216-204.855.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 3c4e1b4f..ca59c720 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/962957e692b6ca23e981930d7a3dc644768e89d2ac321745883eaa99bc55e67a/kernel-5.10.215-203.850.amzn2.src.rpm" -sha512 = "4abdbacaf18bf224c56d4648803a5cad0b1c6a6fab7ffbdff9d38ac6deeb485abb2d4dea4daa61ffa271a848e634969e94bd4438dd48529213518e0671000455" +url = "https://cdn.amazonlinux.com/blobstore/0a25c63e615af935b4980fb447cdd9e44e2fab61ef15b47c60d34e50907e8a12/kernel-5.10.216-204.855.amzn2.src.rpm" +sha512 = "c32d9c1b3bddcc4a9f5c014be07681e7990d5cedccf7242e7a943f8f2fb270ae419e9bec44215ff1df4d8afede5af5b16926507af5b86f9dd8982b04648b2cde" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index dde777e4..61e15285 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.215 +Version: 5.10.216 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/962957e692b6ca23e981930d7a3dc644768e89d2ac321745883eaa99bc55e67a/kernel-5.10.215-203.850.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/0a25c63e615af935b4980fb447cdd9e44e2fab61ef15b47c60d34e50907e8a12/kernel-5.10.216-204.855.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From d7f68d8b005a7b3df655c6720dd624f44f4588fb Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 20 May 2024 22:08:19 +0000 Subject: [PATCH 1223/1356] kernel-5.15: update to 5.15.158 Rebase to Amazon Linux upstream version 5.15.158-103.164.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 2dd4f289..05d3d0a9 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/72726a4adc0c205ce087f838249744029fc8e51b85ea0e3d395cb10ac99d4864/kernel-5.15.156-102.160.amzn2.src.rpm" -sha512 = "0964d79ecb44e23273633a831022cd4d43ca9eeb2f029f994d5f52dc1aceed11b985a2b8acf978524033d738e3a4670ea87566e87bfb4c3a6d7bd37b5a6d8e22" +url = "https://cdn.amazonlinux.com/blobstore/f75f72cbdb5b3da04159fef0093b7ca471b95b58172bc9630600bc94668e247a/kernel-5.15.158-103.164.amzn2.src.rpm" +sha512 = "3ba3616cfcbc230208c84dffbbe1648e57a295dd828288e1e330e988f1f14a9a10fc6e6f251573d20e6679e802ac3b3ca53dfef39d1e19f61af4ede42a035af0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index d514cbd5..356f3315 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.156 +Version: 5.15.158 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/72726a4adc0c205ce087f838249744029fc8e51b85ea0e3d395cb10ac99d4864/kernel-5.15.156-102.160.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/f75f72cbdb5b3da04159fef0093b7ca471b95b58172bc9630600bc94668e247a/kernel-5.15.158-103.164.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 71813ad7de70f7a44890023c479ce85dc46fa5f2 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Fri, 17 May 2024 21:41:51 +0000 Subject: [PATCH 1224/1356] kernel-6.1: update to 6.1.90 Rebase to Amazon Linux upstream version 6.1.90-99.173.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 31597d7c..29962519 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/1abc503d6f7da124bf9e7c306ab8119b4b85c9207c943fb2c5a617d0d1716362/kernel-6.1.87-99.174.amzn2023.src.rpm" -sha512 = "2aaab825ca6bed61366fcdbc67d954191190a6010a9b083c824c9adeeb308000fa7af5b867b450d4bf5c6a9be4234e909ae5210a7157af5e7edcc4ff291a2f61" +url = "https://cdn.amazonlinux.com/al2023/blobstore/4deb8487627a15345b5963c9825994ff2ec7c42015380406ab8640590242fe7c/kernel-6.1.90-99.173.amzn2023.src.rpm" +sha512 = "a055bc88f4d99dd8df2b1272eaecdeb2a25e12e0f5a6639eba8c16ce6dcec0d2b2c8371dc87b507058ff232138e5ab5319a9b5b95314111d3fc5c2f25161c3e4" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index d2068ec5..5d70715e 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.87 +Version: 6.1.90 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/1abc503d6f7da124bf9e7c306ab8119b4b85c9207c943fb2c5a617d0d1716362/kernel-6.1.87-99.174.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/4deb8487627a15345b5963c9825994ff2ec7c42015380406ab8640590242fe7c/kernel-6.1.90-99.173.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 21cd8fe08efe854984762b3c2285704b278e3adc Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 22 May 2024 16:18:03 +0000 Subject: [PATCH 1225/1356] packages: add neuron kmod for 6.1 kernel Signed-off-by: Ben Cressey --- packages/kmod-6.1-neuron/Cargo.toml | 20 +++++++ packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 58 +++++++++++++++++++ packages/kmod-6.1-neuron/latest-srpm-url.sh | 9 +++ .../kmod-6.1-neuron/neuron-modules-load.conf | 1 + .../neuron-systemd-modules-load.drop-in.conf | 2 + 5 files changed, 90 insertions(+) create mode 100644 packages/kmod-6.1-neuron/Cargo.toml create mode 100644 packages/kmod-6.1-neuron/kmod-6.1-neuron.spec create mode 100755 packages/kmod-6.1-neuron/latest-srpm-url.sh create mode 100644 packages/kmod-6.1-neuron/neuron-modules-load.conf create mode 100644 packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf diff --git a/packages/kmod-6.1-neuron/Cargo.toml b/packages/kmod-6.1-neuron/Cargo.toml new file mode 100644 index 00000000..32e69917 --- /dev/null +++ b/packages/kmod-6.1-neuron/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "kmod-6_1-neuron" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[lib] +path = "../packages.rs" + +[package.metadata.build-package] +package-name = "kmod-6.1-neuron" +releases-url = "https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html" + +[[package.metadata.build-package.external-files]] +url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.16.7.0.noarch.rpm" +sha512 = "8e66feb4051af31321c08b6663a950172da65c4e5b432c0b5609785be34ccb193c0eb50c9aadfeec8b6410ccbe05264a3fb6fc7cb66dc87b172bc5be5c4d92d0" + +[build-dependencies] +kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec new file mode 100644 index 00000000..fec66701 --- /dev/null +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -0,0 +1,58 @@ +Name: 
%{_cross_os}kmod-6.1-neuron +Version: 2.16.7.0 +Release: 1%{?dist} +Summary: Neuron drivers for the 6.1 kernel +License: GPL-2.0-only +URL: https://awsdocs-neuron.readthedocs-hosted.com/en/latest/ + +Source0: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-%{version}.noarch.rpm +Source1: neuron-modules-load.conf +Source2: neuron-systemd-modules-load.drop-in.conf + +BuildRequires: %{_cross_os}glibc-devel +BuildRequires: %{_cross_os}kernel-6.1-archive + +%description +%{summary}. + +%prep +rpm2cpio %{SOURCE0} | cpio -idmv +tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz + +%global neuron_sources usr/src/aws-neuronx-%{version} +%global kernel_sources %{_builddir}/kernel-devel + +%build +pushd %{_builddir}/%{neuron_sources} +%make_build \ + -C %{kernel_sources} \ + M=${PWD} \ + ARCH=%{_cross_karch} \ + CROSS_COMPILE=%{_cross_target}- \ + INSTALL_MOD_STRIP=1 \ + %{nil} +gzip -9 neuron.ko +popd + +%install +pushd %{_builddir}/%{neuron_sources} +export KVER="$(cat %{kernel_sources}/include/config/kernel.release)" +export KMODDIR="%{_cross_libdir}/modules/${KVER}/extra" +install -d "%{buildroot}${KMODDIR}" +install -p -m 0644 neuron.ko.gz "%{buildroot}${KMODDIR}" +popd + +# Install modules-load.d drop-in to autoload required kernel modules +install -d %{buildroot}%{_cross_libdir}/modules-load.d +install -p -m 0644 %{S:1} %{buildroot}%{_cross_libdir}/modules-load.d/neuron.conf + +# Install systemd-modules-load drop-in to ensure that depmod runs. +install -d %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d +install -p -m 0644 %{S:2} %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf + +%files +%license %{neuron_sources}/LICENSE +%{_cross_attribution_file} +%{_cross_libdir}/modules/*/extra/neuron.ko.gz +%{_cross_libdir}/modules-load.d/neuron.conf +%{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf diff --git a/packages/kmod-6.1-neuron/latest-srpm-url.sh b/packages/kmod-6.1-neuron/latest-srpm-url.sh new file mode 100755 index 00000000..5bb6c85e --- /dev/null +++ b/packages/kmod-6.1-neuron/latest-srpm-url.sh @@ -0,0 +1,9 @@ +#!/bin/sh +cmd=" +dnf install -q -y --releasever=latest yum-utils && +dnf download -q --repofrompath neuron,https://yum.repos.neuron.amazonaws.com --repo=neuron --urls aws-neuronx-dkms +" +docker run --rm amazonlinux:2023 bash -c "${cmd}" \ + | grep '^http' \ + | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ + | sed 's_:/_://_' diff --git a/packages/kmod-6.1-neuron/neuron-modules-load.conf b/packages/kmod-6.1-neuron/neuron-modules-load.conf new file mode 100644 index 00000000..aba019c2 --- /dev/null +++ b/packages/kmod-6.1-neuron/neuron-modules-load.conf @@ -0,0 +1 @@ +neuron diff --git a/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf b/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf new file mode 100644 index 00000000..0b130296 --- /dev/null +++ b/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf @@ -0,0 +1,2 @@ +[Service] +ExecStartPre=-/usr/bin/depmod From 7a1d400e1a79b384d05628803436d3cbbc829e4a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 23 May 2024 23:29:58 +0000 Subject: [PATCH 1226/1356] kmod-*-nvidia: specify AWS EULA as license Previously, the NVIDIA packages were special-cased with a "bring your own license" step, to support the case where a developer building the project might choose the terms of a different license under which to distribute the software or to make it available to end users. 
With the advent of out-of-tree builds, packages are moving into kits that contain pre-compiled binary RPMs, which means they will always have a header indicating which license applies. This commit makes it explicit what license those RPMs will have at the time they are published. Since the terms of the AWS EULA indicate that the software should be used to develop AMIs for use on AWS, add an install-time requirement for `aws-*` variants. Signed-off-by: Ben Cressey --- packages/kmod-5.10-nvidia/.gitignore | 1 + packages/kmod-5.10-nvidia/Cargo.toml | 4 ++++ packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 11 +++++++---- packages/kmod-5.15-nvidia/.gitignore | 1 + packages/kmod-5.15-nvidia/Cargo.toml | 4 ++++ packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 12 +++++++----- packages/kmod-6.1-nvidia/.gitignore | 1 + packages/kmod-6.1-nvidia/Cargo.toml | 4 ++++ packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 12 +++++++----- 9 files changed, 36 insertions(+), 14 deletions(-) create mode 100644 packages/kmod-5.10-nvidia/.gitignore create mode 100644 packages/kmod-5.15-nvidia/.gitignore create mode 100644 packages/kmod-6.1-nvidia/.gitignore diff --git a/packages/kmod-5.10-nvidia/.gitignore b/packages/kmod-5.10-nvidia/.gitignore new file mode 100644 index 00000000..0bcfb52f --- /dev/null +++ b/packages/kmod-5.10-nvidia/.gitignore @@ -0,0 +1 @@ +NVidiaEULAforAWS.pdf diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index fff89483..9edcbcc6 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -12,6 +12,10 @@ path = "../packages.rs" package-name = "kmod-5.10-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" +[[package.metadata.build-package.external-files]] +url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" +sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" + [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/470.239.06/NVIDIA-Linux-x86_64-470.239.06.run" sha512 = "92bdfb11db405071cd58deed2a0853448932657e256258e0a0bda5069f00485e2b6e49b4a0eeff499a4991be4f884273f3564c164110b1ed1f5d924506f13e2d" diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 0de7ac53..c592ddd7 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,8 +1,6 @@ %global tesla_470 470.239.06 %global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} %global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) -%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) Name: %{_cross_os}kmod-5.10-nvidia Version: 1.0.0 @@ -16,6 +14,7 @@ URL: http://www.nvidia.com/ # NVIDIA .run scripts from 0 to 199 Source0: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-x86_64-%{tesla_470}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-aarch64-%{tesla_470}.run +Source2: NVidiaEULAforAWS.pdf # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -36,7 +35,8 @@ BuildRequires: %{_cross_os}kernel-5.10-archive %package tesla-470 Summary: NVIDIA 470 Tesla driver Version: %{tesla_470} -License: %{spdx_id} +License: LicenseRef-NVIDIA-AWS-EULA +Requires: 
%{_cross_os}variant-platform(aws) Requires: %{name} %description tesla-470 @@ -47,6 +47,9 @@ Requires: %{name} # the driver in the current run sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_470}.run -x +# Add the license. +install -p -m 0644 %{S:2} . + %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -179,7 +182,7 @@ popd %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf %files tesla-470 -%license %{license_file} +%license NVidiaEULAforAWS.pdf %dir %{_cross_datadir}/nvidia/tesla/%{tesla_470} %dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %dir %{tesla_470_libdir} diff --git a/packages/kmod-5.15-nvidia/.gitignore b/packages/kmod-5.15-nvidia/.gitignore new file mode 100644 index 00000000..0bcfb52f --- /dev/null +++ b/packages/kmod-5.15-nvidia/.gitignore @@ -0,0 +1 @@ +NVidiaEULAforAWS.pdf diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 20d2c316..713bb11f 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -12,6 +12,10 @@ path = "../packages.rs" package-name = "kmod-5.15-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" +[[package.metadata.build-package.external-files]] +url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" +sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" + [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index e0df743c..c4f9313c 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -8,9 +8,6 @@ %global fm_arch %{_cross_arch} %endif -%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) -%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) - # With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ga10x.bin # and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. # The __spec_install_post macro will by default try to strip all binary files. @@ -31,6 +28,7 @@ URL: http://www.nvidia.com/ # NVIDIA .run scripts for kernel and userspace drivers Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run +Source2: NVidiaEULAforAWS.pdf # fabricmanager for NVSwitch Source10: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz @@ -64,7 +62,8 @@ Requires: %{name}-tesla(fabricmanager) %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} -License: %{spdx_id} +License: LicenseRef-NVIDIA-AWS-EULA +Requires: %{_cross_os}variant-platform(aws) Requires: %{name} Requires: %{name}-fabricmanager Provides: %{name}-tesla(fabricmanager) @@ -81,6 +80,9 @@ sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x # correct source is architecture-dependent. 
tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz +# Add the license. +install -p -m 0644 %{S:2} . + %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -233,7 +235,7 @@ popd %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf %files tesla-%{tesla_major} -%license %{license_file} +%license NVidiaEULAforAWS.pdf %license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin diff --git a/packages/kmod-6.1-nvidia/.gitignore b/packages/kmod-6.1-nvidia/.gitignore new file mode 100644 index 00000000..0bcfb52f --- /dev/null +++ b/packages/kmod-6.1-nvidia/.gitignore @@ -0,0 +1 @@ +NVidiaEULAforAWS.pdf diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index d1ab1a3c..9be7e87d 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -12,6 +12,10 @@ path = "../packages.rs" package-name = "kmod-6.1-nvidia" releases-url = "https://docs.nvidia.com/datacenter/tesla/" +[[package.metadata.build-package.external-files]] +url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" +sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" + [[package.metadata.build-package.external-files]] url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 5b08cd60..d6b8771c 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -8,9 +8,6 @@ %global fm_arch %{_cross_arch} %endif -%global spdx_id %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml spdx-id nvidia) -%global license_file %(bottlerocket-license-tool -l %{_builddir}/Licenses.toml path nvidia -p ./licenses) - # With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ga10x.bin # and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. # The __spec_install_post macro will by default try to strip all binary files. @@ -31,6 +28,7 @@ URL: http://www.nvidia.com/ # NVIDIA .run scripts for kernel and userspace drivers Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run +Source2: NVidiaEULAforAWS.pdf # fabricmanager for NVSwitch Source10: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz @@ -64,7 +62,8 @@ Requires: %{name}-tesla(fabricmanager) %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} -License: %{spdx_id} +License: LicenseRef-NVIDIA-AWS-EULA +Requires: %{_cross_os}variant-platform(aws) Requires: %{name} Requires: %{name}-fabricmanager Provides: %{name}-tesla(fabricmanager) @@ -81,6 +80,9 @@ sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x # correct source is architecture-dependent. tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz +# Add the license. +install -p -m 0644 %{S:2} . 
+ %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -233,7 +235,7 @@ popd %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf %files tesla-%{tesla_major} -%license %{license_file} +%license NVidiaEULAforAWS.pdf %license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin From b6021cfa0ffecb72f376b379aa3be4bd69a509dd Mon Sep 17 00:00:00 2001 From: Kush Upadhyay Date: Tue, 28 May 2024 11:52:18 +0000 Subject: [PATCH 1227/1356] README: Add k8s cmd to retrieve log archive Signed-off-by: Kush Upadhyay --- README.md | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 8e865b8a..570bfb1c 100644 --- a/README.md +++ b/README.md @@ -462,6 +462,11 @@ See the [`settings.aws.*` reference](https://bottlerocket.dev/en/os/latest/#/api ### Logs You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. + +For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). + +#### Generating logs + SSH to the Bottlerocket host or `apiclient exec admin bash` to access the admin container, then run: ```shell @@ -471,18 +476,29 @@ logdog This will write an archive of the logs to `/var/log/support/bottlerocket-logs.tar.gz`. This archive is accessible from host containers at `/.bottlerocket/support`. -You can use SSH to retrieve the file. -Once you have exited from the Bottlerocket host, run a command like: -```shell -ssh -i YOUR_KEY_FILE \ - ec2-user@YOUR_HOST \ - "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz -``` +#### Fetching logs -(If your instance isn't accessible through SSH, you can use [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html).) +There are multiple methods to retrieve the generated log archive. -For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). +- **Via SSH if already enabled** + + Once you have exited from the Bottlerocket host, run a command like: + + ```shell + ssh -i YOUR_KEY_FILE \ + ec2-user@YOUR_HOST \ + "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz + ``` + +- **With `kubectl get` if running Kubernetes** + + ```shell + kubectl get --raw \ + "/api/v1/nodes/NODE_NAME/proxy/logs/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz + ``` + +- **Using [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html) if your instance isn't accessible through SSH or Kubernetes** ### Kdump Support From f1f6cea03a766621c5a6aa268e22a31fe1c22e98 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 31 May 2024 17:25:18 +0000 Subject: [PATCH 1228/1356] kmod-*-nvidia: switch source for Fabric Manager binaries The RPMs vended on the developer portal align with Amazon Linux's consumption of Fabric Manager for AL2023. AL2023 is on different driver versions than Bottlerocket at the moment but this at least moves the build to use the same RPM distributions they consume. 
Signed-off-by: Matthew Yeazel --- packages/kmod-5.15-nvidia/Cargo.toml | 8 ++++---- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 17 +++++++++-------- packages/kmod-6.1-nvidia/Cargo.toml | 8 ++++---- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 17 +++++++++-------- 4 files changed, 26 insertions(+), 24 deletions(-) diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 713bb11f..f7179a68 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -27,13 +27,13 @@ sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed1664827347 force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-535.161.07-archive.tar.xz" -sha512 = "868b35d567e4c6dccbff0f7e8f74bc55781c8d71db995fd9e471829afec0b44fd430caba964377052678e244d18ea999133487f9a3c50c7289f381480b24c55d" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.161.07-1.x86_64.rpm" +sha512 = "6710c40b0e50f974697d2c7078281cd2d28a685c138c20cfe9da4696431a5aceb56f04a30e29f4fe05f2b5eddccb7e456897053051ad91d89d40383629525245" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-535.161.07-archive.tar.xz" -sha512 = "f37f7a24e31dd6ed184d1041616abb8cfcb0ddaec79778930db79bbef8b23b3d468daaa9c156a6cf7a7f2ffc0507e78e2bb6215f70bc39d11bb0ee16c5ef4c82" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.161.07-1.aarch64.rpm" +sha512 = "3ac673b6f38fd5fbdca021fbc910b6ec6a506dd34ec814ee0003da59de600d044b11c5a97f087080c8581910db33aef71bace5ddb601ee39474d7fde3deeeaa2" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index c4f9313c..f1a17afb 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -31,8 +31,8 @@ Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64- Source2: NVidiaEULAforAWS.pdf # fabricmanager for NVSwitch -Source10: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz -Source11: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-%{tesla_ver}-archive.tar.xz +Source10: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-%{tesla_ver}-1.x86_64.rpm +Source11: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-%{tesla_ver}-1.aarch64.rpm # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -76,9 +76,10 @@ Provides: %{name}-tesla(fabricmanager) # the driver in the current run sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x -# Extract fabricmanager archive. Use `tar` rather than `%%setup` since the +# Extract fabricmanager from the rpm via cpio rather than `%%setup` since the # correct source is architecture-dependent. 
-tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz +mkdir fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +rpm2cpio %{_sourcedir}/nvidia-fabric-manager-%{tesla_ver}-1.%{_cross_arch}.rpm | cpio -idmV -D fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive # Add the license. install -p -m 0644 %{S:2} . @@ -213,11 +214,11 @@ popd # Begin NVIDIA fabric manager binaries and topologies pushd fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive -install -p -m 0755 bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin -install -p -m 0755 bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 usr/bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 usr/bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch -for t in share/nvidia/nvswitch/*_topology ; do +for t in usr/share/nvidia/nvswitch/*_topology ; do install -p -m 0644 "${t}" %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch done @@ -236,7 +237,7 @@ popd %files tesla-%{tesla_major} %license NVidiaEULAforAWS.pdf -%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt +%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/usr/share/doc/nvidia-fabricmanager/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{_cross_libdir}/nvidia/tesla diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 9be7e87d..0350b018 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -27,13 +27,13 @@ sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed1664827347 force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-535.161.07-archive.tar.xz" -sha512 = "868b35d567e4c6dccbff0f7e8f74bc55781c8d71db995fd9e471829afec0b44fd430caba964377052678e244d18ea999133487f9a3c50c7289f381480b24c55d" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.161.07-1.x86_64.rpm" +sha512 = "6710c40b0e50f974697d2c7078281cd2d28a685c138c20cfe9da4696431a5aceb56f04a30e29f4fe05f2b5eddccb7e456897053051ad91d89d40383629525245" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-535.161.07-archive.tar.xz" -sha512 = "f37f7a24e31dd6ed184d1041616abb8cfcb0ddaec79778930db79bbef8b23b3d468daaa9c156a6cf7a7f2ffc0507e78e2bb6215f70bc39d11bb0ee16c5ef4c82" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.161.07-1.aarch64.rpm" +sha512 = "3ac673b6f38fd5fbdca021fbc910b6ec6a506dd34ec814ee0003da59de600d044b11c5a97f087080c8581910db33aef71bace5ddb601ee39474d7fde3deeeaa2" force-upstream = true [build-dependencies] diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index d6b8771c..105d224e 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -31,8 +31,8 @@ Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64- Source2: NVidiaEULAforAWS.pdf # fabricmanager for NVSwitch -Source10: 
https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-x86_64/fabricmanager-linux-x86_64-%{tesla_ver}-archive.tar.xz -Source11: https://developer.download.nvidia.com/compute/nvidia-driver/redist/fabricmanager/linux-sbsa/fabricmanager-linux-sbsa-%{tesla_ver}-archive.tar.xz +Source10: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-%{tesla_ver}-1.x86_64.rpm +Source11: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-%{tesla_ver}-1.aarch64.rpm # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in @@ -76,9 +76,10 @@ Provides: %{name}-tesla(fabricmanager) # the driver in the current run sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x -# Extract fabricmanager archive. Use `tar` rather than `%%setup` since the +# Extract fabricmanager from the rpm via cpio rather than `%%setup` since the # correct source is architecture-dependent. -tar -xf %{_sourcedir}/fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive.tar.xz +mkdir fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +rpm2cpio %{_sourcedir}/nvidia-fabric-manager-%{tesla_ver}-1.%{_cross_arch}.rpm | cpio -idmV -D fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive # Add the license. install -p -m 0644 %{S:2} . @@ -213,11 +214,11 @@ popd # Begin NVIDIA fabric manager binaries and topologies pushd fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive -install -p -m 0755 bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin -install -p -m 0755 bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 usr/bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 usr/bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch -for t in share/nvidia/nvswitch/*_topology ; do +for t in usr/share/nvidia/nvswitch/*_topology ; do install -p -m 0644 "${t}" %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch done @@ -236,7 +237,7 @@ popd %files tesla-%{tesla_major} %license NVidiaEULAforAWS.pdf -%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/third-party-notices.txt +%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/usr/share/doc/nvidia-fabricmanager/third-party-notices.txt %dir %{_cross_datadir}/nvidia/tesla %dir %{_cross_libexecdir}/nvidia/tesla/bin %dir %{_cross_libdir}/nvidia/tesla From ee767c7cb4eaaf3046c2cf368afaaaba2e6669e1 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 29 May 2024 18:04:45 +0000 Subject: [PATCH 1229/1356] kernel-6.1: update to 6.1.91 Rebase to Amazon Linux upstream version 6.1.91-99.172.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 29962519..1dbc29b6 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/4deb8487627a15345b5963c9825994ff2ec7c42015380406ab8640590242fe7c/kernel-6.1.90-99.173.amzn2023.src.rpm" -sha512 = "a055bc88f4d99dd8df2b1272eaecdeb2a25e12e0f5a6639eba8c16ce6dcec0d2b2c8371dc87b507058ff232138e5ab5319a9b5b95314111d3fc5c2f25161c3e4" +url = "https://cdn.amazonlinux.com/al2023/blobstore/086e4ee2c793afa14e68663f7af853027a04e714f716931d7689976f9c854f38/kernel-6.1.91-99.172.amzn2023.src.rpm" +sha512 = "aaced4e33283aeb31cbfeb9ba8faebe5e87299e8b882526966065903f9582d00dfc4340aa0ecefce1173a156dea0025caf75781df2617edb5489d79cd5c951e3" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 5d70715e..f2f07945 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.90 +Version: 6.1.91 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/4deb8487627a15345b5963c9825994ff2ec7c42015380406ab8640590242fe7c/kernel-6.1.90-99.173.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/086e4ee2c793afa14e68663f7af853027a04e714f716931d7689976f9c854f38/kernel-6.1.91-99.172.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 3240796817298cd1a30d33543926d8fd70181333 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Fri, 31 May 2024 15:15:34 +0000 Subject: [PATCH 1230/1356] Update golangci/golangci-lint-action to v6 --- .github/workflows/golangci-lint.yaml | 4 ++-- .golangci.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 6adbf290..107d9da9 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -19,12 +19,12 @@ jobs: go-version: 1.21 - uses: actions/checkout@v4 - name: lint-host-ctr - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: latest working-directory: sources/host-ctr - name: lint-ecs-gpu-init - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: latest working-directory: sources/ecs-gpu-init diff --git a/.golangci.yaml b/.golangci.yaml index 964e117a..471707f6 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -8,7 +8,7 @@ linters: - staticcheck - unconvert - unused - - vet + - govet run: timeout: 3m From 3aa016d4d079c316792c311be4eeb101157b0a48 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Mon, 3 Jun 2024 00:19:40 +0000 Subject: [PATCH 1231/1356] kmod-*-nvidia: add git ignore for rpms The new source for Fabric Manager is an rpm file so this configures git to ignore these source files. 
--- packages/kmod-5.15-nvidia/.gitignore | 1 + packages/kmod-6.1-nvidia/.gitignore | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/kmod-5.15-nvidia/.gitignore b/packages/kmod-5.15-nvidia/.gitignore index 0bcfb52f..64d9ed83 100644 --- a/packages/kmod-5.15-nvidia/.gitignore +++ b/packages/kmod-5.15-nvidia/.gitignore @@ -1 +1,2 @@ NVidiaEULAforAWS.pdf +*.rpm diff --git a/packages/kmod-6.1-nvidia/.gitignore b/packages/kmod-6.1-nvidia/.gitignore index 0bcfb52f..64d9ed83 100644 --- a/packages/kmod-6.1-nvidia/.gitignore +++ b/packages/kmod-6.1-nvidia/.gitignore @@ -1 +1,2 @@ NVidiaEULAforAWS.pdf +*.rpm From f9f42f49b1d4f82ac8559364ff2f6dc59161cdc7 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 31 May 2024 00:24:20 +0000 Subject: [PATCH 1232/1356] kmod-5.10-nvidia: update driver to 470.256.02 Signed-off-by: Matthew Yeazel yeazelm@amazon.com Reviewed-by: Arnaldo Garcia Rincon Reviewed-by: Ben Cressey Reviewed-by: Martin Harriman (cherry picked from commit dea6811e8a9f5e7a74e0368dc73186d427e5dee0) --- packages/kmod-5.10-nvidia/Cargo.toml | 8 ++++---- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 9edcbcc6..07cc9b72 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -17,13 +17,13 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.239.06/NVIDIA-Linux-x86_64-470.239.06.run" -sha512 = "92bdfb11db405071cd58deed2a0853448932657e256258e0a0bda5069f00485e2b6e49b4a0eeff499a4991be4f884273f3564c164110b1ed1f5d924506f13e2d" +url = "https://us.download.nvidia.com/tesla/470.256.02/NVIDIA-Linux-x86_64-470.256.02.run" +sha512 = "a837946dd24d7945c1962a695f1f31965f3ceb6927f52cd08fd51b8db138b7a888bbeab69243f5c8468a7bd7ccd47f5dbdb48a1ca81264866c1ebb7d88628f88" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.239.06/NVIDIA-Linux-aarch64-470.239.06.run" -sha512 = "e448c18cf243233387d3bde4fff4d6fa1eaccc743706f18fd3c6431ce73c8f4ac49009a18ff6bd7796456ce719905bb7611548bf68d61259285f5d5f1d061c0f" +url = "https://us.download.nvidia.com/tesla/470.256.02/NVIDIA-Linux-aarch64-470.256.02.run" +sha512 = "38eee5933355c34ca816a2ac0fbc4f55c19c20e1322891bfc98cb6b37d99a31218eea9314877ab0e3cf3ac6eb61f9d9d4d09d0af304b689f18b4efa721b65d5c" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index c592ddd7..542b1473 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,4 +1,4 @@ -%global tesla_470 470.239.06 +%global tesla_470 470.256.02 %global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} %global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} From 8a00c8fd09bba5deb7b0840666da6ceca6a61df8 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 31 May 2024 19:55:36 +0000 Subject: [PATCH 1233/1356] kmod-5.15-nvidia: update driver to 535.183.01 Signed-off-by: Matthew Yeazel Reviewed-by: Arnaldo Garcia Rincon Reviewed-by: Ben Cressey Reviewed-by: Martin Harriman (cherry picked from commit 
b43675288be2c1c9f3d0e4aa5fca565882c5c3a1) --- packages/kmod-5.15-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index f7179a68..f26a5a36 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" -sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" +url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-x86_64-535.183.01.run" +sha512 = "02b6b679f4fc1d5305f32fca8ce0875eef04cb99f5611d0bb85ac7607ecdd5b2aa4d60b51bf47546477464531a07fffa5bf3db3859868648bd5e86565d85afbb" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535.161.07.run" -sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" +url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-aarch64-535.183.01.run" +sha512 = "d2ac1be8c19b359023c31941374911f3adfe1be34aa2821ef582df4c854ac4eefbbcb10aa22583ac8c9d5caf9326bda12ed1ce6343d67479ed37a4887bd17b5e" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.161.07-1.x86_64.rpm" -sha512 = "6710c40b0e50f974697d2c7078281cd2d28a685c138c20cfe9da4696431a5aceb56f04a30e29f4fe05f2b5eddccb7e456897053051ad91d89d40383629525245" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.01-1.x86_64.rpm" +sha512 = "d52879d1e552b949a529ede9c4ce3e7b66af0df96e8f43906f211673b99815561c83a7c382be17950b1308457ca496ce49adca41766f808cc5a340471353494b" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.161.07-1.aarch64.rpm" -sha512 = "3ac673b6f38fd5fbdca021fbc910b6ec6a506dd34ec814ee0003da59de600d044b11c5a97f087080c8581910db33aef71bace5ddb601ee39474d7fde3deeeaa2" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.01-1.aarch64.rpm" +sha512 = "75e1d306b9aa6cc8737bce50f39dc641f64de6a944c50f2c9706345c656f203c4706414dcb51def7671f0fd02fd18605aa3d62958b690d2705cb7011c54ff48e" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index f1a17afb..1e9a1750 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 161 -%global tesla_patch 07 +%global tesla_minor 183 +%global tesla_patch 01 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From de2f932395e8449458fdedde33aba39795ed4d0c Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 
31 May 2024 19:56:49 +0000 Subject: [PATCH 1234/1356] kmod-6.1-nvidia: update driver to 535.183.01 Signed-off-by: Matthew Yeazel Reviewed-by: Arnaldo Garcia Rincon Reviewed-by: Ben Cressey Reviewed-by: Martin Harriman (cherry picked from commit ee10e0cc2ed7b12832e7d502de7d0fa9c83fd52a) --- packages/kmod-6.1-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 0350b018..a105eef2 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-x86_64-535.161.07.run" -sha512 = "4e8dd709157c15519f01a8d419daa098da64666d20a80edf3894239707ff1e83b48553f3edc5d567109d36e52b31ac7c0c7218ea77862a04e89aa3cc1f16a5ba" +url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-x86_64-535.183.01.run" +sha512 = "02b6b679f4fc1d5305f32fca8ce0875eef04cb99f5611d0bb85ac7607ecdd5b2aa4d60b51bf47546477464531a07fffa5bf3db3859868648bd5e86565d85afbb" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.161.07/NVIDIA-Linux-aarch64-535.161.07.run" -sha512 = "bb96a28b45197003480ae223c71a5426ef5258a31eaa485cab0cf4b86bed166482734784f20c6370a1155f3ff991652cac15f1b1083d2fb056677e6881b219e2" +url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-aarch64-535.183.01.run" +sha512 = "d2ac1be8c19b359023c31941374911f3adfe1be34aa2821ef582df4c854ac4eefbbcb10aa22583ac8c9d5caf9326bda12ed1ce6343d67479ed37a4887bd17b5e" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.161.07-1.x86_64.rpm" -sha512 = "6710c40b0e50f974697d2c7078281cd2d28a685c138c20cfe9da4696431a5aceb56f04a30e29f4fe05f2b5eddccb7e456897053051ad91d89d40383629525245" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.01-1.x86_64.rpm" +sha512 = "d52879d1e552b949a529ede9c4ce3e7b66af0df96e8f43906f211673b99815561c83a7c382be17950b1308457ca496ce49adca41766f808cc5a340471353494b" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.161.07-1.aarch64.rpm" -sha512 = "3ac673b6f38fd5fbdca021fbc910b6ec6a506dd34ec814ee0003da59de600d044b11c5a97f087080c8581910db33aef71bace5ddb601ee39474d7fde3deeeaa2" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.01-1.aarch64.rpm" +sha512 = "75e1d306b9aa6cc8737bce50f39dc641f64de6a944c50f2c9706345c656f203c4706414dcb51def7671f0fd02fd18605aa3d62958b690d2705cb7011c54ff48e" force-upstream = true [build-dependencies] diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 105d224e..4f5b149b 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 161 -%global tesla_patch 07 +%global tesla_minor 183 +%global 
tesla_patch 01 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From 79a74b52529d20ae539a1788a3c17d0edf819619 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Fri, 31 May 2024 22:05:08 +0000 Subject: [PATCH 1235/1356] Drop k8s 1.26 metal and VMWare variants This removes the metal and VMware 1.26 variants. This version of Kubernetes has gone end-of-life and these variants are no longer supported. Signed-off-by: Gavin Inglis --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index 570bfb1c..14ec34eb 100644 --- a/README.md +++ b/README.md @@ -85,14 +85,12 @@ The following variants support ECS: We also have variants that are designed to be Kubernetes worker nodes in VMware: -* `vmware-k8s-1.26` * `vmware-k8s-1.27` * `vmware-k8s-1.28` * `vmware-k8s-1.29` The following variants are designed to be Kubernetes worker nodes on bare metal: -* `metal-k8s-1.26` * `metal-k8s-1.27` * `metal-k8s-1.28` * `metal-k8s-1.29` @@ -100,7 +98,7 @@ The following variants are designed to be Kubernetes worker nodes on bare metal: The following variants are no longer supported: * All Kubernetes variants using Kubernetes 1.22 and earlier -* Bare metal and VMware variants using Kubernetes 1.25 and earlier +* Bare metal and VMware variants using Kubernetes 1.26 and earlier We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). From 0df6b0dae0417fb086c9cf694c6d15f035f75733 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Fri, 31 May 2024 23:13:40 +0000 Subject: [PATCH 1236/1356] docs: add k8s-1.30 variants Signed-off-by: Gavin Inglis --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 14ec34eb..7d347e5d 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.27` * `aws-k8s-1.28` * `aws-k8s-1.29` +* `aws-k8s-1.30` * `aws-k8s-1.23-nvidia` * `aws-k8s-1.24-nvidia` * `aws-k8s-1.25-nvidia` @@ -75,6 +76,7 @@ The following variants support EKS, as described above: * `aws-k8s-1.27-nvidia` * `aws-k8s-1.28-nvidia` * `aws-k8s-1.29-nvidia` +* `aws-k8s-1.30-nvidia` The following variants support ECS: @@ -88,6 +90,7 @@ We also have variants that are designed to be Kubernetes worker nodes in VMware: * `vmware-k8s-1.27` * `vmware-k8s-1.28` * `vmware-k8s-1.29` +* `vmware-k8s-1.30` The following variants are designed to be Kubernetes worker nodes on bare metal: From 926da53e5767d5729124724193dc25e9f8d16b04 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Thu, 6 Jun 2024 18:42:52 +0000 Subject: [PATCH 1237/1356] kernel-5.10: update to 5.10.217 Rebase to Amazon Linux upstream version 5.10.217-205.860.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index ca59c720..8fd8a3e8 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/0a25c63e615af935b4980fb447cdd9e44e2fab61ef15b47c60d34e50907e8a12/kernel-5.10.216-204.855.amzn2.src.rpm" -sha512 = "c32d9c1b3bddcc4a9f5c014be07681e7990d5cedccf7242e7a943f8f2fb270ae419e9bec44215ff1df4d8afede5af5b16926507af5b86f9dd8982b04648b2cde" +url = "https://cdn.amazonlinux.com/blobstore/0e8dd42b36d60da0f50a2bce7fecca30610adf37e5a35585e39d2f318cdb1e76/kernel-5.10.217-205.860.amzn2.src.rpm" +sha512 = "e10c0099384cc5ee8b153594101aea35df8541ec06829472650bb15af72550006d8c436756ebfaa7a40a206bc823dd1edd5a37b076086d8fc860ee2ac4c441c8" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 61e15285..d3ddb136 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.216 +Version: 5.10.217 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/0a25c63e615af935b4980fb447cdd9e44e2fab61ef15b47c60d34e50907e8a12/kernel-5.10.216-204.855.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/0e8dd42b36d60da0f50a2bce7fecca30610adf37e5a35585e39d2f318cdb1e76/kernel-5.10.217-205.860.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 7086032535113f8cb1c1df66ffdeef46b630b0e9 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Mon, 10 Jun 2024 21:29:29 +0000 Subject: [PATCH 1238/1356] kernel-6.1: update to 6.1.92 Rebase to Amazon Linux upstream version 6.1.92-99.174.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 1dbc29b6..4a973e08 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/086e4ee2c793afa14e68663f7af853027a04e714f716931d7689976f9c854f38/kernel-6.1.91-99.172.amzn2023.src.rpm" -sha512 = "aaced4e33283aeb31cbfeb9ba8faebe5e87299e8b882526966065903f9582d00dfc4340aa0ecefce1173a156dea0025caf75781df2617edb5489d79cd5c951e3" +url = "https://cdn.amazonlinux.com/al2023/blobstore/56c452d9992a4b8c25e5ff09f38a1464761196c1462a341e438301b6d56bfe50/kernel-6.1.92-99.174.amzn2023.src.rpm" +sha512 = "134d231c7c87e9136a6ceb2f125bd7d2163d7b73590d821f0d2192effd1a5f0850c612e0f9e03bcbd92f47014fd99fe6e9e8a1b45c5e01dab6d074faf74b4df4" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index f2f07945..ce68c408 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.91 +Version: 6.1.92 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/086e4ee2c793afa14e68663f7af853027a04e714f716931d7689976f9c854f38/kernel-6.1.91-99.172.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/56c452d9992a4b8c25e5ff09f38a1464761196c1462a341e438301b6d56bfe50/kernel-6.1.92-99.174.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 5dd0e6debadb8baa0fe834a549f74c0997fc4163 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 14 Jun 2024 21:00:59 +0000 Subject: [PATCH 1239/1356] tools: removed unnecessary helper programs and files Signed-off-by: Arnaldo Garcia Rincon --- .../qemu-x86-console-bootconfig.data | Bin 60 -> 0 bytes tools/pubsys/Infra.toml.example | 84 ----- .../policies/repo-expiration/2w-2w-1w.toml | 3 - tools/pubsys/policies/ssm/README.md | 39 -- tools/pubsys/policies/ssm/defaults.toml | 7 - .../support/vmware/import_spec.template | 16 - tools/start-local-vm | 357 ------------------ 7 files changed, 506 deletions(-) delete mode 100644 tools/bootconfig/qemu-x86-console-bootconfig.data delete mode 100644 tools/pubsys/Infra.toml.example delete mode 100644 tools/pubsys/policies/repo-expiration/2w-2w-1w.toml delete mode 100644 tools/pubsys/policies/ssm/README.md delete mode 100644 tools/pubsys/policies/ssm/defaults.toml delete mode 100644 tools/pubsys/support/vmware/import_spec.template delete mode 100755 tools/start-local-vm diff --git a/tools/bootconfig/qemu-x86-console-bootconfig.data b/tools/bootconfig/qemu-x86-console-bootconfig.data deleted file mode 100644 index b6aa9ebca270586ff80010c28e8e7e97ae142b06..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 60 zcmc~!Ey_#HQK;rp0D|QFyyELqk&|1A{yZu39bz1`P%V1~+a724yFI N{}5+?KQ~WzE&u{J4%Ywx diff --git a/tools/pubsys/Infra.toml.example b/tools/pubsys/Infra.toml.example deleted file mode 100644 index 9b5b5e8e..00000000 --- a/tools/pubsys/Infra.toml.example +++ /dev/null @@ -1,84 +0,0 @@ -# This is an example infrastructure configuration for pubsys, the tool that -# creates repos when you call `cargo make repo`. Save a copy as `Infra.toml` -# at the root of the repo, then edit the settings below to match your use case. - -# You can have any number of repos defined and build a specific one by running like this: -# cargo make -e PUBLISH_REPO=myrepo repo -[repo.default] -# URL to your root role JSON file; can be a file:// URL for local files. If -# you don't specify one here, a file will be generated for you under /roles. -# For production use, you should store them somewhere safer. -root_role_url = "https://example.com/root.json" -# SHA512 checksum of your root role JSON file. -root_role_sha512 = "0123456789abcdef" - -# For reference, this is the Bottlerocket root role: -#root_role_url = "https://cache.bottlerocket.aws/root.json" -#root_role_sha512 = "a3c58bc73999264f6f28f3ed9bfcb325a5be943a782852c7d53e803881968e0a4698bd54c2f125493f4669610a9da83a1787eb58a8303b2ee488fa2a3f7d802f" - -# pubsys assumes a single publication key that signs the snapshot, targets, -# and timestamp roles. Here you specify where that key lives so we can sign -# the created repo. If you don't specify one here, a key will be generated for -# you under /keys. For production use, you should use a key stored in a -# trusted service like KMS or SSM. 
-# (Need inline table syntax until this is fixed: https://github.com/alexcrichton/toml-rs/issues/225) -signing_keys = { file = { path = "/home/user/key.pem" } } -#signing_keys = { kms = { key_id = "abc-def-123" } } -#signing_keys = { ssm = { parameter = "/my/parameter" } } - -# If these URLs are uncommented, the repo will be pulled and used as a starting -# point, and your images (and related files) will be added as a new update in -# the created repo. Otherwise, we build a new repo from scratch. -metadata_base_url = "https://example.com/" -targets_url = "https://example.com/targets/" - -[aws] -# The list of regions in which you want to publish AMIs. We register an AMI in -# the first region and copy it to all other regions. -regions = ["us-west-2", "us-east-1", "us-east-2"] -# If specified, we use this named profile from ~/.aws/credentials, rather than -# the default path of trying credentials from the environment, from a -# credential process, from the default profile, and then from an IAM instance -# profile. -profile = "my-profile" -# If specified, we assume this role before making any API calls. -role = "arn:aws:iam::012345678901:role/assume-global" -# If specified, this string will be prefixed on all parameter names published to SSM. -ssm_prefix = "/your/prefix/here" - -[aws.region.us-west-2] -# If specified, we assume this role before making any API calls in this region. -# (This is assumed after the "global" aws.role, if that is also specified.) -role = "arn:aws:iam::012345678901:role/assume-regional" - -[vmware] -# A list of datacenter names to which you would like to upload an OVA. These -# are "friendly" names, and do not need to be the actual name of the -# software-defined datacenter, but can be. For example, you may have have -# multiple vSphere instances with datacenters that still carry the default -# "SDDC-Datacenter" name; this field allows you to differentiate them. -datacenters = ["north", "south"] - -# *** -# GOVC_* environment variables set in the current environment override any -# configuration set in the sections below! -# *** - -# Optional common configuration -# This configuration allow values to be set in a single place if they are common in -# multiple datacenters. They can be overridden in the datacenter's block below. -[vmware.common] -network = "a_network" - -# Datacenter specific configuration -# This specifies all of the values necessary to communicate with this -# datacenter via `govc`. Each value maps directly to the GOVC_* environment -# variable in the corresponding comment. If any of these values is missing and -# isn't in the environment, we will look for them in `vmware.common`. 
-[vmware.datacenter.north] -vsphere_url = "https://vcenter.1234.vmwarevmc.com" # GOVC_URL -datacenter = "SDDC-Datacenter" # GOVC_DATACENTER -datastore = "WorkloadDatastore" # GOVC_DATASTORE -network = "sddc-cgw-network-1" # GOVC_NETWORK -folder = "my_folder" # GOVC_FOLDER -resource_pool = "/SDDC-Datacenter/host/Cluster/Resources/Compute-ResourcePool" # GOVC_RESOURCE_POOL diff --git a/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml b/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml deleted file mode 100644 index 7a3a7b85..00000000 --- a/tools/pubsys/policies/repo-expiration/2w-2w-1w.toml +++ /dev/null @@ -1,3 +0,0 @@ -snapshot_expiration = '2 weeks' -targets_expiration = '2 weeks' -timestamp_expiration = '1 week' diff --git a/tools/pubsys/policies/ssm/README.md b/tools/pubsys/policies/ssm/README.md deleted file mode 100644 index 9760125f..00000000 --- a/tools/pubsys/policies/ssm/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Parameter templates - -Files in this directory contain template strings that are used to generate SSM parameter names and values. -You can pass a different directory to `pubsys` to use a different set of parameters. - -The directory is expected to contain a file named `defaults.toml` with a table entry per parameter, like this: - -```toml -[[parameter]] -name = "{variant}/{arch}/{image_version}/image_id" -value = "{image_id}" -``` - -The `name` and `value` can contain template variables that will be replaced with information from the current build and from the AMI registered from that build. - -The available variables include: -* `variant`, for example "aws-ecs-1" -* `arch`, for example "x86_64" or "arm64". - * Note: "amd64" and "aarch64" are mapped to "x86_64" and "arm64", respectively, to match the names used by EC2. -* `image_id`, for example "ami-0123456789abcdef0" -* `image_name`, for example "bottlerocket-aws-ecs-1-x86_64-v0.5.0-e0ddf1b" -* `image_version`, for example "0.5.0-e0ddf1b" -* `region`, for example "us-west-2" - -# Conditional parameters - -You can also list parameters that only apply to specific variants or architectures. -To do so, add `variant` or `arch` keys (or both) to your parameter definition. -The parameter will only be populated if the current `variant` or `arch` matches one of the values in the list. -(If both `variant` and `arch` are listed, the build must match an entry from both lists.) 
- -For example, to add an extra parameter that's only set for "aarch64" builds of the "aws-ecs-1" variant: -```toml -[[parameter]] -arch = ["aarch64"] -variant = ["aws-ecs-1"] -name = "/a/special/aarch64/ecs/parameter" -value = "{image_name}" -``` diff --git a/tools/pubsys/policies/ssm/defaults.toml b/tools/pubsys/policies/ssm/defaults.toml deleted file mode 100644 index 5e972276..00000000 --- a/tools/pubsys/policies/ssm/defaults.toml +++ /dev/null @@ -1,7 +0,0 @@ -[[parameter]] -name = "{variant}/{arch}/{image_version}/image_id" -value = "{image_id}" - -[[parameter]] -name = "{variant}/{arch}/{image_version}/image_version" -value = "{image_version}" diff --git a/tools/pubsys/support/vmware/import_spec.template b/tools/pubsys/support/vmware/import_spec.template deleted file mode 100644 index 9b24bfe6..00000000 --- a/tools/pubsys/support/vmware/import_spec.template +++ /dev/null @@ -1,16 +0,0 @@ -\{ - "DiskProvisioning": "flat", - "IPAllocationPolicy": "dhcpPolicy", - "IPProtocol": "IPv4", - "NetworkMapping": [ - \{ - "Name": "VM Network", - "Network": "{ network }" - } - ], - "MarkAsTemplate": { mark_as_template }, - "PowerOn": false, - "InjectOvfEnv": false, - "WaitForIP": false, - "Name": null -} diff --git a/tools/start-local-vm b/tools/start-local-vm deleted file mode 100755 index b6f0875a..00000000 --- a/tools/start-local-vm +++ /dev/null @@ -1,357 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2054 # Arrays are formatted for passing args to other tools - -# -# Common error handling -# - -exit_trap_cmds=() - -on_exit() { - exit_trap_cmds+=( "$1" ) -} - -run_exit_trap_cmds() { - for cmd in "${exit_trap_cmds[@]}"; do - eval "${cmd}" - done -} - -trap run_exit_trap_cmds exit - -bail() { - >&2 echo "$@" - exit 1 -} - -shopt -s nullglob - -arch=${BUILDSYS_ARCH} -variant=${BUILDSYS_VARIANT} -product_name=${BUILDSYS_NAME:-bottlerocket} -host_port_forwards=tcp::2222-:22 -vm_mem=4G -vm_cpus=4 -force_extract= -declare -A extra_files=() - -os_image= -data_image= - - -if ! git_toplevel=$(git rev-parse --show-toplevel); then - bail "Failed to get the root of the repo." -else - readonly repo_root="${git_toplevel}" -fi - -show_usage() { - echo "\ -usage: ${0##*/} [--arch BUILDSYS_ARCH] [--variant BUILDSYS_VARIANT] - [--host-port-forwards HOST_PORT_FWDS] - [--product-name NAME] - [--vm-memory VM_MEMORY] [--vm-cpus VM_CPUS] - [--force-extract] - [--inject-file LOCAL_PATH[:IMAGE_PATH]]... - [--firmware-code PATH] [--firmware-vars PATH] - [--os-image-size SIZE] [--data-image-size SIZE] - -Launch a local virtual machine from a Bottlerocket image. - -Options: - - --arch architecture of the Bottlerocket image (must match the - host architecture ($(uname -m)); may be omitted if the - BUILDSYS_ARCH environment variable is set) - --variant Bottlerocket variant to run (may be omitted if the - BUILDSYS_VARIANT environment variable is set) - --product-name short product name used as prefix for file and directory - names (defaults to the BUILDSYS_NAME environment variable - or 'bottlerocket' when that is unset) - --host-port-forwards - list of host ports to forward to the VM; HOST_PORT_FWDS - must be a valid QEMU port forwarding specifier (default - is ${host_port_forwards}) - --vm-memory amount of memory to assign to the VM; VM_MEMORY must be - a valid QEMU memory specifier (default is ${vm_mem}) - --vm-cpus number of CPUs to spawn for VM (default is ${vm_cpus}) - --force-extract force recreation of the extracted Bottlerocket image, - e.g. 
to force first boot behavior - --inject-file adds a local file to the private partition of the - Bottlerocket image before launching the virtual machine - (may be given multiple times); existing data on the - private partition will be lost - --firmware-code override the default firmware executable file - --firmware-vars override the initial firmware variable storage file - --os-image-size resize the OS disk image to the given size (e.g. 4096M) - --data-image-size resize the data disk image to the given size (e.g. 20G) - --help shows this usage text - -By default, the virtual machine's port 22 (SSH) will be exposed via the local -port 2222, i.e. if the Bottlerocket admin container has been enabled via -user-data, it can be reached by running - - ssh -p 2222 ec2-user@localhost - -from the host. - -Usage example: - - ${0##*/} --arch $(uname -m) --variant metal-dev --inject-file net.toml -" -} - -usage_error() { - local error=$1 - - { - if [[ -n ${error} ]]; then - printf "%s\n\n" "${error}" - fi - show_usage - } >&2 - - exit 1 -} - -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - -h|--help) - show_usage; exit 0 ;; - --arch) - shift; arch=$1 ;; - --variant) - shift; variant=$1 ;; - --product-name) - shift; product_name=$1 ;; - --host-port-forwards) - shift; host_port_forwards=$1 ;; - --vm-memory) - shift; vm_mem=$1 ;; - --vm-cpus) - shift; vm_cpus=$1 ;; - --force-extract) - force_extract=yes ;; - --inject-file) - shift; local file_spec=$1 - if [[ ${file_spec} = *:* ]]; then - local local_file=${file_spec%%:*} - local image_file=${file_spec#*:} - else - local local_file=${file_spec} - local image_file=${file_spec##*/} - fi - extra_files[${local_file}]=${image_file} - ;; - --firmware-code) - shift; firmware_code=$1 - ;; - --firmware-vars) - shift; firmware_vars=$1 - ;; - --os-image-size) - shift; os_image_size=$1 - ;; - --data-image-size) - shift; data_image_size=$1 - ;; - *) - usage_error "unknown option '$1'" ;; - esac - shift - done - - [[ -n ${arch} ]] || usage_error 'Architecture needs to be set via either --arch or BUILDSYS_ARCH.' - [[ -n ${variant} ]] || usage_error 'Variant needs to be set via either --variant or BUILDSYS_VARIANT.' - - declare -l host_arch - host_arch=$(uname -m) - [[ ${arch} == "${host_arch}" ]] || bail "Architecture needs to match host architecture (${host_arch}) for hardware virtualization." - - for path in "${!extra_files[@]}"; do - [[ -e ${path} ]] || bail "Cannot find local file '${path}' to inject." - done -} - -extract_image() { - local -r compressed_image=$1 - local -r uncompressed_image=$2 - - if [[ ${force_extract} = yes ]] || [[ ${compressed_image} -nt ${uncompressed_image} ]]; then - lz4 --decompress --force --keep "${compressed_image}" "${uncompressed_image}" \ - || bail "Failed to extract '${compressed_image}'." - fi -} - -prepare_raw_images() { - local -r image_dir=build/images/${arch}-${variant}/latest - local -r compressed_os_image=${image_dir}/${product_name}-${variant}-${arch}.img.lz4 - local -r compressed_data_image=${image_dir}/${product_name}-${variant}-${arch}-data.img.lz4 - - if [[ -e ${compressed_os_image} ]]; then - readonly os_image=${compressed_os_image%*.lz4} - extract_image "${compressed_os_image}" "${os_image}" - else - bail 'Boot image not found. Did the last build fail?' - fi - - if [[ -e ${compressed_data_image} ]]; then - readonly data_image=${compressed_data_image%*.lz4} - extract_image "${compressed_data_image}" "${data_image}" - else - # Missing data image is fine. This variant may not be a split build. 
- readonly data_image= - fi - - if [[ -n ${os_image_size} ]]; then - truncate --no-create --size "${os_image_size}" "${os_image}" \ - || bail "Failed to resize OS image '${os_image}'." - fi - - if [[ -n ${data_image_size} ]]; then - if [[ -e ${data_image} ]]; then - truncate --no-create --size "${data_image_size}" "${data_image}" \ - || bail "Failed to resize data image '${data_image}'." - else - >&2 echo "Ignoring option --data-image-size ${data_image_size} since no data image was found." - fi - fi -} - -prepare_firmware() { - # Create local copies of the edk2 firmware variable storage, to help with - # facilitate Secure Boot testing where custom variables are needed for both - # architectures, but can't safely be reused across QEMU invocations. Also - # set reasonable defaults for both firmware files, if nothing more specific - # was requested. - local original_vars - - if [[ ${arch} = x86_64 ]]; then - firmware_code=${firmware_code:-/usr/share/edk2/ovmf/OVMF_CODE.fd} - original_vars=${firmware_vars:-/usr/share/edk2/ovmf/OVMF_VARS.fd} - firmware_vars="$(mktemp)" - on_exit "rm '${firmware_vars}'" - cp "${original_vars}" "${firmware_vars}" - fi - - if [[ ${arch} = aarch64 ]]; then - original_code=${firmware_code:-/usr/share/edk2/aarch64/QEMU_EFI.silent.fd} - original_vars=${firmware_vars:-/usr/share/edk2/aarch64/QEMU_VARS.fd} - firmware_code="$(mktemp)" - firmware_vars="$(mktemp)" - on_exit "rm '${firmware_code}' '${firmware_vars}'" - cat "${original_code}" /dev/zero \ - | head -c 64m > "${firmware_code}" - cat "${original_vars}" /dev/zero \ - | head -c 64m > "${firmware_vars}" - fi -} - -create_extra_files() { - # Explicitly instruct the kernel to send its output to the serial port on - # x86 via a bootconfig initrd. Passing in settings via user-data would be - # too late to get console output of the first boot. - if [[ ${arch} = x86_64 ]]; then - extra_files["${repo_root}/tools/bootconfig/qemu-x86-console-bootconfig.data"]=bootconfig.data - fi - - # If the private partition needs to be recreated, ensure that any bootconfig - # data file is present, otherwise GRUB will notice the missing file and wait - # for a key press. - if [[ ${#extra_files[@]} -gt 0 ]]; then - local has_bootconfig=no - for image_file in "${extra_files[@]}"; do - if [[ ${image_file} = bootconfig.data ]]; then - has_bootconfig=yes - break - fi - done - if [[ ${has_bootconfig} = no ]]; then - extra_files["${repo_root}/tools/bootconfig/empty-bootconfig.data"]=bootconfig.data - fi - fi -} - -inject_files() { - if [[ ${#extra_files[@]} -eq 0 ]]; then - return 0 - fi - - # We inject files into the boot image by replacing the private partition - # entirely. The new partition has to perfectly fit over the original one. - # Find the first and last sector, then calculate the partition's size. In - # absence of actual hardware, assume a traditional sector size of 512 bytes. - local private_first_sector private_last_sector - read -r private_first_sector private_last_sector < <( - fdisk --list-details "${os_image}" \ - | awk '/BOTTLEROCKET-PRIVATE/ { print $2, $3 }') - if [[ -z ${private_first_sector} ]] || [[ -z ${private_last_sector} ]]; then - bail "Failed to find the private partition in '${os_image}'." 
- fi - local private_size_mib=$(( (private_last_sector - private_first_sector + 1) * 512 / 1024 / 1024 )) - - local private_mount private_image - private_mount=$(mktemp -d) - private_image=$(mktemp) - on_exit "rm -rf '${private_mount}' '${private_image}'" - - for local_file in "${!extra_files[@]}"; do - local image_file=${extra_files[${local_file}]} - cp "${local_file}" "${private_mount}/${image_file}" - done - - if ! mkfs.ext4 -d "${private_mount}" "${private_image}" "${private_size_mib}M" \ - || ! dd if="${private_image}" of="${os_image}" conv=notrunc bs=512 seek="${private_first_sector}" - then - rm -f "${private_image}" - rm -rf "${private_mount}" - bail "Failed to inject files into '${os_image}'." - fi -} - -launch_vm() { - local -a qemu_args=( - -nographic - -enable-kvm - -cpu host - -smp "${vm_cpus}" - -m "${vm_mem}" - -drive if=pflash,format=raw,unit=0,file="${firmware_code}",readonly=on - -drive if=pflash,format=raw,unit=1,file="${firmware_vars}" - -drive index=0,if=virtio,format=raw,file="${os_image}" - ) - - # Plug the virtual primary NIC in as BDF 00:10.0 so udev will give it a - # consistent name we can know ahead of time--enp0s16 or ens16. - qemu_args+=( - -netdev user,id=net0,hostfwd="${host_port_forwards}" - -device virtio-net-pci,netdev=net0,addr=10.0 - ) - - # Resolve the last bit of uncertainty by disabling ACPI-based PCI hot plug, - # causing udev to use the bus location when naming the NIC (enp0s16). Since - # QEMU does not support PCI hot plug via ACPI on Arm, turn it off for the - # emulated x86_64 chipset only to achieve parity. - if [[ ${arch} = x86_64 ]]; then - qemu_args+=( -global PIIX4_PM.acpi-root-pci-hotplug=off ) - qemu_args+=( -machine q35,smm=on ) - fi - - if [[ ${arch} = aarch64 ]]; then - qemu_args+=( -machine virt ) - fi - - if [[ -n ${data_image} ]]; then - qemu_args+=( -drive index=1,if=virtio,format=raw,file="${data_image}" ) - fi - - qemu-system-"${arch}" "${qemu_args[@]}" -} - -parse_args "$@" -prepare_raw_images -prepare_firmware -create_extra_files -inject_files -launch_vm From 3fea5d72e5c47b4c2f2d4946d81d8b58c2cba96d Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 14 Jun 2024 21:08:40 +0000 Subject: [PATCH 1240/1356] docs: remove unnecessary documentation files Signed-off-by: Arnaldo Garcia Rincon --- README.md | 616 ------------------------------------------------------ 1 file changed, 616 deletions(-) delete mode 100644 README.md diff --git a/README.md b/README.md deleted file mode 100644 index 7d347e5d..00000000 --- a/README.md +++ /dev/null @@ -1,616 +0,0 @@ -# Bottlerocket OS - -Welcome to Bottlerocket! - -Bottlerocket is a free and open-source Linux-based operating system meant for hosting containers. - -To learn more about Bottlerocket, visit the [official Bottlerocket website and documentation](https://bottlerocket.dev/). -Otherwise, if you’re ready to jump right in, read one of our setup guides for running Bottlerocket in [Amazon EKS](QUICKSTART-EKS.md), [Amazon ECS](QUICKSTART-ECS.md), or [VMware](QUICKSTART-VMWARE.md). -If you're interested in running Bottlerocket on bare metal servers, please refer to the [provisioning guide](PROVISIONING-METAL.md) to get started. - -Bottlerocket focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads. -This is a reflection of what we've learned building operating systems and services at Amazon. -You can read more about what drives us in [our charter](CHARTER.md). 
- -The base operating system has just what you need to run containers reliably, and is built with standard open-source components. -Bottlerocket-specific additions focus on reliable updates and on the API. -Instead of making configuration changes manually, you can change settings with an API call, and these changes are automatically migrated through updates. - -Some notable features include: - -* [API access](#api) for configuring your system, with secure out-of-band [access methods](#exploration) when you need them. -* [Updates](#updates) based on partition flips, for fast and reliable system updates. -* [Modeled configuration](#settings) that's automatically migrated through updates. -* [Security](#security) as a top priority. - -## Participate in the Community - -There are many ways to take part in the Bottlerocket community: - -- [Join us on Meetup](https://www.meetup.com/bottlerocket-community/) to hear about the latest Bottlerocket (virtual/in-person) events and community meetings. - Community meetings are typically every other week. - - Details can be found under the [Events section on Meetup](https://www.meetup.com/bottlerocket-community/events/), and you will receive email notifications if you become a member of the Meetup group. (It's free to join!) - -- [Start or join a discussion](https://github.com/bottlerocket-os/bottlerocket/discussions) if you have questions about Bottlerocket. -- If you're interested in contributing, thank you! - Please see our [contributor's guide](CONTRIBUTING.md). - -## Contact us - -If you find a security issue, please [contact our security team](https://github.com/bottlerocket-os/bottlerocket/security/policy) rather than opening an issue. - -We use GitHub issues to track other bug reports and feature requests. -You can look at [existing issues](https://github.com/bottlerocket-os/bottlerocket/issues) to see whether your concern is already known. - -If not, you can select from a few templates and get some guidance on the type of information that would be most helpful. -[Contact us with a new issue here.](https://github.com/bottlerocket-os/bottlerocket/issues/new/choose) - -We don't have other communication channels set up quite yet, but don't worry about making an issue or a discussion thread! -You can let us know about things that seem difficult, or even ways you might like to help. - -## Variants - -To start, we're focusing on the use of Bottlerocket as a host OS in AWS EKS Kubernetes clusters and Amazon ECS clusters. -We’re excited to get early feedback and to continue working on more use cases! - -Bottlerocket is architected such that different cloud environments and container orchestrators can be supported in the future. -A build of Bottlerocket that supports different features or integration characteristics is known as a 'variant'. -The artifacts of a build will include the architecture and variant name. -For example, an `x86_64` build of the `aws-k8s-1.24` variant will produce an image named `bottlerocket-aws-k8s-1.24-x86_64--.img`. 
- -The following variants support EKS, as described above: - -* `aws-k8s-1.23` -* `aws-k8s-1.24` -* `aws-k8s-1.25` -* `aws-k8s-1.26` -* `aws-k8s-1.27` -* `aws-k8s-1.28` -* `aws-k8s-1.29` -* `aws-k8s-1.30` -* `aws-k8s-1.23-nvidia` -* `aws-k8s-1.24-nvidia` -* `aws-k8s-1.25-nvidia` -* `aws-k8s-1.26-nvidia` -* `aws-k8s-1.27-nvidia` -* `aws-k8s-1.28-nvidia` -* `aws-k8s-1.29-nvidia` -* `aws-k8s-1.30-nvidia` - -The following variants support ECS: - -* `aws-ecs-1` -* `aws-ecs-1-nvidia` -* `aws-ecs-2` -* `aws-ecs-2-nvidia` - -We also have variants that are designed to be Kubernetes worker nodes in VMware: - -* `vmware-k8s-1.27` -* `vmware-k8s-1.28` -* `vmware-k8s-1.29` -* `vmware-k8s-1.30` - -The following variants are designed to be Kubernetes worker nodes on bare metal: - -* `metal-k8s-1.27` -* `metal-k8s-1.28` -* `metal-k8s-1.29` - -The following variants are no longer supported: - -* All Kubernetes variants using Kubernetes 1.22 and earlier -* Bare metal and VMware variants using Kubernetes 1.26 and earlier - -We recommend users replace nodes running these variants with the [latest variant compatible with their cluster](variants/). - -## Architectures - -Our supported architectures include `x86_64` and `aarch64` (written as `arm64` in some contexts). - -## Setup - -:walking: :running: - -Bottlerocket is best used with a container orchestrator. -To get started with Kubernetes in Amazon EKS, please see [QUICKSTART-EKS](QUICKSTART-EKS.md). -To get started with Kubernetes in VMware, please see [QUICKSTART-VMWARE](QUICKSTART-VMWARE.md). -To get started with Amazon ECS, please see [QUICKSTART-ECS](QUICKSTART-ECS.md). -These guides describe: - -* how to set up a cluster with the orchestrator, so your Bottlerocket instance can run containers -* how to launch a Bottlerocket instance in EC2 or VMware - -To see how to provision Bottlerocket on bare metal, see [PROVISIONING-METAL](PROVISIONING-METAL.md). - -To build your own Bottlerocket images, please see [BUILDING](BUILDING.md). -It describes: - -* how to build an image -* how to register an EC2 AMI from an image - -To publish your built Bottlerocket images, please see [PUBLISHING](PUBLISHING.md). -It describes: - -* how to make TUF repos including your image -* how to copy your AMI across regions -* how to mark your AMIs public or grant access to specific accounts -* how to make your AMIs discoverable using [SSM parameters](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) - -## Exploration - -To improve security, there's no SSH server in a Bottlerocket image, and not even a shell. - -Don't panic! - -There are a couple out-of-band access methods you can use to explore Bottlerocket like you would a typical Linux system. -Either option will give you a shell within Bottlerocket. -From there, you can [change settings](#settings), manually [update Bottlerocket](#updates), debug problems, and generally explore. - -**Note:** These methods require that your instance has permission to access the ECR repository where these containers live; the appropriate policy to add to your instance's IAM role is `AmazonEC2ContainerRegistryReadOnly`. - -### Control container - -Bottlerocket has a ["control" container](https://github.com/bottlerocket-os/bottlerocket-control-container), enabled by default, that runs outside of the orchestrator in a separate instance of containerd. 
-This container runs the [AWS SSM agent](https://github.com/aws/amazon-ssm-agent) that lets you run commands, or start shell sessions, on Bottlerocket instances in EC2. -(You can easily replace this control container with your own just by changing the URI; see [Settings](#settings).) - -In AWS, you need to give your instance the SSM role for this to work; see the [setup guide](QUICKSTART-EKS.md#enabling-ssm). -Outside of AWS, you can use [AWS Systems Manager for hybrid environments](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). -There's more detail about hybrid environments in the [control container documentation](https://github.com/bottlerocket-os/bottlerocket-control-container/#connecting-to-aws-systems-manager-ssm). - -Once the instance is started, you can start a session: - -* Go to AWS SSM's [Session Manager](https://console.aws.amazon.com/systems-manager/session-manager/sessions) -* Select "Start session" and choose your Bottlerocket instance -* Select "Start session" again to get a shell - -If you prefer a command-line tool, you can start a session with a recent [AWS CLI](https://aws.amazon.com/cli/) and the [session-manager-plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). -Then you'd be able to start a session using only your instance ID, like this: - -```shell -aws ssm start-session --target INSTANCE_ID -``` - -With the [default control container](https://github.com/bottlerocket-os/bottlerocket-control-container), you can make [API calls](#api) to configure and manage your Bottlerocket host. -To do even more, read the next section about the [admin container](#admin-container). -You can access the admin container from the control container like this: - -```shell -enter-admin-container -``` - -### Admin container - -Bottlerocket has an [administrative container](https://github.com/bottlerocket-os/bottlerocket-admin-container), disabled by default, that runs outside of the orchestrator in a separate instance of containerd. -This container has an SSH server that lets you log in as `ec2-user` using your EC2-registered SSH key. -Outside of AWS, you can [pass in your own SSH keys](https://github.com/bottlerocket-os/bottlerocket-admin-container#authenticating-with-the-admin-container). -(You can easily replace this admin container with your own just by changing the URI; see [Settings](#settings). - -To enable the container, you can change the setting in user data when starting Bottlerocket, for example EC2 instance user data: - -```toml -[settings.host-containers.admin] -enabled = true -``` - -If Bottlerocket is already running, you can enable the admin container from the default [control container](#control-container) like this: - -```shell -enable-admin-container -``` - -Or you can start an interactive session immediately like this: - -```shell -enter-admin-container -``` - -If you're using a custom control container, or want to make the API calls directly, you can enable the admin container like this instead: - -```shell -apiclient set host-containers.admin.enabled=true -``` - -Once you've enabled the admin container, you can either access it through SSH or execute commands from the control container like this: - -```shell -apiclient exec admin bash -``` - -Once you're in the admin container, you can run `sheltie` to get a full root shell in the Bottlerocket host. 
-Be careful; while you can inspect and change even more as root, Bottlerocket's filesystem and dm-verity setup will prevent most changes from persisting over a restart - see [Security](#security). - -## Updates - -Rather than a package manager that updates individual pieces of software, Bottlerocket downloads a full filesystem image and reboots into it. -It can automatically roll back if boot failures occur, and workload failures can trigger manual rollbacks. - -The update process uses images secured by [TUF](https://theupdateframework.github.io/). -For more details, see the [update system documentation](sources/updater/). - -### Update methods - -There are several ways of updating your Bottlerocket hosts. -We provide tools for automatically updating hosts, as well as an API for direct control of updates. - -#### Automated updates - -For EKS variants of Bottlerocket, we recommend using the [Bottlerocket update operator](https://github.com/bottlerocket-os/bottlerocket-update-operator) for automated updates. - -For the ECS variant of Bottlerocket, we recommend using the [Bottlerocket ECS updater](https://github.com/bottlerocket-os/bottlerocket-ecs-updater/) for automated updates. - -#### Update API - -The [Bottlerocket API](#api) includes methods for checking and starting system updates. -You can read more about the update APIs in our [update system documentation](sources/updater/README.md#update-api). - -apiclient knows how to handle those update APIs for you, and you can run it from the [control](#control-container) or [admin](#admin-container) containers. - -To see what updates are available: - -```shell -apiclient update check -``` - -If an update is available, it will show up in the `chosen_update` field. -The `available_updates` field will show the full list of available versions, including older versions, because Bottlerocket supports safely rolling back. - -To apply the latest update: - -```shell -apiclient update apply -``` - -The next time you reboot, you'll start up in the new version, and system configuration will be automatically [migrated](sources/api/migration/). -To reboot right away: - -```shell -apiclient reboot -``` - -If you're confident about updating, the `apiclient update apply` command has `--check` and `--reboot` flags to combine the above actions, so you can accomplish all of the above steps like this: - -```shell -apiclient update apply --check --reboot -``` - -See the [apiclient documentation](sources/api/apiclient/) for more details. - -### Update rollback - -The system will automatically roll back if it's unable to boot. -If the update is not functional for a given container workload, you can do a manual rollback: - -```shell -signpost rollback-to-inactive -reboot -``` - -This doesn't require any external communication, so it's quicker than `apiclient`, and it's made to be as reliable as possible. - -## Settings - -Here we'll describe the settings you can configure on your Bottlerocket instance, and how to do it. - -(API endpoints are defined in our [OpenAPI spec](sources/api/openapi.yaml) if you want more detail.) - -### Interacting with settings - -#### Using the API client - -You can see the current settings with an API request: - -```shell -apiclient get settings -``` - -This will return all of the current settings in JSON format. 
-For example, here's an abbreviated response:
-
-```json
-{"motd": "...", "kubernetes": {}}
-```
-
-You can change settings like this:
-
-```shell
-apiclient set motd="hi there" kubernetes.node-labels.environment=test
-```
-
-You can also use a JSON input mode to help change many related settings at once, and a "raw" mode if you want more control over how the settings are committed and applied to the system.
-See the [apiclient README](sources/api/apiclient/) for details.
-
-#### Using user data
-
-If you know what settings you want to change when you start your Bottlerocket instance, you can send them in the user data.
-
-In user data, we structure the settings in TOML form to make things a bit simpler.
-Here's the user data to change the message of the day setting, as we did in the last section:
-
-```toml
-[settings]
-motd = "my own value!"
-```
-
-If your user data is over the size limit of the platform (e.g. 16KiB for EC2), you can compress the contents with gzip.
-(With [aws-cli](https://aws.amazon.com/cli/), you can use `--user-data fileb:///path/to/gz-file` to pass binary data.)
-
-### Description of settings
-
-Here we'll describe each setting you can change.
-
-**Note:** You can see the default values (for any settings that are not generated at runtime) by looking in the `defaults.d` directory for a variant, for example [aws-ecs-2](sources/models/src/aws-ecs-2/defaults.d/).
-
-When you're sending settings to the API, or receiving settings from the API, they're in a structured JSON format.
-This allows modification of any number of keys at once.
-It also lets us ensure that they fit the definition of the Bottlerocket data model - requests with invalid settings won't even parse correctly, helping ensure safety.
-
-Here, however, we'll use the shortcut "dotted key" syntax for referring to keys.
-This is used in some API endpoints with less-structured requests or responses.
-It's also more compact for our needs here.
-
-In this format, "settings.kubernetes.cluster-name" refers to the same key as in the JSON `{"settings": {"kubernetes": {"cluster-name": "value"}}}`.
-
-**NOTE:** [bottlerocket.dev](https://bottlerocket.dev/en/os/latest/#/api/settings/) now contains a complete, versioned setting reference.
-This document retains the headings below for existing link and bookmark compatibility.
-Please update your bookmarks and check out [bottlerocket.dev](https://bottlerocket.dev/) for future updates to the setting reference.
-
-#### Top-level settings
-
-See the [`settings.motd` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/motd/).
-
-#### Kubernetes settings
-
-See the [`settings.kubernetes.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/kubernetes/).
-
-#### Amazon ECS settings
-
-See the [`settings.ecs.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/ecs/).
-
-#### CloudFormation signal helper settings
-
-See the [`settings.cloudformation.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/cloudformation/).
-
-#### Auto Scaling group settings
-
-See the [`settings.autoscaling.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/autoscaling/).
-
-#### OCI Hooks settings
-
-See the [`settings.oci-hooks.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-hooks/).
-
-#### OCI Defaults settings
-
-See the [`settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/).
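To give a feel for the shape of these settings before the subsections below, here's an illustrative user-data sketch; the specific capability and resource-limit key names used here are assumptions, so check the `settings.oci-defaults.*` reference above for the authoritative schema:

```toml
# Illustrative sketch only: these key names are assumptions, not a definitive schema.
[settings.oci-defaults.capabilities]
sys-admin = false

[settings.oci-defaults.resource-limits.max-open-files]
soft-limit = 1024
hard-limit = 1048576
```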
-
-##### OCI Defaults: Capabilities
-
-See the ["Capabilities Settings" section in the `settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/).
-
-##### OCI Defaults: Resource Limits
-
-See the ["Resource Limits Settings" section in the `settings.oci-defaults.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/oci-defaults/).
-
-#### Container image registry settings
-
-See the [`settings.container-registry.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/container-registry/).
-
-#### Container runtime settings
-
-See the [`settings.container-runtime.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/container-runtime/).
-
-#### Updates settings
-
-See the [`settings.updates.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/updates/).
-
-#### Network settings
-
-See the [`settings.network.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/network/).
-
-##### Proxy settings
-
-See the ["Proxy Settings" section in the `settings.network.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/network/).
-
-#### Metrics settings
-
-See the [`settings.metrics.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/metrics/).
-
-#### Time settings
-
-See the [`settings.ntp.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/ntp/).
-
-#### Kernel settings
-
-See the [`settings.kernel.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/kernel/).
-
-#### Boot-related settings
-
-See the [`settings.boot.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/boot/).
-
-#### Custom CA certificates settings
-
-See the [`settings.pki.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/pki/).
-
-#### Host containers settings
-
-See the [`settings.host-containers.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/host-containers/).
-
-##### Custom host containers
-
-See the [Host Containers documentation](https://bottlerocket.dev/en/os/latest/#/concepts/host-containers/).
-
-#### Bootstrap containers settings
-
-See the [`settings.bootstrap-containers.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/bootstrap-containers/) as well as the [Bootstrap Containers documentation](https://bottlerocket.dev/en/os/latest/#/concepts/bootstrap-containers/).
-
-##### Mount propagations in bootstrap and superpowered containers
-
-Both bootstrap and superpowered host containers are configured with the `/.bottlerocket/rootfs/mnt` bind mount that points to `/mnt` in the host, which itself is a bind mount of `/local/mnt`.
-This bind mount is set up with shared propagations, so any new mount point created underneath `/.bottlerocket/rootfs/mnt` in any bootstrap or superpowered host container will propagate across mount namespaces.
-You can use this feature to configure ephemeral disks attached to your hosts that you may want to use on your workloads.
-
-#### Platform-specific settings
-
-Platform-specific settings are automatically set at boot time by [early-boot-config](sources/api/early-boot-config) based on metadata available on the running platform.
-They can be overridden for testing purposes in [the same way as other settings](#interacting-with-settings).
-
-##### AWS-specific settings
-
-See the [`settings.aws.*` reference](https://bottlerocket.dev/en/os/latest/#/api/settings/aws/).
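For example, on a test host you could override one of these platform-provided values through the API just like any other setting; the `aws.region` key below is used for illustration, so confirm it against the `settings.aws.*` reference:

```shell
apiclient set aws.region=us-west-2
```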
- -### Logs - -You can use `logdog` through the [admin container](#admin-container) to obtain an archive of log files from your Bottlerocket host. - -For a list of what is collected, see the logdog [command list](sources/logdog/src/log_request.rs). - -#### Generating logs - -SSH to the Bottlerocket host or `apiclient exec admin bash` to access the admin container, then run: - -```shell -sudo sheltie -logdog -``` - -This will write an archive of the logs to `/var/log/support/bottlerocket-logs.tar.gz`. -This archive is accessible from host containers at `/.bottlerocket/support`. - -#### Fetching logs - -There are multiple methods to retrieve the generated log archive. - -- **Via SSH if already enabled** - - Once you have exited from the Bottlerocket host, run a command like: - - ```shell - ssh -i YOUR_KEY_FILE \ - ec2-user@YOUR_HOST \ - "cat /.bottlerocket/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz - ``` - -- **With `kubectl get` if running Kubernetes** - - ```shell - kubectl get --raw \ - "/api/v1/nodes/NODE_NAME/proxy/logs/support/bottlerocket-logs.tar.gz" > bottlerocket-logs.tar.gz - ``` - -- **Using [SSH over SSM](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started-enable-ssh-connections.html) if your instance isn't accessible through SSH or Kubernetes** - -### Kdump Support - -Bottlerocket provides support to collect kernel crash dumps whenever the system kernel panics. -Once this happens, both the dmesg log and vmcore dump are stored at `/var/log/kdump`, and the system reboots. - -There are a few important caveats about the provided kdump support: - -* Currently, only vmware variants have kdump support enabled -* The system kernel will reserve 256MB for the crash kernel, only when the host has at least 2GB of memory; the reserved space won't be available for processes running in the host -* The crash kernel will only be loaded when the `crashkernel` parameter is present in the kernel's cmdline and if there is memory reserved for it - -### NVIDIA GPUs Support - -Bottlerocket's `nvidia` variants include the required packages and configurations to leverage NVIDIA GPUs. -Currently, the following NVIDIA driver versions are supported in Bottlerocket: - -* 470.X -* 515.X - -The official AMIs for these variants can be used with EC2 GPU-equipped instance types such as: `p2`, `p3`, `p4`, `g3`, `g4dn`, `g5` and `g5g`. -Note that older instance types, such as `p2`, are not supported by NVIDIA driver `515.X` and above. -You need to make sure you select the appropriate AMI depending on the instance type you are planning to use. -Please see [QUICKSTART-EKS](QUICKSTART-EKS.md#aws-k8s--nvidia-variants) for further details about Kubernetes variants, and [QUICKSTART-ECS](QUICKSTART-ECS.md#aws-ecs--nvidia-variants) for ECS variants. - -## Details - -### Security - -:shield: :crab: - -To learn more about security features in Bottlerocket, please see [SECURITY FEATURES](SECURITY_FEATURES.md). -It describes how we use features like [dm-verity](https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity) and [SELinux](https://selinuxproject.org/) to protect the system from security threats. - -To learn more about security recommendations for Bottlerocket, please see [SECURITY GUIDANCE](SECURITY_GUIDANCE.md). -It documents additional steps you can take to secure the OS, and includes resources such as a [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for your reference. 
- -In addition, almost all first-party components are written in [Rust](https://www.rust-lang.org/). -Rust eliminates some classes of memory safety issues, and encourages design patterns that help security. - -### Packaging - -Bottlerocket is built from source using a container toolchain. -We use RPM package definitions to build and install individual packages into an image. -RPM itself is not in the image - it's just a common and convenient package definition format. - -We currently package the following major third-party components: - -* Linux kernel ([background](https://en.wikipedia.org/wiki/Linux), [5.10 packaging](packages/kernel-5.10/), [5.15 packaging](packages/kernel-5.15/)) -* glibc ([background](https://www.gnu.org/software/libc/), [packaging](packages/glibc/)) -* Buildroot as build toolchain ([background](https://buildroot.org/), via the [SDK](https://github.com/bottlerocket-os/bottlerocket-sdk)) -* GRUB, with patches for partition flip updates ([background](https://www.gnu.org/software/grub/), [packaging](packages/grub/)) -* systemd as init ([background](https://en.wikipedia.org/wiki/Systemd), [packaging](packages/systemd/)) -* wicked for networking ([background](https://github.com/openSUSE/wicked), [packaging](packages/wicked/)) -* containerd ([background](https://containerd.io/), [packaging](packages/containerd/)) -* Kubernetes ([background](https://kubernetes.io/), [packaging](packages/kubernetes-1.24/)) -* aws-iam-authenticator ([background](https://github.com/kubernetes-sigs/aws-iam-authenticator), [packaging](packages/aws-iam-authenticator/)) -* Amazon ECS agent ([background](https://github.com/aws/amazon-ecs-agent), [packaging](packages/ecs-agent/)) - -For further documentation or to see the rest of the packages, see the [packaging directory](packages/). - -### Updates - -The Bottlerocket image has two identical sets of partitions, A and B. -When updating Bottlerocket, the partition table is updated to point from set A to set B, or vice versa. - -We also track successful boots, and if there are failures it will automatically revert back to the prior working partition set. - -The update process uses images secured by [TUF](https://theupdateframework.github.io/). -For more details, see the [update system documentation](sources/updater/). - -### API - -There are two main ways you'd interact with a production Bottlerocket instance. -(There are a couple more [exploration](#exploration) methods above for test instances.) - -The first method is through a container orchestrator, for when you want to run or manage containers. -This uses the standard channel for your orchestrator, for example a tool like `kubectl` for Kubernetes. - -The second method is through the Bottlerocket API, for example when you want to configure the system. - -There's an HTTP API server that listens on a local Unix-domain socket. -Remote access to the API requires an authenticated transport such as SSM's RunCommand or Session Manager, as described above. -For more details, see the [apiserver documentation](sources/api/apiserver/). - -The [apiclient](sources/api/apiclient/) can be used to make requests. -They're just HTTP requests, but the API client simplifies making requests with the Unix-domain socket. - -To make configuration easier, we have [early-boot-config](sources/api/early-boot-config/), which can send an API request for you based on instance user data. 
-If you start a virtual machine, like an EC2 instance, it will read TOML-formatted Bottlerocket configuration from user data and send it to the API server. -This way, you can configure your Bottlerocket instance without having to make API calls after launch. - -See [Settings](#settings) above for examples and to understand what you can configure. - -You can also access host containers through the API using [apiclient exec](sources/api/apiclient/README.md#exec-mode). - -The server and client are the user-facing components of the API system, but there are a number of other components that work together to make sure your settings are applied, and that they survive upgrades of Bottlerocket. - -For more details, see the [API system documentation](sources/api/). - -### Default Volumes - -Bottlerocket operates with two default storage volumes. - -* The root device, holds the active and passive [partition sets](#updates-1). - It also contains the bootloader, the dm-verity hash tree for verifying the [immutable root filesystem](SECURITY_FEATURES.md#immutable-rootfs-backed-by-dm-verity), and the data store for the Bottlerocket API. -* The data device is used as persistent storage for container images, container orchestration, [host-containers](#Custom-host-containers), and [bootstrap containers](#Bootstrap-containers-settings). - The operating system does not typically make changes to this volume during regular updates, though changes to upstream software such as containerd or kubelet could result in changes to their stored data. - This device (mounted to `/local` on the host) can be used for application storage for orchestrated workloads; however, we recommend using an additional volume if possible for such cases. - See [this section of the Security Guidance documentation](./SECURITY_GUIDANCE.md#limit-access-to-system-mounts) for more information. - -On boot Bottlerocket will increase the data partition size to use all of the data device. -If you increase the size of the device, you can reboot Bottlerocket to extend the data partition. -If you need to extend the data partition without rebooting, have a look at this [discussion](https://github.com/bottlerocket-os/bottlerocket/discussions/2011). From fc507840bed5cb9090b374162cb614491c96850d Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 14 Jun 2024 21:18:10 +0000 Subject: [PATCH 1241/1356] build: add all packages to core kit Signed-off-by: Arnaldo Garcia Rincon --- kits/build.rs | 9 +++++++++ kits/kit.rs | 7 +++++++ 2 files changed, 16 insertions(+) create mode 100644 kits/build.rs create mode 100644 kits/kit.rs diff --git a/kits/build.rs b/kits/build.rs new file mode 100644 index 00000000..669c934e --- /dev/null +++ b/kits/build.rs @@ -0,0 +1,9 @@ +use std::process::{exit, Command}; + +fn main() -> Result<(), std::io::Error> { + let ret = Command::new("buildsys").arg("build-kit").status()?; + if !ret.success() { + exit(1); + } + Ok(()) +} diff --git a/kits/kit.rs b/kits/kit.rs new file mode 100644 index 00000000..6bedb60e --- /dev/null +++ b/kits/kit.rs @@ -0,0 +1,7 @@ +/*! + +This is an intentionally empty file that all of the variant `Cargo.toml` files can point to as their +`lib.rs`. The build system uses `build.rs` to invoke `buildsys` but Cargo needs something to compile +so we give it an empty `lib.rs` file. 
+ +!*/ From b1886c54da175f89f8cfeb53f2d757e3f704d336 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 14 Jun 2024 21:19:19 +0000 Subject: [PATCH 1242/1356] build: add Makefile Signed-off-by: Arnaldo Garcia Rincon --- Makefile | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..7e0ce399 --- /dev/null +++ b/Makefile @@ -0,0 +1,52 @@ +TOP := $(dir $(abspath $(firstword $(MAKEFILE_LIST)))) +TOOLS_DIR := $(TOP)tools +TWOLITER_DIR := $(TOOLS_DIR)/twoliter +TWOLITER := $(TWOLITER_DIR)/twoliter +CARGO_HOME := $(TOP).cargo +VERSION := $(shell awk '/^release-version = /{ print $$3 }' $(TOP)Twoliter.toml) +GIT_HASH := $(shell git describe --always --dirty --exclude '*' || echo 00000000 ) + +TWOLITER_VERSION ?= "0.3.0" +KIT ?= bottlerocket-core-kit +ARCH ?= $(shell uname -m) +VENDOR ?= bottlerocket + +all: build + +prep: + @mkdir -p $(TWOLITER_DIR) + @mkdir -p $(CARGO_HOME) + @$(TOOLS_DIR)/install-twoliter.sh \ + --repo "https://github.com/bottlerocket-os/twoliter" \ + --version v$(TWOLITER_VERSION) \ + --directory $(TWOLITER_DIR) \ + --reuse-existing-install \ + --allow-binary-install \ + --allow-from-source + +update: prep + @$(TWOLITER) update + +fetch: prep + @$(TWOLITER) fetch --arch $(ARCH) + +build: fetch + @$(TWOLITER) build kit $(KIT) --arch $(ARCH) + +publish: prep + @$(TWOLITER) publish kit $(KIT) $(VENDOR) v$(VERSION)-$(GIT_HASH) + +TWOLITER_MAKE = $(TWOLITER) make --cargo-home $(CARGO_HOME) --arch $(ARCH) + +# Treat any targets after "make twoliter" as arguments to "twoliter make". +ifeq (twoliter,$(firstword $(MAKECMDGOALS))) + TWOLITER_MAKE_ARGS := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + $(eval $(TWOLITER_MAKE_ARGS):;@:) +endif + +# Transform "make twoliter" into "twoliter make", for access to tasks that are +# only available through the embedded Makefile.toml. +twoliter: prep + @$(TWOLITER_MAKE) $(TWOLITER_MAKE_ARGS) + +.PHONY: prep update fetch build publish twoliter From 778fb818f668c9ec7ad9567ee5f9c161a91e1418 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 14 Jun 2024 21:39:28 +0000 Subject: [PATCH 1243/1356] github: rework issue templates and action workflows for kit Signed-off-by: Arnaldo Garcia Rincon --- .github/ISSUE_TEMPLATE/metal_driver.md | 26 -------- .../ISSUE_TEMPLATE/{image.md => package.md} | 4 +- .github/actions/list-variants/action.yml | 24 ------- .github/workflows/build.yml | 45 ++----------- .github/workflows/cache.yml | 8 +-- .github/workflows/weekly.yml | 65 ------------------- 6 files changed, 13 insertions(+), 159 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/metal_driver.md rename .github/ISSUE_TEMPLATE/{image.md => package.md} (87%) delete mode 100644 .github/actions/list-variants/action.yml delete mode 100644 .github/workflows/weekly.yml diff --git a/.github/ISSUE_TEMPLATE/metal_driver.md b/.github/ISSUE_TEMPLATE/metal_driver.md deleted file mode 100644 index 1e4e58d9..00000000 --- a/.github/ISSUE_TEMPLATE/metal_driver.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: bare metal variant - driver request -about: Request a driver to be added to the metal variant of Bottlerocket -labels: status/needs-triage, area/metal, type/enhancement ---- - - - -**What I'd like:** - -**Device type (e.g. 
network interface, disk controller):** - -**Device vendor:** - -**Device model:** - -**Driver used on other Linux distribition:** - -**Any alternatives you've considered:** - diff --git a/.github/ISSUE_TEMPLATE/image.md b/.github/ISSUE_TEMPLATE/package.md similarity index 87% rename from .github/ISSUE_TEMPLATE/image.md rename to .github/ISSUE_TEMPLATE/package.md index b61e72ad..6da8db36 100644 --- a/.github/ISSUE_TEMPLATE/image.md +++ b/.github/ISSUE_TEMPLATE/package.md @@ -1,5 +1,5 @@ --- -name: Bug report - Bottlerocket image +name: Bug report - Bottlerocket package about: Let us know about a problem with Bottlerocket labels: status/needs-triage, type/bug --- @@ -11,7 +11,7 @@ Tips: - Please include any error messages you received, with any required context. --> -**Image I'm using:** +**Package I'm using:** diff --git a/.github/actions/list-variants/action.yml b/.github/actions/list-variants/action.yml deleted file mode 100644 index a6e800c7..00000000 --- a/.github/actions/list-variants/action.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: "List active variants" -description: "Dynamically determines current Bottlerocket variants based on repo contents." -outputs: - variants: - description: A list of all variants defined in the repo - value: ${{ steps.get-variants.outputs.variants }} - aarch-enemies: - description: Variants that should not run for aarch64 - value: ${{ steps.get-variants.outputs.aarch-enemies }} -runs: - using: "composite" - steps: - - uses: actions/checkout@v4 - - id: get-variants - name: Determine variants - shell: bash - run: | - cd variants - output="variants=$(ls -d */ | cut -d'/' -f 1 | grep -vE '^(shared|target)$' | sort | awk '$0 != x "-nvidia" && NR>1 {print x} {x=$0} END {print}' | jq -R -s -c 'split("\n")[:-1]')" - echo $output - echo $output >> $GITHUB_OUTPUT - output="aarch-enemies=$(ls -d */ | cut -d'/' -f 1 | grep -E '(^(metal|vmware)|\-dev$)' | jq -R -s -c 'split("\n")[:-1] | [ .[] | {"variant": ., "arch": "aarch64"}]')" - echo $output - echo $output >> $GITHUB_OUTPUT diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2cffc553..4f9c66da 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,41 +17,22 @@ on: - '**.tpl' # Sample config files and OpenAPI docs - '**.yaml' - # Other files that don't affect the build - - 'packages/os/eni-max-pods' concurrency: group: ${{ github.ref }} cancel-in-progress: true jobs: - list-variants: - # This needs to be its own job since the build job needs its output before - # it can initialize - if: github.repository == 'bottlerocket-os/bottlerocket' - name: "Determine variants" - runs-on: ubuntu-latest - outputs: - variants: ${{ steps.get-variants.outputs.variants }} - aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/list-variants - id: get-variants - build: - needs: list-variants runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_32-core continue-on-error: true strategy: matrix: - variant: ${{ fromJson(needs.list-variants.outputs.variants) }} arch: [x86_64, aarch64] - exclude: ${{ fromJson(needs.list-variants.outputs.aarch-enemies) }} fail-fast: false - name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" + name: "Build ${{ matrix.arch }}" steps: - name: Random delay run: | @@ -61,23 +42,11 @@ jobs: - uses: actions/checkout@v4 - name: Preflight step to set up the runner uses: ./.github/actions/setup-node - - if: contains(matrix.variant, 'nvidia') - run: | - cat <<-EOF > Licenses.toml 
- [nvidia] - spdx-id = "LICENSE-LicenseRef-NVIDIA-Customer" - licenses = [ - { path = "NVIDIA", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } - ] - EOF - run: rustup component add rustfmt - - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} unit-tests - - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-fmt + - run: make twoliter unit-tests + # TODO: fixme please! # Avoid running Go lint check via `cargo make check-lints` since there's a separate golangci-lint workflow - - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-clippy - - run: cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} check-shell - - run: | - cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ - -e BUILDSYS_ARCH=${{ matrix.arch }} \ - -e BUILDSYS_JOBS=12 \ - -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" + # - run: make twoliter check-fmt + # - run: make twoliter check-clippy + - run: make twoliter check-shell + - run: make ARCH="${{ matrix.arch }}" diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 2b66cc02..174a2724 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -7,12 +7,12 @@ on: paths: - '.github/**' - 'sources/Cargo.lock' - - 'tools/pubsys*/**' - - '!tools/pubsys/policies/**' - - '!tools/pubsys/**.example' + - 'Twoliter.toml' + - 'Twoliter.lock' + - 'Makefile' jobs: cache: - if: github.repository == 'bottlerocket-os/bottlerocket' + if: github.repository == 'bottlerocket-os/bottlerocket-core-kit' runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_8-core diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml deleted file mode 100644 index 125e554f..00000000 --- a/.github/workflows/weekly.yml +++ /dev/null @@ -1,65 +0,0 @@ -# This is basically a duplicate of the main "build" workflow, but uses GOPROXY=direct -# to try to catch errors close to their introduction due to yanked Go modules. These -# could otherwise be covered up by caching and not discovered until much later when -# bypassing the main cache. -name: Weekly -on: - schedule: - # Run Monday at 02:15 UTC. Randomly chosen as a "quiet" time for this to run. - # See syntax for format details: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule - - cron: '15 2 * * 1' - -env: - # When Go packages are built, buildsys will vendor in dependent Go code for - # that package and bundle it up in a tarball. This env variable is consumed - # and used to configure Go to directly download code from its upstream source. - # This is a useful early signal during GitHub actions to see if there are - # upstream Go code problems. 
- GOPROXY: direct - -jobs: - list-variants: - # This needs to be its own job since the build job needs its output before - # it can initialize - if: github.repository == 'bottlerocket-os/bottlerocket' - name: "Determine variants" - runs-on: ubuntu-latest - outputs: - variants: ${{ steps.get-variants.outputs.variants }} - aarch-enemies: ${{ steps.get-variants.outputs.aarch-enemies }} - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/list-variants - id: get-variants - - build: - needs: list-variants - runs-on: - group: bottlerocket - labels: bottlerocket_ubuntu-latest_32-core - continue-on-error: false - strategy: - matrix: - variant: ${{ fromJson(needs.list-variants.outputs.variants) }} - arch: [x86_64, aarch64] - exclude: ${{ fromJson(needs.list-variants.outputs.aarch-enemies) }} - fail-fast: false - name: "Build ${{ matrix.variant }}-${{ matrix.arch }}" - steps: - - uses: actions/checkout@v4 - - name: Preflight step to set up the runner - uses: ./.github/actions/setup-node - - if: contains(matrix.variant, 'nvidia') - run: | - cat <<-EOF > Licenses.toml - [nvidia] - spdx-id = "LICENSE-LicenseRef-NVIDIA-Customer" - licenses = [ - { path = "NVIDIA", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } - ] - EOF - - run: | - cargo make -e BUILDSYS_VARIANT=${{ matrix.variant }} \ - -e BUILDSYS_ARCH=${{ matrix.arch }} \ - -e BUILDSYS_JOBS=12 \ - -e BUILDSYS_UPSTREAM_LICENSE_FETCH="${{ contains(matrix.variant, 'nvidia') }}" From 15db18f94edf66ed60ce1cee1787596325736437 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 15 Jun 2024 02:49:19 +0000 Subject: [PATCH 1244/1356] github: fix and improve cache action The previous cache action failed because `Makefile.toml` was removed from the kit. Switch it to use the Makefile targets instead. While we're at it, modify the cache action to cache Rust and Go deps, to see if this helps to speed up builds. Signed-off-by: Ben Cressey --- .github/actions/setup-node/action.yml | 8 +++++++- .github/workflows/cache.yml | 12 ++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index 3a16aff2..7fe7b1c5 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -20,11 +20,17 @@ runs: ~/.cargo key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - uses: actions/cache@v4 - # Cache first-party code dependencies + # Cache Rust dependencies with: path: | .cargo key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} + - uses: actions/cache@v4 + # Cache Go dependencies + with: + path: | + .gomodcache + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/**/go.sum') }} - run: cargo install cargo-make shell: bash - if: ${{ inputs.perform-cache-cleanup }} diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 174a2724..5d975c9b 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -1,4 +1,4 @@ -# This workflow caches crate dependencies and build artifacts for tools (except 'test-tools' since we don't use them in build workflows). +# This workflow caches crate dependencies, build tools, and external kits. # The cache is only usable by workflows started from pull requests against the develop branch. 
name: CacheDepsAndTools on: @@ -7,6 +7,7 @@ on: paths: - '.github/**' - 'sources/Cargo.lock' + - 'sources/**/go.sum' - 'Twoliter.toml' - 'Twoliter.lock' - 'Makefile' @@ -23,6 +24,13 @@ jobs: uses: ./.github/actions/setup-node with: perform-cache-cleanup: true - - run: cargo make install-twoliter + # This installs twoliter. + - run: make prep + # This fetches any external kit dependencies. + - run: make fetch + # This cleans the existing project local caches. + - run: make twoliter purge-cache + # This fetches Rust crate and Go module dependencies. + - run: make twoliter fetch # This cleans the cargo cache in ~/.cargo - run: cargo-cache From 11b1340490a9e12f7a1189ce2dcd8af64845c24d Mon Sep 17 00:00:00 2001 From: Sam Berning Date: Tue, 18 Jun 2024 18:56:02 +0000 Subject: [PATCH 1245/1356] Makefile: remove version from publish command Signed-off-by: Sam Berning --- Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 7e0ce399..4b1b055e 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,6 @@ TOOLS_DIR := $(TOP)tools TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -VERSION := $(shell awk '/^release-version = /{ print $$3 }' $(TOP)Twoliter.toml) -GIT_HASH := $(shell git describe --always --dirty --exclude '*' || echo 00000000 ) TWOLITER_VERSION ?= "0.3.0" KIT ?= bottlerocket-core-kit @@ -34,7 +32,7 @@ build: fetch @$(TWOLITER) build kit $(KIT) --arch $(ARCH) publish: prep - @$(TWOLITER) publish kit $(KIT) $(VENDOR) v$(VERSION)-$(GIT_HASH) + @$(TWOLITER) publish kit $(KIT) $(VENDOR) TWOLITER_MAKE = $(TWOLITER) make --cargo-home $(CARGO_HOME) --arch $(ARCH) From b67548ab4d821ad798cd77991c179df12fd2b59b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 19 Jun 2024 15:06:34 +0000 Subject: [PATCH 1246/1356] build: export Go modules to environment This tells Twoliter to fetch modules for these projects. Signed-off-by: Ben Cressey --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 4b1b055e..09f08452 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,8 @@ KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket +export GO_MODULES = ecs-gpu-init host-ctr + all: build prep: From 0562f83d1bb7f01f36188c9d2ab9b55870858b2a Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 19 Jun 2024 15:04:29 +0000 Subject: [PATCH 1247/1356] github: split up cache save and restore actions This splits up the cache steps to make the cache more useful. Pull requests will fetch the most recent build cache, if available, and will not generate caches of their own since the cached artifacts cannot be easily reused across other branches. On merge to develop, a shared project-level cache will be refreshed with the most recent artifacts. Signed-off-by: Ben Cressey --- .github/actions/setup-node/action.yml | 29 ++++------------- .github/workflows/cache.yml | 46 ++++++++++++++++----------- 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index 7fe7b1c5..44bb6970 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -1,11 +1,5 @@ name: "Node setup" description: "Performs setup for caching and other common needs." 
-inputs: - perform-cache-cleanup: - description: "Whether to perform cache cleanup" - required: false - default: false - type: boolean runs: using: "composite" steps: @@ -13,26 +7,17 @@ runs: echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool shell: bash - - uses: actions/cache@v4 - # Cache `cargo-make`, `cargo-cache` + - uses: actions/cache/restore@v4 + # Restore most recent cache if available. with: path: | ~/.cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - - uses: actions/cache@v4 - # Cache Rust dependencies - with: - path: | .cargo - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/Cargo.lock') }} - - uses: actions/cache@v4 - # Cache Go dependencies - with: - path: | .gomodcache - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }}-${{ hashFiles('sources/**/go.sum') }} + build/external-kits + build/rpms + build/state + target + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} - run: cargo install cargo-make shell: bash - - if: ${{ inputs.perform-cache-cleanup }} - run: cargo install --no-default-features --features ci-autoclean cargo-cache - shell: bash diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 5d975c9b..5f2f3b5a 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -1,36 +1,44 @@ -# This workflow caches crate dependencies, build tools, and external kits. +# This workflow caches external dependencies, build tools, and the most recent artifacts. # The cache is only usable by workflows started from pull requests against the develop branch. -name: CacheDepsAndTools +name: CacheLatest on: push: branches: [develop] - paths: - - '.github/**' - - 'sources/Cargo.lock' - - 'sources/**/go.sum' - - 'Twoliter.toml' - - 'Twoliter.lock' - - 'Makefile' jobs: cache: if: github.repository == 'bottlerocket-os/bottlerocket-core-kit' runs-on: group: bottlerocket - labels: bottlerocket_ubuntu-latest_8-core - continue-on-error: true + labels: bottlerocket_ubuntu-latest_32-core steps: - uses: actions/checkout@v4 - - name: Preflight step to set up the runner - uses: ./.github/actions/setup-node - with: - perform-cache-cleanup: true + # Install dependencies for twoliter and cargo-make. + - run: | + echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV + sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool + shell: bash + # Install cargo-make. + - run: cargo install cargo-make + shell: bash # This installs twoliter. - run: make prep # This fetches any external kit dependencies. - run: make fetch - # This cleans the existing project local caches. - - run: make twoliter purge-cache # This fetches Rust crate and Go module dependencies. - run: make twoliter fetch - # This cleans the cargo cache in ~/.cargo - - run: cargo-cache + # This builds the current packages and kits. + - run: make ARCH=x86_64 + - run: make ARCH=aarch64 + # This caches the reusable artifacts for future CI runs. 
+ - uses: actions/cache/save@v4 + # Save Rust dependencies + with: + path: | + ~/.cargo + .cargo + .gomodcache + build/external-kits + build/rpms + build/state + target + key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} From d1746ff2a28c6fdd4149e0e9dcf285a7ac122253 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 19 Jun 2024 19:07:27 +0000 Subject: [PATCH 1248/1356] build: update to Twoliter 0.4.1 Signed-off-by: Arnaldo Garcia Rincon --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 09f08452..13d42756 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.3.0" +TWOLITER_VERSION ?= "0.4.1" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket From 746cbafce81a08896014ff25686ce15fda2704e2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 19 Jun 2024 19:12:54 +0000 Subject: [PATCH 1249/1356] github: avoid concurrent cache workflows Signed-off-by: Ben Cressey --- .github/workflows/cache.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 5f2f3b5a..a5a428a6 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -10,6 +10,9 @@ jobs: runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_32-core + concurrency: + group: cache-${{ github.ref }} + cancel-in-progress: true steps: - uses: actions/checkout@v4 # Install dependencies for twoliter and cargo-make. From 6fefe308bfd8f97e3d3ff2ba60b76445b5337cca Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 19 Jun 2024 20:53:59 +0000 Subject: [PATCH 1250/1356] github: delete the cache before saving it GitHub doesn't allow modifying an existing cache, so delete it before saving the results. This should also stop the repository from surging over its 10 GiB cache limit if a second cache is ever created. Signed-off-by: Ben Cressey --- .github/actions/setup-node/action.yml | 6 ++---- .github/workflows/cache.yml | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index 44bb6970..58a9b92d 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -3,9 +3,7 @@ description: "Performs setup for caching and other common needs." runs: using: "composite" steps: - - run: | - echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV - sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool + - run: sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool shell: bash - uses: actions/cache/restore@v4 # Restore most recent cache if available. @@ -18,6 +16,6 @@ runs: build/rpms build/state target - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} + key: build-cache - run: cargo install cargo-make shell: bash diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index a5a428a6..bbf43cb9 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -13,12 +13,14 @@ jobs: concurrency: group: cache-${{ github.ref }} cancel-in-progress: true + env: + cache-key: build-cache + permissions: + actions: write steps: - uses: actions/checkout@v4 # Install dependencies for twoliter and cargo-make. 
- - run: | - echo "OS_ARCH=`uname -m`" >> $GITHUB_ENV - sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool + - run: sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool shell: bash # Install cargo-make. - run: cargo install cargo-make @@ -32,6 +34,12 @@ jobs: # This builds the current packages and kits. - run: make ARCH=x86_64 - run: make ARCH=aarch64 + # Remove the previous cache. + - run: | + gh extension install actions/gh-actions-cache + gh actions-cache delete "${{ env.cache-key }}" --confirm + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This caches the reusable artifacts for future CI runs. - uses: actions/cache/save@v4 # Save Rust dependencies @@ -44,4 +52,4 @@ jobs: build/rpms build/state target - key: ${{ hashFiles('.github/workflows/cache.yml') }}-${{ runner.os }}-${{ env.OS_ARCH }} + key: ${{ env.cache-key }} From 2291722f84ab43a43993cf0ca275c46b058f33be Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 20 Jun 2024 04:18:31 +0000 Subject: [PATCH 1251/1356] github: don't cache debug and nvidia packages These significantly increase the size of the cache, and aren't needed when rebuilding packages. Signed-off-by: Ben Cressey --- .github/workflows/cache.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index bbf43cb9..1ecc570d 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -34,12 +34,18 @@ jobs: # This builds the current packages and kits. - run: make ARCH=x86_64 - run: make ARCH=aarch64 - # Remove the previous cache. + # Delete packages that aren't needed for other builds. + - run: | + find build/rpms -name '*debugsource*' -type f -print -delete + find build/rpms -name '*debuginfo*' -type f -print -delete + find build/rpms -name '*kmod*nvidia*' -type f -print -delete + # Remove the previous cache (if it exists). - run: | gh extension install actions/gh-actions-cache gh actions-cache delete "${{ env.cache-key }}" --confirm env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + continue-on-error: true # This caches the reusable artifacts for future CI runs. - uses: actions/cache/save@v4 # Save Rust dependencies From cd041e16253b504902c11a1214a959b46497339e Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 19 Jun 2024 15:31:17 +0000 Subject: [PATCH 1252/1356] kernel-5.10: update to 5.10.218 Rebase to Amazon Linux upstream version 5.10.218-208.862.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 8fd8a3e8..fb5adf38 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/0e8dd42b36d60da0f50a2bce7fecca30610adf37e5a35585e39d2f318cdb1e76/kernel-5.10.217-205.860.amzn2.src.rpm" -sha512 = "e10c0099384cc5ee8b153594101aea35df8541ec06829472650bb15af72550006d8c436756ebfaa7a40a206bc823dd1edd5a37b076086d8fc860ee2ac4c441c8" +url = "https://cdn.amazonlinux.com/blobstore/b5fd278db7388155390664828137d2628fc514d69cabad6476b60908577f7ed8/kernel-5.10.218-208.862.amzn2.src.rpm" +sha512 = "3cc192e5862faa0b3ae9f1c80f65984e8bf96a7f19e9322577d4fe3a564d17668971ac480dc4982cb065f1fac1b18ca8bcf4bd2bd9156671f6d9d68aa053b339" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index d3ddb136..94834b33 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.217 +Version: 5.10.218 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/0e8dd42b36d60da0f50a2bce7fecca30610adf37e5a35585e39d2f318cdb1e76/kernel-5.10.217-205.860.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/b5fd278db7388155390664828137d2628fc514d69cabad6476b60908577f7ed8/kernel-5.10.218-208.862.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 304a04bfeae5987cd34ac394a86767dd8b3e1020 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Wed, 19 Jun 2024 15:31:45 +0000 Subject: [PATCH 1253/1356] kernel-5.15: update to 5.15.160 Rebase to Amazon Linux upstream version 5.15.160-104.158.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 05d3d0a9..0a5c47f3 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/f75f72cbdb5b3da04159fef0093b7ca471b95b58172bc9630600bc94668e247a/kernel-5.15.158-103.164.amzn2.src.rpm" -sha512 = "3ba3616cfcbc230208c84dffbbe1648e57a295dd828288e1e330e988f1f14a9a10fc6e6f251573d20e6679e802ac3b3ca53dfef39d1e19f61af4ede42a035af0" +url = "https://cdn.amazonlinux.com/blobstore/30d3a0d3adde03b0edcad16b16c89e9b3086b4d5594eb3f57e50b0d42ade76d5/kernel-5.15.160-104.158.amzn2.src.rpm" +sha512 = "368682b26dc17636f760c3ec6f53745bd774b6a482469cd5dcfebb9f7d5418695d344ba5f9b2e3e8189987eeb901c93ac9c0885d21c6d85fb11e6beb0dfcce5f" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 356f3315..e0b26f1f 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.158 +Version: 5.15.160 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/f75f72cbdb5b3da04159fef0093b7ca471b95b58172bc9630600bc94668e247a/kernel-5.15.158-103.164.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/30d3a0d3adde03b0edcad16b16c89e9b3086b4d5594eb3f57e50b0d42ade76d5/kernel-5.15.160-104.158.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From df013cf0df62d58e4ee17edfb52d9413579fafff Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 27 Jun 2024 23:10:54 +0000 Subject: [PATCH 1254/1356] kmod-6.1-neuron: include in core kit Also remove the unneeded glibc dependency, and gitignore the RPM. Signed-off-by: Ben Cressey --- packages/kmod-6.1-neuron/.gitignore | 1 + packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 packages/kmod-6.1-neuron/.gitignore diff --git a/packages/kmod-6.1-neuron/.gitignore b/packages/kmod-6.1-neuron/.gitignore new file mode 100644 index 00000000..e7a9c134 --- /dev/null +++ b/packages/kmod-6.1-neuron/.gitignore @@ -0,0 +1 @@ +*.rpm diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index fec66701..add77106 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -9,7 +9,6 @@ Source0: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-%{version}.noar Source1: neuron-modules-load.conf Source2: neuron-systemd-modules-load.drop-in.conf -BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-6.1-archive %description From ae5a8566af43586b0334276c83f33d5a822a4560 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 28 Jun 2024 02:08:10 +0000 Subject: [PATCH 1255/1356] kmod-6.1-neuron: do not outline atomics on arm64 Fixes a slew of build errors: modpost: "__aarch64_cas4_acq_rel" [neuron.ko] undefined! modpost: "__aarch64_cas1_acq_rel" [neuron.ko] undefined! modpost: "__aarch64_ldadd8_acq_rel" [neuron.ko] undefined! Note that there are no EC2 instance types that make Neuron hardware available for a Graviton processor, so this is untested apart from verifying that the kmod builds. 
Signed-off-by: Ben Cressey --- ...ild-do-not-outline-atomics-for-arm64.patch | 22 +++++++++++++++++++ packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 2 ++ 2 files changed, 24 insertions(+) create mode 100644 packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch diff --git a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch new file mode 100644 index 00000000..aa65f202 --- /dev/null +++ b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch @@ -0,0 +1,22 @@ +From 5c77e9c0db55dd35f162ec41fa5a62856121f5da Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Fri, 28 Jun 2024 02:06:55 +0000 +Subject: [PATCH] kbuild: do not outline atomics for arm64 + +Signed-off-by: Ben Cressey +--- + usr/src/aws-neuronx-2.16.7.0/Kbuild | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/usr/src/aws-neuronx-2.16.7.0/Kbuild b/usr/src/aws-neuronx-2.16.7.0/Kbuild +index 11f8490..6535608 100644 +--- a/usr/src/aws-neuronx-2.16.7.0/Kbuild ++++ b/usr/src/aws-neuronx-2.16.7.0/Kbuild +@@ -16,3 +16,4 @@ neuron-objs += v3/notific.o v3/neuron_dhal_v3.o + + ccflags-y += -O3 -Wall -Werror -Wno-declaration-after-statement -Wunused-macros -Wunused-local-typedefs + ccflags-y += -I$(src)/ ++ccflags-$(CONFIG_ARM64) += -mno-outline-atomics +-- +2.45.1 + diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index add77106..38cb439a 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -8,6 +8,7 @@ URL: https://awsdocs-neuron.readthedocs-hosted.com/en/latest/ Source0: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-%{version}.noarch.rpm Source1: neuron-modules-load.conf Source2: neuron-systemd-modules-load.drop-in.conf +Patch0001: 0001-kbuild-do-not-outline-atomics-for-arm64.patch BuildRequires: %{_cross_os}kernel-6.1-archive @@ -17,6 +18,7 @@ BuildRequires: %{_cross_os}kernel-6.1-archive %prep rpm2cpio %{SOURCE0} | cpio -idmv tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz +%autopatch -p1 %global neuron_sources usr/src/aws-neuronx-%{version} %global kernel_sources %{_builddir}/kernel-devel From 2230dd5d46accc62b337a339a3e4eecbbdefc477 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 25 Jun 2024 16:44:12 +0000 Subject: [PATCH 1256/1356] kernel-5.10: update to 5.10.219 Rebase to Amazon Linux upstream version 5.10.219-208.866.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index fb5adf38..dcbaf881 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/b5fd278db7388155390664828137d2628fc514d69cabad6476b60908577f7ed8/kernel-5.10.218-208.862.amzn2.src.rpm" -sha512 = "3cc192e5862faa0b3ae9f1c80f65984e8bf96a7f19e9322577d4fe3a564d17668971ac480dc4982cb065f1fac1b18ca8bcf4bd2bd9156671f6d9d68aa053b339" +url = "https://cdn.amazonlinux.com/blobstore/a76ae585dd09b2f986aa20d7b48f6a8557ac9a63265972464dcae464925ec700/kernel-5.10.219-208.866.amzn2.src.rpm" +sha512 = "7669cab43a35f7b5a7feaf0f4e5349bbe940d7eb2a52c0c5f647e91c645ecb364c81282f17d2be47be60122f470736378fec0935c002cc30a214dd50d6c6ae29" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 94834b33..c6387be9 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.218 +Version: 5.10.219 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/b5fd278db7388155390664828137d2628fc514d69cabad6476b60908577f7ed8/kernel-5.10.218-208.862.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/a76ae585dd09b2f986aa20d7b48f6a8557ac9a63265972464dcae464925ec700/kernel-5.10.219-208.866.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From a423b83ce47c0897b089a44431d19524e3418408 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 25 Jun 2024 16:45:26 +0000 Subject: [PATCH 1257/1356] kernel-6.1: update to 6.1.94 Rebase to Amazon Linux upstream version 6.1.94-99.176.amzn2023. Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 4a973e08..8d9f9bba 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/56c452d9992a4b8c25e5ff09f38a1464761196c1462a341e438301b6d56bfe50/kernel-6.1.92-99.174.amzn2023.src.rpm" -sha512 = "134d231c7c87e9136a6ceb2f125bd7d2163d7b73590d821f0d2192effd1a5f0850c612e0f9e03bcbd92f47014fd99fe6e9e8a1b45c5e01dab6d074faf74b4df4" +url = "https://cdn.amazonlinux.com/al2023/blobstore/b36ee10673c56c67b1f1a12e9afe3794a81ab7ff630c09abd2295c1d46a36e40/kernel-6.1.94-99.176.amzn2023.src.rpm" +sha512 = "d487b50ebc11b1492f5dd5e28ce1ee73d9311bc5e3fae7a4278a25096ebff821fc6b167279d9bcd5d8ea59c36f93316b1b48454465209356b7a8597e0750f0ba" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index ce68c408..599f5437 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.92 +Version: 6.1.94 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/al2023/blobstore/56c452d9992a4b8c25e5ff09f38a1464761196c1462a341e438301b6d56bfe50/kernel-6.1.92-99.174.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/b36ee10673c56c67b1f1a12e9afe3794a81ab7ff630c09abd2295c1d46a36e40/kernel-6.1.94-99.176.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 4f2744ae4ad62910ee50e2b0174bfa9732c9bb5a Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 25 Jun 2024 20:17:17 +0000 Subject: [PATCH 1258/1356] Add files to kernel-6.1.spec --- packages/kernel-6.1/kernel-6.1.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 599f5437..7991c4ba 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -653,9 +653,12 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/e1000/e1000.ko.* %{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/e1000e/e1000e.ko.* %{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/igb/igb.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/igc/igc.ko.* %{_cross_kmoddir}/kernel/drivers/net/ethernet/intel/ixgbevf/ixgbevf.ko.* %{_cross_kmoddir}/kernel/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko.* %{_cross_kmoddir}/kernel/drivers/net/ethernet/mellanox/mlxfw/mlxfw.ko.* +%{_cross_kmoddir}/kernel/drivers/net/ethernet/realtek/r8169.ko.gz +%{_cross_kmoddir}/kernel/drivers/net/phy/realtek.ko.gz %{_cross_kmoddir}/kernel/drivers/net/geneve.ko.* %if "%{_cross_arch}" == "x86_64" %{_cross_kmoddir}/kernel/drivers/net/hyperv/hv_netvsc.ko.* From 08e97a544aaa3db5fb50ede58ab4b09e84827725 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 11 Jul 2024 17:42:57 +0000 Subject: [PATCH 1259/1356] kernel-6.1: update to package kernel-6.1.96 Rebase to Amazon Linux upstream version package kernel-6.1.96-102.177.amzn2023 Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 8d9f9bba..745f721f 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/b36ee10673c56c67b1f1a12e9afe3794a81ab7ff630c09abd2295c1d46a36e40/kernel-6.1.94-99.176.amzn2023.src.rpm" -sha512 = "d487b50ebc11b1492f5dd5e28ce1ee73d9311bc5e3fae7a4278a25096ebff821fc6b167279d9bcd5d8ea59c36f93316b1b48454465209356b7a8597e0750f0ba" +url = "https://cdn.amazonlinux.com/al2023/blobstore/704482a5b82230d7012a6bd9b15689a3c8c05ab85493984fbe6c4bbbb0d38e21/kernel-6.1.96-102.177.amzn2023.src.rpm" +sha512 = "4a0c5223a4d8ee9e47440fbeb0a5b65e115421b1ed980fa603dcbd7909c74b01948a1dc853c30cc8f37ed51fe8980165b4cf69820c375a61f2e1269559514f7b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 7991c4ba..de2e4bb5 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.94 +Version: 6.1.96 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/b36ee10673c56c67b1f1a12e9afe3794a81ab7ff630c09abd2295c1d46a36e40/kernel-6.1.94-99.176.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/704482a5b82230d7012a6bd9b15689a3c8c05ab85493984fbe6c4bbbb0d38e21/kernel-6.1.96-102.177.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs @@ -709,8 +709,6 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %endif %{_cross_kmoddir}/kernel/drivers/pps/clients/pps-gpio.ko.* %{_cross_kmoddir}/kernel/drivers/pps/clients/pps-ldisc.ko.* -%{_cross_kmoddir}/kernel/drivers/pps/pps_core.ko.* -%{_cross_kmoddir}/kernel/drivers/ptp/ptp.ko.* %{_cross_kmoddir}/kernel/drivers/ptp/ptp_kvm.ko.* %{_cross_kmoddir}/kernel/drivers/scsi/ch.ko.* %if "%{_cross_arch}" == "x86_64" From e765071097e5a89642e777728e89ebdf9f33237d Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 11 Jul 2024 17:43:46 +0000 Subject: [PATCH 1260/1356] kernel-5.15: update to package kernel-5.15.161 Rebase to Amazon Linux upstream version package kernel-5.15.161-106.159.amzn2. Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 0a5c47f3..461392be 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/30d3a0d3adde03b0edcad16b16c89e9b3086b4d5594eb3f57e50b0d42ade76d5/kernel-5.15.160-104.158.amzn2.src.rpm" -sha512 = "368682b26dc17636f760c3ec6f53745bd774b6a482469cd5dcfebb9f7d5418695d344ba5f9b2e3e8189987eeb901c93ac9c0885d21c6d85fb11e6beb0dfcce5f" +url = "https://cdn.amazonlinux.com/blobstore/c86decdb8cfd1f1fe51e6d17c0dcc935d44b48480db4ea182f922934f7d0d34e/kernel-5.15.161-106.159.amzn2.src.rpm" +sha512 = "6d7b9b5f2cb9fe8371b65772c732ab9deead6719194d849b1888c0c47f2c9b134543ee63c36333fd35c8bd2353c9865365c3c48e0b90c939a16725312d88e1f8" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index e0b26f1f..40ceb579 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.160 +Version: 5.15.161 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/30d3a0d3adde03b0edcad16b16c89e9b3086b4d5594eb3f57e50b0d42ade76d5/kernel-5.15.160-104.158.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c86decdb8cfd1f1fe51e6d17c0dcc935d44b48480db4ea182f922934f7d0d34e/kernel-5.15.161-106.159.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From c75c7635455563df720ea56f873e9dbe9873c5b9 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Thu, 11 Jul 2024 17:44:35 +0000 Subject: [PATCH 1261/1356] kernel-5.10: update to package kernel-5.10.220 Rebase to Amazon Linux upstream version package kernel-5.10.220-209.867.amzn2. Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index dcbaf881..e08be1b3 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/a76ae585dd09b2f986aa20d7b48f6a8557ac9a63265972464dcae464925ec700/kernel-5.10.219-208.866.amzn2.src.rpm" -sha512 = "7669cab43a35f7b5a7feaf0f4e5349bbe940d7eb2a52c0c5f647e91c645ecb364c81282f17d2be47be60122f470736378fec0935c002cc30a214dd50d6c6ae29" +url = "https://cdn.amazonlinux.com/blobstore/36b61b64a5b940e6dbe9bd7c0e63ff2a61e24901d776de27a233d38b5f52ae94/kernel-5.10.220-209.867.amzn2.src.rpm" +sha512 = "939bc3fae149dbd22fa7b0a177c44add6b94c4b90feac2e3b4da73dba78685edf74ed705478c6e832ed1e98f186396385bbd5c1a28eef1b5b77fdc38d48a85c3" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index c6387be9..faa10e9d 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.219 +Version: 5.10.220 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/a76ae585dd09b2f986aa20d7b48f6a8557ac9a63265972464dcae464925ec700/kernel-5.10.219-208.866.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/36b61b64a5b940e6dbe9bd7c0e63ff2a61e24901d776de27a233d38b5f52ae94/kernel-5.10.220-209.867.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From e685512fda5ebd1d26bd07fe4b24774b9a569c7c Mon Sep 17 00:00:00 2001 From: Jarrett Tierney Date: Fri, 12 Jul 2024 12:27:19 -0700 Subject: [PATCH 1262/1356] Bump twoliter to 0.4.2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 13d42756..c79b0185 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.1" +TWOLITER_VERSION ?= "0.4.2" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket From ae5bfdc30d4ab9a1e910e548cd20edb2f74862d7 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Fri, 12 Jul 2024 21:24:07 +0000 Subject: [PATCH 1263/1356] diff-kernel-config: fix for core-kits The script referenced variants, which aren't a thing in the core-kit world Signed-off-by: Arnaldo Garcia Rincon --- tools/diff-kernel-config | 76 ++++++++++++---------------------------- 1 file changed, 22 insertions(+), 54 deletions(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 3fa5c62a..8fbb996f 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -36,9 +36,7 @@ Compare kernel configurations before and after a series of commits. -a, --after new Git revision to compare from -b, --before baseline Git revision to compare against - -v, --variant variant to pick kernel from to compare configs for, may - be given multiple times (optional, defaults to this list: - 'aws-k8s-1.23', 'metal-k8s-1.23', 'aws-dev', 'metal-dev') + -k, --kernel the kernel versions -o, --output-dir path to the output directory; must not exist yet -r, --resume resume work on a previous invocation; check which parts already exist in OUTPUT_DIR and skip builds accordingly @@ -50,15 +48,14 @@ Example invocation: and 5.15 (through metal-k8s-1.26) before and after the most recent commit has been applied: - $0 -b HEAD^ -a HEAD -v metal-k8s-1.23 -v metal-k8s-1.26 -o configs + $0 -b HEAD^ -a HEAD -k 5.10 -k 5.15 -o configs Notes: This compares the config changes for all combinations of aarch64/x86_64, - cloud/metal, and kernel versions. Combinations without a corresponding - Bottlerocket variant are skipped. Since this involves numerous full kernel - builds the comparison will take some time. Consider the working tree this - is invoked on busy while the script is running. + and kernel versions. Since this involves numerous full kernel builds the + comparison will take some time. Consider the working tree this is invoked + on busy while the script is running. 
EOF } @@ -74,7 +71,6 @@ usage_error() { # kernel_versions=() -variants=() while [[ $# -gt 0 ]]; do case $1 in @@ -82,8 +78,8 @@ while [[ $# -gt 0 ]]; do shift; gitrev_after_arg=$1 ;; -b|--before) shift; gitrev_before_arg=$1 ;; - -v|--variant) - shift; variants+=( "$1" ) ;; + -k|--kernel) + shift; kernel_versions+=( "$1" ) ;; -o|--output-dir) shift; output_dir=$1 ;; -r|--resume) @@ -96,15 +92,6 @@ while [[ $# -gt 0 ]]; do shift done -if [[ ${#variants[@]} -eq 0 ]]; then - variants=( aws-k8s-1.23 metal-k8s-1.23 aws-dev metal-dev ) -fi - -for var in "${variants[@]}"; do - [[ -d variants/${var} ]] || bail "Unknown variant '${var}'" -done -readonly variants - [[ -n ${output_dir} ]] || usage_error 'require -o|--output-dir' [[ -e ${output_dir} && ! -v resume ]] && bail "Output directory '${output_dir}' exists already, not touching it" readonly output_dir @@ -114,6 +101,7 @@ readonly output_dir # checkout. [[ -n ${gitrev_before_arg} ]] || usage_error 'require -b|--before' [[ -n ${gitrev_after_arg} ]] || usage_error 'require -a|--after' +[[ -n ${kernel_versions[*]} ]] || usage_error 'require -k|--kernel' gitrev_before=$(git rev-parse --verify --end-of-options "${gitrev_before_arg}^{commit}") gitrev_after=$(git rev-parse --verify --end-of-options "${gitrev_after_arg}^{commit}") [[ -n ${gitrev_before} ]] || bail "Invalid Git revision '${gitrev_before_arg}'" @@ -154,45 +142,25 @@ for state in after before; do gitrev_var=gitrev_${state} git checkout --quiet "${!gitrev_var}" || bail "Cannot check out '${!gitrev_var}'." - for variant in "${variants[@]}"; do + for kver in "${kernel_versions[@]}"; do - arches=() - IFS=" " read -r -a arches <<< "$(grep "supported-arches" "variants/${variant}/Cargo.toml" | cut -d ' ' -f 3 | tr -d '"[]')" - if [[ ${#arches[@]} -eq 0 ]]; then - arches=( aarch64 x86_64 ) - fi - - kver=$(grep "packages/kernel" "variants/${variant}/Cargo.toml" | cut -d ' ' -f 1 | cut -d '-' -f 2 | tr '_' '.') - - kernel_versions+=( "${kver}" ) + arches=( aarch64 x86_64 ) for arch in "${arches[@]}"; do - config_path=${output_dir}/config-${arch}-${variant}-${state} + config_path=${output_dir}/config-${arch}-${kver}-${state} if [[ -v resume && -e ${config_path} ]]; then echo "${config_path} already exists, skipping" continue fi - debug_id="state=${state} arch=${arch} variant=${variant} kernel=${kver}" - - IFS=- read -ra variant_parts <<<"${variant}" - variant_platform="${variant_parts[0]}" - variant_runtime="${variant_parts[1]}" - variant_family="${variant_platform}-${variant_runtime}" + debug_id="state=${state} arch=${arch} kernel=${kver}" # # Run build # - cargo make \ - -e BUILDSYS_ARCH="${arch}" \ - -e BUILDSYS_VARIANT="${variant}" \ - -e BUILDSYS_VARIANT_PLATFORM="${variant_platform}" \ - -e BUILDSYS_VARIANT_RUNTIME="${variant_runtime}" \ - -e BUILDSYS_VARIANT_FAMILY="${variant_family}" \ - -e PACKAGE="kernel-${kver/./_}" \ - build-package \ + ARCH="${arch}" PACKAGE="kernel-${kver/./_}" make twoliter build-package \ || bail "Build failed for ${debug_id}" # @@ -201,8 +169,7 @@ for state in after before; do shopt -s nullglob kernel_rpms=( - ./build/rpms/bottlerocket-*kernel-"${kver}"-"${kver}".*."${arch}".rpm - ./build/rpms/bottlerocket-"${arch}"-*kernel-"${kver}"-"${kver}".*.rpm + ./build/rpms/kernel-"${kver}"/bottlerocket-kernel-"${kver}"-"${kver}".*.*.*.br1."${arch}".rpm ) shopt -u nullglob @@ -227,10 +194,11 @@ for state in after before; do [[ -s "${config_path}" ]] || bail "Failed to extract config for ${debug_id}" - echo "config-${arch}-${variant}-${state} -> ${kver_full}" >> 
"${output_dir}"/kver_mapping + echo "config-${arch}-${kver}-${state} -> ${kver_full}" >> "${output_dir}"/kver_mapping done # arch - done # variant + done # kernel + done # state @@ -243,7 +211,7 @@ done # state # in the kernel-archive RPM from where it can be extracted. Here we extract the # latest version of the script, but any kernel version and arch will do. latest_kver=$(printf '%s\n' "${kernel_versions[@]}" | sort -V | tail -n1) -latest_archive_rpms=( ./build/rpms/bottlerocket-*kernel-"${latest_kver}"-archive-*.rpm ) +latest_archive_rpms=( ./build/rpms/kernel-"${latest_kver}"/bottlerocket-kernel-"${latest_kver}"-archive-*.rpm ) diffconfig=$(mktemp --suffix -bottlerocket-diffconfig) on_exit "rm '${diffconfig}'" rpm2cpio "${latest_archive_rpms[0]}" \ @@ -282,10 +250,10 @@ echo "config change" > "${output_dir}"/diff-table cat "${output_dir}"/*-diff | sort | uniq >> "${output_dir}"/diff-table for config_diff in "${output_dir}"/config-*-diff; do - variant_name=$(echo "${config_diff}" | sed -e "s%^${output_dir}/config-%%" -e "s%-diff$%%") - kver_before=$(grep "${variant_name}-before" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) - kver_after=$(grep "${variant_name}-after" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) - col_name="${variant_name} (${kver_before} -> ${kver_after})" + kernel_version=$(echo "${config_diff}" | sed -e "s%^${output_dir}/config-%%" -e "s%-diff$%%") + kver_before=$(grep "${kernel_version}-before" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) + kver_after=$(grep "${kernel_version}-after" "${output_dir}/kver_mapping" | cut -d ' ' -f 3) + col_name="${kernel_version} (${kver_before} -> ${kver_after})" sed -i "s/$/,/" "${output_dir}"/diff-table sed -i "/^config change/ s/$/${col_name}/" "${output_dir}"/diff-table From 1ee208fcf8c8f42ee0519a904ae541ede03fe3fc Mon Sep 17 00:00:00 2001 From: Jarrett Tierney Date: Wed, 17 Jul 2024 14:54:18 -0700 Subject: [PATCH 1264/1356] bump twoliter to 0.4.3 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c79b0185..f4e4430a 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.2" +TWOLITER_VERSION ?= "0.4.3" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket From 56cb41fee628e8d8fde0b6b64a1ca5b63172513f Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Mon, 22 Jul 2024 23:39:47 +0000 Subject: [PATCH 1265/1356] actions: ignore advisories/ in build workflow Signed-off-by: Gavin Inglis --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4f9c66da..5b20f59c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,6 +17,8 @@ on: - '**.tpl' # Sample config files and OpenAPI docs - '**.yaml' + # Bottlerocket Security Advisories + - 'advisories/**' concurrency: group: ${{ github.ref }} From 31ffd81e1686ef63f750904d6b75bd3c88217550 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Mon, 22 Jul 2024 22:27:29 +0000 Subject: [PATCH 1266/1356] kernel-6.1: update to package kernel-6.1.97 Rebase to Amazon Linux upstream version package kernel-6.1.97-104.177.amzn2023.src.rpm. 
Signed-off-by: Gavin Inglis --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 745f721f..9b53f9e8 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/704482a5b82230d7012a6bd9b15689a3c8c05ab85493984fbe6c4bbbb0d38e21/kernel-6.1.96-102.177.amzn2023.src.rpm" -sha512 = "4a0c5223a4d8ee9e47440fbeb0a5b65e115421b1ed980fa603dcbd7909c74b01948a1dc853c30cc8f37ed51fe8980165b4cf69820c375a61f2e1269559514f7b" +url = "https://cdn.amazonlinux.com/al2023/blobstore/d99ee343f454259e069b83f9c5b6c672d3e166a424243a4ae9fc2634a8d7d4d4/kernel-6.1.97-104.177.amzn2023.src.rpm" +sha512 = "c368f7e9f999e6b95d0ca12a32af5944fe91e2ac410d06b289f2ef9b0722fe97e2ae8abab8859dba26ab18fcd85e17f9065a864e13be7aff1f4015bf5e670b12" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index de2e4bb5..b423130e 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.96 +Version: 6.1.97 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/704482a5b82230d7012a6bd9b15689a3c8c05ab85493984fbe6c4bbbb0d38e21/kernel-6.1.96-102.177.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/d99ee343f454259e069b83f9c5b6c672d3e166a424243a4ae9fc2634a8d7d4d4/kernel-6.1.97-104.177.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From cb2864556837054f60f8ad8884ec4264575a0ea8 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Mon, 22 Jul 2024 22:28:39 +0000 Subject: [PATCH 1267/1356] kernel-5.10: update to package kernel-5.10.220 Rebase to Amazon Linux upstream version package kernel-5.10.220-209.869.amzn2.src.rpm. Signed-off-by: Gavin Inglis --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index e08be1b3..d76a5657 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/36b61b64a5b940e6dbe9bd7c0e63ff2a61e24901d776de27a233d38b5f52ae94/kernel-5.10.220-209.867.amzn2.src.rpm" -sha512 = "939bc3fae149dbd22fa7b0a177c44add6b94c4b90feac2e3b4da73dba78685edf74ed705478c6e832ed1e98f186396385bbd5c1a28eef1b5b77fdc38d48a85c3" +url = "https://cdn.amazonlinux.com/blobstore/8ec5b8c87de6871a92e268fc5b3e79c230b45a1629f503584407a91d94c424be/kernel-5.10.220-209.869.amzn2.src.rpm" +sha512 = "46140306a2eb9dbcbf80ec3fd9a96b62490c945b76c02a792cfa20ac6b012d066fcd554697d2d553712ebe23fb450311ab8671b212c71d45f3ba0f9f5fc6d46d" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index faa10e9d..a22fcfe0 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/36b61b64a5b940e6dbe9bd7c0e63ff2a61e24901d776de27a233d38b5f52ae94/kernel-5.10.220-209.867.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/8ec5b8c87de6871a92e268fc5b3e79c230b45a1629f503584407a91d94c424be/kernel-5.10.220-209.869.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From df8a2b3bc0a45886484f74db83d66f25d194db90 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Mon, 22 Jul 2024 22:29:23 +0000 Subject: [PATCH 1268/1356] kernel-5.15: update to package kernel-5.15.162 Rebase to Amazon Linux upstream version package kernel-5.15.162-107.160.amzn2.src.rpm. Signed-off-by: Gavin Inglis --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 461392be..3f0464c4 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/c86decdb8cfd1f1fe51e6d17c0dcc935d44b48480db4ea182f922934f7d0d34e/kernel-5.15.161-106.159.amzn2.src.rpm" -sha512 = "6d7b9b5f2cb9fe8371b65772c732ab9deead6719194d849b1888c0c47f2c9b134543ee63c36333fd35c8bd2353c9865365c3c48e0b90c939a16725312d88e1f8" +url = "https://cdn.amazonlinux.com/blobstore/f66f6724d9537ad1beb2068370c8d59d77d54b669036be14278bb92e8656def6/kernel-5.15.162-107.160.amzn2.src.rpm" +sha512 = "aa24e68ddeb428e2364b7baca15736d1e97381871dd037d3143313ddd578de992ec61b67772c360caaed816afdbd0d3ca70c0c43d1277714803aa88a9d92a6e0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 40ceb579..2e0b6af8 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.161 +Version: 5.15.162 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/c86decdb8cfd1f1fe51e6d17c0dcc935d44b48480db4ea182f922934f7d0d34e/kernel-5.15.161-106.159.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/f66f6724d9537ad1beb2068370c8d59d77d54b669036be14278bb92e8656def6/kernel-5.15.162-107.160.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 1e9642cf0e175fba1b98adc1f426da37257d5333 Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Tue, 23 Jul 2024 00:56:02 +0000 Subject: [PATCH 1269/1356] diff-kernel-config: use BUILDSYS_ARCH Signed-off-by: Gavin Inglis --- tools/diff-kernel-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/diff-kernel-config b/tools/diff-kernel-config index 8fbb996f..8aa85891 100755 --- a/tools/diff-kernel-config +++ b/tools/diff-kernel-config @@ -160,7 +160,7 @@ for state in after before; do # Run build # - ARCH="${arch}" PACKAGE="kernel-${kver/./_}" make twoliter build-package \ + BUILDSYS_ARCH="${arch}" PACKAGE="kernel-${kver/./_}" make twoliter build-package \ || bail "Build failed for ${debug_id}" # From 6f31b068b62d4e16bb0afb0d0ec50c67d17573f8 Mon Sep 17 00:00:00 2001 From: Todd Neal Date: Mon, 22 Jul 2024 17:24:37 +0000 Subject: [PATCH 1270/1356] add a kmod-6.1-neuron-devel package with header files --- packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index 38cb439a..67bc81ca 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -15,6 +15,13 @@ BuildRequires: %{_cross_os}kernel-6.1-archive %description %{summary}. +%package devel +Summary: Files for development using the Neuron drivers +Requires: %{name} + +%description devel +%{summary}. + %prep rpm2cpio %{SOURCE0} | cpio -idmv tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz @@ -51,9 +58,19 @@ install -p -m 0644 %{S:1} %{buildroot}%{_cross_libdir}/modules-load.d/neuron.con install -d %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d install -p -m 0644 %{S:2} %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf +# Install the shared header file +install -d %{buildroot}%{_cross_includedir}/share +install -p -m 0644 %{_builddir}/%{neuron_sources}/share/neuron_driver_shared.h %{buildroot}/%{_cross_includedir}/share/neuron_driver_shared.h +install -p -m 0644 %{_builddir}/%{neuron_sources}/neuron_ioctl.h %{buildroot}/%{_cross_includedir}/neuron_ioctl.h + %files %license %{neuron_sources}/LICENSE %{_cross_attribution_file} %{_cross_libdir}/modules/*/extra/neuron.ko.gz %{_cross_libdir}/modules-load.d/neuron.conf %{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf + +%files devel +%dir %{_cross_includedir}/share/ +%{_cross_includedir}/share/neuron_driver_shared.h +%{_cross_includedir}/neuron_ioctl.h From 605eb4cbb0d0b94233ba46ad4b3c3e96cf5896f4 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Thu, 8 Aug 2024 19:33:51 +0000 Subject: [PATCH 1271/1356] kernel-6.1: update to 6.1.102 Rebase to Amazon Linux upstream version 6.1.102-108.177.amzn2023. Disable two newly selected virtio drivers from upstream. 
Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/config-bottlerocket | 5 +++++ packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 9b53f9e8..47cde202 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/d99ee343f454259e069b83f9c5b6c672d3e166a424243a4ae9fc2634a8d7d4d4/kernel-6.1.97-104.177.amzn2023.src.rpm" -sha512 = "c368f7e9f999e6b95d0ca12a32af5944fe91e2ac410d06b289f2ef9b0722fe97e2ae8abab8859dba26ab18fcd85e17f9065a864e13be7aff1f4015bf5e670b12" +url = "https://cdn.amazonlinux.com/al2023/blobstore/0ad0fc5918f243e524ea2a8b8608330d14e7683c7f8b13dd99a9a5620907f0c5/kernel-6.1.102-108.177.amzn2023.src.rpm" +sha512 = "aed038b03b0c1d87cf4da28df475ed78286333a07f279b744da2dbccff72db65a83c5f7c5638acd28aa55910de5b8a223351226d248d149730eaa1afff93db23" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index c4ae543b..0091416c 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -352,3 +352,8 @@ CONFIG_QLCNIC_SRIOV=y # Cisco UCS HBA support CONFIG_FCOE_FNIC=m CONFIG_SCSI_SNIC=m + +# Disable virtio drivers unused in Bottlerocket +# CONFIG_DRM_VIRTIO_GPU is not set +# CONFIG_VIRTIO_DMA_SHARED_BUFFER is not set + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index b423130e..1646048a 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.97 +Version: 6.1.102 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/d99ee343f454259e069b83f9c5b6c672d3e166a424243a4ae9fc2634a8d7d4d4/kernel-6.1.97-104.177.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/0ad0fc5918f243e524ea2a8b8608330d14e7683c7f8b13dd99a9a5620907f0c5/kernel-6.1.102-108.177.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 6a19576a552c8ae20b26d1efa99e5438065ab03e Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Thu, 8 Aug 2024 05:54:40 +0000 Subject: [PATCH 1272/1356] re-enable check-fmt in CI --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5b20f59c..958bb528 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -46,9 +46,9 @@ jobs: uses: ./.github/actions/setup-node - run: rustup component add rustfmt - run: make twoliter unit-tests - # TODO: fixme please! # Avoid running Go lint check via `cargo make check-lints` since there's a separate golangci-lint workflow - # - run: make twoliter check-fmt + - run: make twoliter check-fmt + # TODO: fixme please! # - run: make twoliter check-clippy - run: make twoliter check-shell - run: make ARCH="${{ matrix.arch }}" From 02ac5b0b0550cc7e834c1c28dc6834c0d20b6793 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Thu, 8 Aug 2024 06:14:27 +0000 Subject: [PATCH 1273/1356] re-enable check-licenses in CI --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 958bb528..33ef9d0d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -45,6 +45,7 @@ jobs: - name: Preflight step to set up the runner uses: ./.github/actions/setup-node - run: rustup component add rustfmt + - run: make twoliter check-licenses - run: make twoliter unit-tests # Avoid running Go lint check via `cargo make check-lints` since there's a separate golangci-lint workflow - run: make twoliter check-fmt From 8116ec562f44e540677427ce9242bfe59af36228 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 13 Aug 2024 17:21:59 +0000 Subject: [PATCH 1274/1356] kernel-5.10: update to 5.10.223 Rebase to Amazon Linux upstream version 5.10.223-211.872.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index d76a5657..0c6d3554 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/8ec5b8c87de6871a92e268fc5b3e79c230b45a1629f503584407a91d94c424be/kernel-5.10.220-209.869.amzn2.src.rpm" -sha512 = "46140306a2eb9dbcbf80ec3fd9a96b62490c945b76c02a792cfa20ac6b012d066fcd554697d2d553712ebe23fb450311ab8671b212c71d45f3ba0f9f5fc6d46d" +url = "https://cdn.amazonlinux.com/blobstore/80b412025b31bb458101f1b77be21026705d3050811f05dad47e25e943c2cfb5/kernel-5.10.223-211.872.amzn2.src.rpm" +sha512 = "2e206345d88ed8f17363df1104dccad5d745116ecfeeac61b2ba84ca5a2dc9116b50754ea74f6ca79fe6eeeaf0bc0a5319469efde2726d8c0ced87ebcf5ecba8" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index a22fcfe0..acb47864 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.220 +Version: 5.10.223 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/8ec5b8c87de6871a92e268fc5b3e79c230b45a1629f503584407a91d94c424be/kernel-5.10.220-209.869.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/80b412025b31bb458101f1b77be21026705d3050811f05dad47e25e943c2cfb5/kernel-5.10.223-211.872.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 9809e7b18d2d2baa6f9bc17170a012d427f0f217 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 13 Aug 2024 17:22:18 +0000 Subject: [PATCH 1275/1356] kernel-5.15: update to 5.15.164 Rebase to Amazon Linux upstream version 5.15.164-108.161.amzn2. 
Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 3f0464c4..5574a6d5 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/f66f6724d9537ad1beb2068370c8d59d77d54b669036be14278bb92e8656def6/kernel-5.15.162-107.160.amzn2.src.rpm" -sha512 = "aa24e68ddeb428e2364b7baca15736d1e97381871dd037d3143313ddd578de992ec61b67772c360caaed816afdbd0d3ca70c0c43d1277714803aa88a9d92a6e0" +url = "https://cdn.amazonlinux.com/blobstore/ff595c6c1967d028475f9c56e3ba3bb174fbf0d2273b80c798b00394c14e2e4d/kernel-5.15.164-108.161.amzn2.src.rpm" +sha512 = "8f791fd583e06c2a822b8dede8e4b5abdac2cbfed60fe998e79b7129b46aeeb2a21126e06acda5a7b7e11015ae6658115015a653d0caff48b28855bfaf6a350b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 2e0b6af8..885ef2a9 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.162 +Version: 5.15.164 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/f66f6724d9537ad1beb2068370c8d59d77d54b669036be14278bb92e8656def6/kernel-5.15.162-107.160.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/ff595c6c1967d028475f9c56e3ba3bb174fbf0d2273b80c798b00394c14e2e4d/kernel-5.15.164-108.161.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 40f64e94598cd95fbd4f0f8b5c4ec1a22b31c7e5 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Tue, 13 Aug 2024 20:59:42 +0000 Subject: [PATCH 1276/1356] tools: add collect-kernel-config script Run tools/collect-kernel-config to extract the six as-build kernel configurations from build/rpms after building the core kit. Build both architectures if you will be comparing configurations for both architectures. 
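A minimal usage sketch, assuming the core kit has already been built for both architectures (as in the CI workflow's `make ARCH=...` invocation); the output directories here are illustrative, and the script defaults to /tmp/configs:

    # build the core kit for both architectures
    make ARCH=x86_64 && make ARCH=aarch64
    # snapshot the as-built configs for the current tree
    tools/collect-kernel-config -o /tmp/configs-before
    # ...apply kernel changes, rebuild, then snapshot again...
    tools/collect-kernel-config -o /tmp/configs-after
    # compare one kernel/arch pair
    diff -u /tmp/configs-{before,after}/config-6.1-x86_64.config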
--- tools/collect-kernel-config | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 tools/collect-kernel-config diff --git a/tools/collect-kernel-config b/tools/collect-kernel-config new file mode 100755 index 00000000..a5fd83e2 --- /dev/null +++ b/tools/collect-kernel-config @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +output_dir=/tmp/configs + +usage() { + cat <&2 usage + bail "$1" +} + + +# +# Parse arguments +# + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--output-dir) + shift; output_dir=$1 ;; + -h|--help) + usage; exit 0 ;; + *) + usage_error "Invalid option '$1'" ;; + esac + shift +done + +readonly output_dir +mkdir -p ${output_dir} +echo "Created ${output_dir}" + +for kernel in 5.10 5.15 6.1; do + for arch in x86_64 aarch64; do + rpm2cpio ./build/rpms/kernel-${kernel}/bottlerocket-kernel-${kernel}-${kernel}*.${arch}.rpm | cpio --extract --to-stdout ./boot/config > ${output_dir}/config-${kernel}-${arch}.config + done +done From 8bdd2189f302ffeaee206287293f314a54d9ca29 Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Mon, 19 Aug 2024 23:44:55 +0000 Subject: [PATCH 1277/1356] kernel-6.1: update to 6.1.102 Rebase to Amazon Linux upstream version 6.1.102-111.182.amzn2023. Signed-off-by: Yutong Sun --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 47cde202..0e6f7c79 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/0ad0fc5918f243e524ea2a8b8608330d14e7683c7f8b13dd99a9a5620907f0c5/kernel-6.1.102-108.177.amzn2023.src.rpm" -sha512 = "aed038b03b0c1d87cf4da28df475ed78286333a07f279b744da2dbccff72db65a83c5f7c5638acd28aa55910de5b8a223351226d248d149730eaa1afff93db23" +url = "https://cdn.amazonlinux.com/al2023/blobstore/bc929cd6c35e297ebc5760d75997d219080501a32b7936641003178bce778074/kernel-6.1.102-111.182.amzn2023.src.rpm" +sha512 = "fab5cfd995c22a36a9815a3c8115a72d1a2e3a28e3fc49b7b490f664b562a6f48724616cc458b58a147d4b5fbdf16cb34a58676fbae72838a770d43334089300" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 1646048a..91650b4d 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/0ad0fc5918f243e524ea2a8b8608330d14e7683c7f8b13dd99a9a5620907f0c5/kernel-6.1.102-108.177.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/bc929cd6c35e297ebc5760d75997d219080501a32b7936641003178bce778074/kernel-6.1.102-111.182.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 15def155851d0817af5cdcf9dd2251bb8ac18ffb Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Mon, 19 Aug 2024 23:45:45 +0000 Subject: [PATCH 1278/1356] kernel-5.10: update to 5.10.223 Rebase to Amazon Linux upstream version 5.10.223-212.873.amzn2. 
Signed-off-by: Yutong Sun --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 0c6d3554..00104ca6 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/80b412025b31bb458101f1b77be21026705d3050811f05dad47e25e943c2cfb5/kernel-5.10.223-211.872.amzn2.src.rpm" -sha512 = "2e206345d88ed8f17363df1104dccad5d745116ecfeeac61b2ba84ca5a2dc9116b50754ea74f6ca79fe6eeeaf0bc0a5319469efde2726d8c0ced87ebcf5ecba8" +url = "https://cdn.amazonlinux.com/blobstore/53ac58d4538601179e563feeda7d409981189fdde44ed02b0195fff959016432/kernel-5.10.223-212.873.amzn2.src.rpm" +sha512 = "1d45f8480ee51dfc4a8e7200aa42b63aface8c7633d90565624788d9bbfbca017a3c24635d994994ac672dc565995e2067e526e2f1852817f77beaa5b1305749" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index acb47864..0207aac1 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/80b412025b31bb458101f1b77be21026705d3050811f05dad47e25e943c2cfb5/kernel-5.10.223-211.872.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/53ac58d4538601179e563feeda7d409981189fdde44ed02b0195fff959016432/kernel-5.10.223-212.873.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From e06a250d0a4609483d7c45c7974a0e1110aa49b1 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Thu, 22 Aug 2024 17:06:32 +0000 Subject: [PATCH 1279/1356] twoliter: bump to 0.4.5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f4e4430a..8617c93e 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.3" +TWOLITER_VERSION ?= "0.4.5" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket From 706362ac5664bc7a631f76a57ddb7aa82978831f Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 23 Aug 2024 22:32:15 +0000 Subject: [PATCH 1280/1356] workspace: add .vscode settings This configures the VSCode rust-analyzer plugin to provide hints under 'sources/'. 
Without this configuration, rust-analyzer is tripped up by `buildsys` build scripts under 'packages/' --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 33ef9d0d..6623161b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,6 +19,8 @@ on: - '**.yaml' # Bottlerocket Security Advisories - 'advisories/**' + # VSCode configurations + - '.vscode/**' concurrency: group: ${{ github.ref }} From 32bbcf1ebfe51b8ade1f872f325ec935d82a2324 Mon Sep 17 00:00:00 2001 From: Sumukh Ballal Date: Wed, 28 Aug 2024 07:47:05 +0000 Subject: [PATCH 1281/1356] docs: updated root.json hash --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index 684c36d6..cb0fae64 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -214,7 +214,7 @@ Next, you need the Bottlerocket root role, which is used by tuftool to verify th This will download and verify the root role itself: ```shell curl -O "https://cache.bottlerocket.aws/root.json" -sha512sum -c <<<"a3c58bc73999264f6f28f3ed9bfcb325a5be943a782852c7d53e803881968e0a4698bd54c2f125493f4669610a9da83a1787eb58a8303b2ee488fa2a3f7d802f root.json" +sha512sum -c <<<"2ff1fbf99b20dd7ff5d2c84243a8e3b51701183b1f524b7d470a6b7a9b0172fbb36a0949b7e586ab7ccb6e348eb77125d6ed9fd1a638f4381e4f3f084ff38596 root.json" ``` Next, set your desired parameters, and download the kmod kit: From 8b1d831f6887c313ce9a19e8ccd1e10f00b0ef9e Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Wed, 28 Aug 2024 19:49:34 +0000 Subject: [PATCH 1282/1356] ci: enable clippy in ci --- .github/workflows/build.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6623161b..d9403005 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,7 +51,6 @@ jobs: - run: make twoliter unit-tests # Avoid running Go lint check via `cargo make check-lints` since there's a separate golangci-lint workflow - run: make twoliter check-fmt - # TODO: fixme please! - # - run: make twoliter check-clippy + - run: make twoliter check-clippy - run: make twoliter check-shell - run: make ARCH="${{ matrix.arch }}" From bdf267f6093ff888ec064d5822f3599b81247e15 Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Wed, 4 Sep 2024 21:09:45 +0000 Subject: [PATCH 1283/1356] kernel-6.1: update to 6.1.106 Rebase to Amazon Linux upstream version 6.1.106-116.188.amzn2023. Signed-off-by: Shikha Vyaghra --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 0e6f7c79..4f7483ea 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/bc929cd6c35e297ebc5760d75997d219080501a32b7936641003178bce778074/kernel-6.1.102-111.182.amzn2023.src.rpm" -sha512 = "fab5cfd995c22a36a9815a3c8115a72d1a2e3a28e3fc49b7b490f664b562a6f48724616cc458b58a147d4b5fbdf16cb34a58676fbae72838a770d43334089300" +url = "https://cdn.amazonlinux.com/al2023/blobstore/f578e84fd35abf2a86cbe79936f7d773eed3ca0202ac5fa049cf01879ce9bbe3/kernel-6.1.106-116.188.amzn2023.src.rpm" +sha512 = "253f601c2df406697fe9cff2a4cbfc3fb4c098a2ea8f36b3a1ce21c7c7d207612e18422a8eb832e6f3e105a59bb62b12bba6fb2f603e7740665ae38a78292645" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 91650b4d..38150c46 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.102 +Version: 6.1.106 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/bc929cd6c35e297ebc5760d75997d219080501a32b7936641003178bce778074/kernel-6.1.102-111.182.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/f578e84fd35abf2a86cbe79936f7d773eed3ca0202ac5fa049cf01879ce9bbe3/kernel-6.1.106-116.188.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs @@ -502,6 +502,7 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %endif %{_cross_kmoddir}/kernel/drivers/amazon/net/efa/efa.ko.* %{_cross_kmoddir}/kernel/drivers/amazon/net/ena/ena.ko.* +%{_cross_kmoddir}/kernel/drivers/amazon/scsi/mpi3mr/mpi3mr.ko.gz %if "%{_cross_arch}" == "aarch64" %{_cross_kmoddir}/kernel/drivers/ata/ahci_platform.ko.* %{_cross_kmoddir}/kernel/drivers/ata/libahci_platform.ko.* @@ -556,6 +557,7 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/drivers/edac/pnd2_edac.ko.* %{_cross_kmoddir}/kernel/drivers/edac/sb_edac.ko.* %{_cross_kmoddir}/kernel/drivers/edac/skx_edac.ko.* +%{_cross_kmoddir}/kernel/drivers/edac/skx_edac_common.ko.gz %{_cross_kmoddir}/kernel/drivers/edac/x38_edac.ko.* %endif %{_cross_kmoddir}/kernel/drivers/firmware/dmi-sysfs.ko.* From 438a3790d963e6c964e0169517e62edab4ac1ad4 Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Wed, 4 Sep 2024 23:59:31 +0000 Subject: [PATCH 1284/1356] kernel-5.15: update to 5.15.165 Rebase to Amazon Linux upstream version 5.15.165-110.161.amzn2. Signed-off-by: Shikha Vyaghra --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 5574a6d5..1ff30224 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/ff595c6c1967d028475f9c56e3ba3bb174fbf0d2273b80c798b00394c14e2e4d/kernel-5.15.164-108.161.amzn2.src.rpm" -sha512 = "8f791fd583e06c2a822b8dede8e4b5abdac2cbfed60fe998e79b7129b46aeeb2a21126e06acda5a7b7e11015ae6658115015a653d0caff48b28855bfaf6a350b" +url = "https://cdn.amazonlinux.com/blobstore/5fc19dbcdad79c0964001228b7f301dc9726ba49f28248fe04f44186bb318e51/kernel-5.15.165-110.161.amzn2.src.rpm" +sha512 = "dcb77a87aa343d10936a40e155d6d3b67e78a42f04f817157731b97caa4de113564edfba37dd9ed66712081ae0df1102cd5703cd895a493e4ec7348886eb303b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 885ef2a9..cf629197 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.164 +Version: 5.15.165 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/ff595c6c1967d028475f9c56e3ba3bb174fbf0d2273b80c798b00394c14e2e4d/kernel-5.15.164-108.161.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/5fc19dbcdad79c0964001228b7f301dc9726ba49f28248fe04f44186bb318e51/kernel-5.15.165-110.161.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 4bb2373a6ce3ecd4253c168a66d8498f7a780187 Mon Sep 17 00:00:00 2001 From: Shikha Vyaghra Date: Thu, 5 Sep 2024 00:00:02 +0000 Subject: [PATCH 1285/1356] kernel-5.10: update to 5.10.224 Rebase to Amazon Linux upstream version 5.10.224-212.876.amzn2. Signed-off-by: Shikha Vyaghra --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 00104ca6..c91e98e2 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/53ac58d4538601179e563feeda7d409981189fdde44ed02b0195fff959016432/kernel-5.10.223-212.873.amzn2.src.rpm" -sha512 = "1d45f8480ee51dfc4a8e7200aa42b63aface8c7633d90565624788d9bbfbca017a3c24635d994994ac672dc565995e2067e526e2f1852817f77beaa5b1305749" +url = "https://cdn.amazonlinux.com/blobstore/090ee50d7c80b80f41f8e3c8c7d63efaa9592371f48bc9f769b2d52cb358a238/kernel-5.10.224-212.876.amzn2.src.rpm" +sha512 = "1a5d1066aa061b4b8cc2d97671d86c4aee727266386f0507b8a841adfc51235d3fa74eab3cfc64ca3d78397294f789ae3ee48f45c16f330929691d14ce7153c0" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 0207aac1..e118e3f4 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.223 +Version: 5.10.224 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/53ac58d4538601179e563feeda7d409981189fdde44ed02b0195fff959016432/kernel-5.10.223-212.873.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/090ee50d7c80b80f41f8e3c8c7d63efaa9592371f48bc9f769b2d52cb358a238/kernel-5.10.224-212.876.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From cf03f97132cea5cfcc75ed40783c6bc3177a12c9 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 30 Aug 2024 23:25:44 +0000 Subject: [PATCH 1286/1356] kmod-5.15-nvidia: update driver to 535.183.06 Signed-off-by: Matthew Yeazel --- packages/kmod-5.15-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index f26a5a36..f5002018 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-x86_64-535.183.01.run" -sha512 = "02b6b679f4fc1d5305f32fca8ce0875eef04cb99f5611d0bb85ac7607ecdd5b2aa4d60b51bf47546477464531a07fffa5bf3db3859868648bd5e86565d85afbb" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" +sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-aarch64-535.183.01.run" -sha512 = "d2ac1be8c19b359023c31941374911f3adfe1be34aa2821ef582df4c854ac4eefbbcb10aa22583ac8c9d5caf9326bda12ed1ce6343d67479ed37a4887bd17b5e" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" +sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.01-1.x86_64.rpm" -sha512 = "d52879d1e552b949a529ede9c4ce3e7b66af0df96e8f43906f211673b99815561c83a7c382be17950b1308457ca496ce49adca41766f808cc5a340471353494b" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" +sha512 = "c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.01-1.aarch64.rpm" -sha512 = "75e1d306b9aa6cc8737bce50f39dc641f64de6a944c50f2c9706345c656f203c4706414dcb51def7671f0fd02fd18605aa3d62958b690d2705cb7011c54ff48e" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" +sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" force-upstream = true [build-dependencies] diff --git 
a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 1e9a1750..61dfdb5e 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 %global tesla_minor 183 -%global tesla_patch 01 +%global tesla_patch 06 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From ca18ed666d6f7cca9369cfe8be8e737f15be83fa Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 30 Aug 2024 23:26:07 +0000 Subject: [PATCH 1287/1356] kmod-6.1-nvidia: update driver to 535.183.06 Signed-off-by: Matthew Yeazel --- packages/kmod-6.1-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index a105eef2..12fa6eb8 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-x86_64-535.183.01.run" -sha512 = "02b6b679f4fc1d5305f32fca8ce0875eef04cb99f5611d0bb85ac7607ecdd5b2aa4d60b51bf47546477464531a07fffa5bf3db3859868648bd5e86565d85afbb" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" +sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-aarch64-535.183.01.run" -sha512 = "d2ac1be8c19b359023c31941374911f3adfe1be34aa2821ef582df4c854ac4eefbbcb10aa22583ac8c9d5caf9326bda12ed1ce6343d67479ed37a4887bd17b5e" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" +sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.01-1.x86_64.rpm" -sha512 = "d52879d1e552b949a529ede9c4ce3e7b66af0df96e8f43906f211673b99815561c83a7c382be17950b1308457ca496ce49adca41766f808cc5a340471353494b" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" +sha512 = "c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.01-1.aarch64.rpm" -sha512 = "75e1d306b9aa6cc8737bce50f39dc641f64de6a944c50f2c9706345c656f203c4706414dcb51def7671f0fd02fd18605aa3d62958b690d2705cb7011c54ff48e" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" +sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" 
force-upstream = true [build-dependencies] diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 4f5b149b..c16b77ba 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 %global tesla_minor 183 -%global tesla_patch 01 +%global tesla_patch 06 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From bc324787fe116fed8f54ebefb03b3ba4e7e0dbf8 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Mon, 2 Sep 2024 23:56:35 +0000 Subject: [PATCH 1288/1356] kmod-5.15-nvidia: Add open gpu kernel modules This adds the open source kernel modules for NVIDIA. They are compiled alongside the proprietary drivers and are currently included via their own subpackage but are not immediately loadable since they are not in a path that modprobe knows to look for them. Signed-off-by: Matthew Yeazel --- packages/kmod-5.15-nvidia/.gitignore | 1 + packages/kmod-5.15-nvidia/Cargo.toml | 5 ++ .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 84 ++++++++++++++++++- 3 files changed, 86 insertions(+), 4 deletions(-) diff --git a/packages/kmod-5.15-nvidia/.gitignore b/packages/kmod-5.15-nvidia/.gitignore index 64d9ed83..db8b415b 100644 --- a/packages/kmod-5.15-nvidia/.gitignore +++ b/packages/kmod-5.15-nvidia/.gitignore @@ -1,2 +1,3 @@ NVidiaEULAforAWS.pdf +COPYING *.rpm diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index f5002018..59c1332b 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -36,6 +36,11 @@ url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidi sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" force-upstream = true +[[package.metadata.build-package.external-files]] +url = "https://raw.githubusercontent.com/NVIDIA/open-gpu-kernel-modules/535/COPYING" +sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3629c432d4d9304c730b5d1a942228a5bcc74a03ab1c411c77c758cd938" +force-upstream = true + [build-dependencies] glibc = { path = "../glibc" } kernel-5_15 = { path = "../kernel-5.15" } diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 61dfdb5e..ddec74c3 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -29,6 +29,7 @@ URL: http://www.nvidia.com/ Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run Source2: NVidiaEULAforAWS.pdf +Source3: COPYING # fabricmanager for NVSwitch Source10: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-%{tesla_ver}-1.x86_64.rpm @@ -59,6 +60,15 @@ Requires: %{name}-tesla(fabricmanager) %description fabricmanager %{summary}. +%package open-gpu-%{tesla_major} +Summary: NVIDIA %{tesla_major} Open GPU driver +Version: %{tesla_ver} +License: MIT OR GPL-2.0-only +Requires: %{_cross_os}variant-platform(aws) + +%description open-gpu-%{tesla_major} +%{summary}. 
+ %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} @@ -67,6 +77,7 @@ Requires: %{_cross_os}variant-platform(aws) Requires: %{name} Requires: %{name}-fabricmanager Provides: %{name}-tesla(fabricmanager) +Requires: %{name}-open-gpu-%{tesla_major} %description tesla-%{tesla_major} %{summary} @@ -83,16 +94,30 @@ rpm2cpio %{_sourcedir}/nvidia-fabric-manager-%{tesla_ver}-1.%{_cross_arch}.rpm | # Add the license. install -p -m 0644 %{S:2} . +install -p -m 0644 %{S:3} . %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -%build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel +%define _kernel_version %(ls %{kernel_sources}/include/config/kernel.release) +%global _cross_kmoddir %{_cross_libdir}/modules/%{_kernel_version} # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod +# Begin open driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel-open + +# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if +# we don't set this flag the compilation fails +make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld + +# end open driver build +popd + +# Begin proprietary driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel + # We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if # we don't set this flag the compilation fails make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld @@ -111,6 +136,14 @@ rm nvidia{,-modeset,-peermem}.o # don't include any linked module in the base image rm nvidia{,-modeset,-peermem,-drm}.ko +# End proprietary driver build +popd + +# Grab the list of supported devices +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/supported-gpus +jq -r '.chips[] | select(.features[] | contains("kernelopen"))' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json +# confirm "NVIDIA A10G" is in the resulting file to catch shape changes +jq -e '."open-gpu"[] | select(."devid" == "0x2237") | ."features"| index("kernelopen")' open-gpu-supported-devices.json popd %install @@ -137,11 +170,13 @@ install -p -m 0644 %{S:204} %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir} # Begin NVIDIA tesla driver pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} -# We install bins and libs in a versioned directory to prevent collisions with future drivers versions +# Proprietary driver install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{_cross_libdir}/nvidia/tesla install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla +install -d %{buildroot}%{_cross_factorydir}/nvidia/open-gpu +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ @@ -158,7 +193,7 @@ install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ -# driver +# proprietary driver install kernel/nvidia.mod.o 
%{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o @@ -180,6 +215,23 @@ install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvi install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d +# open driver +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +install kernel-open/nvidia.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# uvm +install kernel-open/nvidia-uvm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# modeset +install kernel-open/nvidia-modeset.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# peermem +install kernel-open/nvidia-peermem.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# drm +install kernel-open/nvidia-drm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +# end open driver + # Binaries install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin @@ -210,6 +262,9 @@ install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} install -p -m 0644 firmware/gsp_ga10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +# Include the open driver supported devices file for runtime matching of the driver. This is consumed by ghostdog to match the driver to this list +install -p -m 0644 supported-gpus/open-gpu-supported-devices.json %{buildroot}%{_cross_datadir}/nvidia/open-gpu-supported-devices.json + popd # Begin NVIDIA fabric manager binaries and topologies @@ -262,6 +317,7 @@ popd %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml %{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf %{_cross_factorydir}/nvidia/tesla/nvidia-path.env +%{_cross_datadir}/nvidia/open-gpu-supported-devices.json # driver %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia.mod.o @@ -389,6 +445,26 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} +%files open-gpu-%{tesla_major} +%license COPYING +%dir %{_cross_datadir}/nvidia/open-gpu/drivers +%dir %{_cross_factorydir}/nvidia/open-gpu + +# driver +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia.ko + +# uvm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-uvm.ko + +# modeset +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-modeset.ko + +# drm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-drm.ko + +# peermem +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-peermem.ko + %files fabricmanager %{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg %{_cross_unitdir}/nvidia-fabricmanager.service From e58cd2ea0740b1c103081cb79b783894253dbc88 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Mon, 2 Sep 2024 23:59:08 +0000 Subject: [PATCH 1289/1356] kmod-6.1-nvidia: Add open gpu kernel modules This adds the open source kernel modules for NVIDIA. 
They are compiled alongside the proprietary drivers and are currently included via their own subpackage but are not immediately loadable since they are not in a path that modprobe knows to look for them. Signed-off-by: Matthew Yeazel --- packages/kmod-6.1-nvidia/.gitignore | 1 + packages/kmod-6.1-nvidia/Cargo.toml | 5 ++ packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 84 ++++++++++++++++++- 3 files changed, 86 insertions(+), 4 deletions(-) diff --git a/packages/kmod-6.1-nvidia/.gitignore b/packages/kmod-6.1-nvidia/.gitignore index 64d9ed83..db8b415b 100644 --- a/packages/kmod-6.1-nvidia/.gitignore +++ b/packages/kmod-6.1-nvidia/.gitignore @@ -1,2 +1,3 @@ NVidiaEULAforAWS.pdf +COPYING *.rpm diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 12fa6eb8..3c53998f 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -36,6 +36,11 @@ url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidi sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" force-upstream = true +[[package.metadata.build-package.external-files]] +url = "https://raw.githubusercontent.com/NVIDIA/open-gpu-kernel-modules/535/COPYING" +sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3629c432d4d9304c730b5d1a942228a5bcc74a03ab1c411c77c758cd938" +force-upstream = true + [build-dependencies] glibc = { path = "../glibc" } kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index c16b77ba..8e130489 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -29,6 +29,7 @@ URL: http://www.nvidia.com/ Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run Source2: NVidiaEULAforAWS.pdf +Source3: COPYING # fabricmanager for NVSwitch Source10: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-%{tesla_ver}-1.x86_64.rpm @@ -59,6 +60,15 @@ Requires: %{name}-tesla(fabricmanager) %description fabricmanager %{summary}. +%package open-gpu-%{tesla_major} +Summary: NVIDIA %{tesla_major} Open GPU driver +Version: %{tesla_ver} +License: MIT OR GPL-2.0-only +Requires: %{_cross_os}variant-platform(aws) + +%description open-gpu-%{tesla_major} +%{summary}. + %package tesla-%{tesla_major} Summary: NVIDIA %{tesla_major} Tesla driver Version: %{tesla_ver} @@ -67,6 +77,7 @@ Requires: %{_cross_os}variant-platform(aws) Requires: %{name} Requires: %{name}-fabricmanager Provides: %{name}-tesla(fabricmanager) +Requires: %{name}-open-gpu-%{tesla_major} %description tesla-%{tesla_major} %{summary} @@ -83,16 +94,30 @@ rpm2cpio %{_sourcedir}/nvidia-fabric-manager-%{tesla_ver}-1.%{_cross_arch}.rpm | # Add the license. install -p -m 0644 %{S:2} . +install -p -m 0644 %{S:3} . 
%global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -%build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel +%define _kernel_version %(ls %{kernel_sources}/include/config/kernel.release) +%global _cross_kmoddir %{_cross_libdir}/modules/%{_kernel_version} # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod +# Begin open driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel-open + +# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if +# we don't set this flag the compilation fails +make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld + +# end open driver build +popd + +# Begin proprietary driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel + # We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if # we don't set this flag the compilation fails make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld @@ -111,6 +136,14 @@ rm nvidia{,-modeset,-peermem}.o # don't include any linked module in the base image rm nvidia{,-modeset,-peermem,-drm}.ko +# End proprietary driver build +popd + +# Grab the list of supported devices +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/supported-gpus +jq -r '.chips[] | select(.features[] | contains("kernelopen"))' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json +# confirm "NVIDIA A10G" is in the resulting file to catch shape changes +jq -e '."open-gpu"[] | select(."devid" == "0x2237") | ."features"| index("kernelopen")' open-gpu-supported-devices.json popd %install @@ -137,11 +170,13 @@ install -p -m 0644 %{S:204} %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir} # Begin NVIDIA tesla driver pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} -# We install bins and libs in a versioned directory to prevent collisions with future drivers versions +# Proprietary driver install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -d %{buildroot}%{_cross_libdir}/nvidia/tesla install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla +install -d %{buildroot}%{_cross_factorydir}/nvidia/open-gpu +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ @@ -158,7 +193,7 @@ install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ -# driver +# proprietary driver install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o @@ -180,6 +215,23 @@ install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvi install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia-drm.o 
%{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d +# open driver +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +install kernel-open/nvidia.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# uvm +install kernel-open/nvidia-uvm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# modeset +install kernel-open/nvidia-modeset.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# peermem +install kernel-open/nvidia-peermem.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# drm +install kernel-open/nvidia-drm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +# end open driver + # Binaries install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin @@ -210,6 +262,9 @@ install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} install -p -m 0644 firmware/gsp_ga10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +# Include the open driver supported devices file for runtime matching of the driver. This is consumed by ghostdog to match the driver to this list +install -p -m 0644 supported-gpus/open-gpu-supported-devices.json %{buildroot}%{_cross_datadir}/nvidia/open-gpu-supported-devices.json + popd # Begin NVIDIA fabric manager binaries and topologies @@ -262,6 +317,7 @@ popd %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml %{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf %{_cross_factorydir}/nvidia/tesla/nvidia-path.env +%{_cross_datadir}/nvidia/open-gpu-supported-devices.json # driver %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia.mod.o @@ -389,6 +445,26 @@ popd %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 %exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} +%files open-gpu-%{tesla_major} +%license COPYING +%dir %{_cross_datadir}/nvidia/open-gpu/drivers +%dir %{_cross_factorydir}/nvidia/open-gpu + +# driver +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia.ko + +# uvm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-uvm.ko + +# modeset +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-modeset.ko + +# drm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-drm.ko + +# peermem +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-peermem.ko + %files fabricmanager %{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg %{_cross_unitdir}/nvidia-fabricmanager.service From 8693d495541e9d3e23065e8eb83281126cf522d7 Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Wed, 4 Sep 2024 19:13:47 +0000 Subject: [PATCH 1290/1356] kmod-6.1-neuron: Update to v2.17.17.0 --- .../0001-kbuild-do-not-outline-atomics-for-arm64.patch | 8 ++++---- packages/kmod-6.1-neuron/Cargo.toml | 4 ++-- packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch index aa65f202..5ce125cf 100644 --- a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch +++ b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch @@ -5,13 +5,13 @@ Subject: [PATCH] kbuild: do not outline atomics for arm64 Signed-off-by: Ben Cressey --- - usr/src/aws-neuronx-2.16.7.0/Kbuild | 1 + + 
usr/src/aws-neuronx-2.17.17.0/Kbuild | 1 + 1 file changed, 1 insertion(+) -diff --git a/usr/src/aws-neuronx-2.16.7.0/Kbuild b/usr/src/aws-neuronx-2.16.7.0/Kbuild +diff --git a/usr/src/aws-neuronx-2.17.17.0/Kbuild b/usr/src/aws-neuronx-2.17.17.0/Kbuild index 11f8490..6535608 100644 ---- a/usr/src/aws-neuronx-2.16.7.0/Kbuild -+++ b/usr/src/aws-neuronx-2.16.7.0/Kbuild +--- a/usr/src/aws-neuronx-2.17.17.0/Kbuild ++++ b/usr/src/aws-neuronx-2.17.17.0/Kbuild @@ -16,3 +16,4 @@ neuron-objs += v3/notific.o v3/neuron_dhal_v3.o ccflags-y += -O3 -Wall -Werror -Wno-declaration-after-statement -Wunused-macros -Wunused-local-typedefs diff --git a/packages/kmod-6.1-neuron/Cargo.toml b/packages/kmod-6.1-neuron/Cargo.toml index 32e69917..054c490e 100644 --- a/packages/kmod-6.1-neuron/Cargo.toml +++ b/packages/kmod-6.1-neuron/Cargo.toml @@ -13,8 +13,8 @@ package-name = "kmod-6.1-neuron" releases-url = "https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html" [[package.metadata.build-package.external-files]] -url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.16.7.0.noarch.rpm" -sha512 = "8e66feb4051af31321c08b6663a950172da65c4e5b432c0b5609785be34ccb193c0eb50c9aadfeec8b6410ccbe05264a3fb6fc7cb66dc87b172bc5be5c4d92d0" +url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.17.17.0.noarch.rpm" +sha512 = "8ff07280fc2864677d9401d56939f8b8f8cd59dc1ae9df53d49aced500cd62c3071a61639e9f5381b557d29cc7a2b72be71020f90880680a59d10a1f8b28580b" [build-dependencies] kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index 67bc81ca..6b98c1a3 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -1,5 +1,5 @@ Name: %{_cross_os}kmod-6.1-neuron -Version: 2.16.7.0 +Version: 2.17.17.0 Release: 1%{?dist} Summary: Neuron drivers for the 6.1 kernel License: GPL-2.0-only From 9fde5f888eb3a0147dfb07b661646ef5c1811df8 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Tue, 10 Sep 2024 18:09:22 +0000 Subject: [PATCH 1291/1356] microcode: update Intel and AMD microcode to August 2024 release Signed-off-by: Matthew Yeazel --- packages/microcode/Cargo.toml | 8 ++++---- packages/microcode/microcode.spec | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/microcode/Cargo.toml b/packages/microcode/Cargo.toml index f8469310..7edbdc9b 100644 --- a/packages/microcode/Cargo.toml +++ b/packages/microcode/Cargo.toml @@ -11,9 +11,9 @@ path = "../packages.rs" # Check the two upstream repositories for the latest releases [[package.metadata.build-package.external-files]] -url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20231111.tar.xz" -sha512 = "dd8eb7e8a51fe14479e2f4e1081a8a31f13d041ddd6180eaae6fe865dbad303c89aaaed2b9df237923f74f1cf9fe8c2e5492b4de5fce991ddb02bb091c95dc58" +url = "https://www.kernel.org/pub/linux/kernel/firmware/linux-firmware-20240909.tar.xz" +sha512 = "d1918364f9925291da722075cf2d038082a6b6b5c6d7e5ab8b0888c5e87563718934f493fe172db21608d6eace92ade5c519b5f50b1fc7f25a328e45be059142" [[package.metadata.build-package.external-files]] -url = "https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-20231114.tar.gz" -sha512 = "a684444ef81e81687ff43b8255e95675eed1d728053bb1a483a60e94e2d2d43f10fc12522510b22daf90c4debd8f035e6b9a565813aa799c2e1e3a464124f59b" +url = 
"https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files/archive/refs/tags/microcode-20240813.tar.gz" +sha512 = "ba1fa7d9bed7d90756ea959f5878afca0deacc9b1e932a936a15d74a411b7efb6103a4af75dc3731d9cbb2e464439ce9a7d448f75bc6f38b616907ff6dec6ee3" diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec index 697949db..edbe01e9 100644 --- a/packages/microcode/microcode.spec +++ b/packages/microcode/microcode.spec @@ -3,8 +3,8 @@ # These are specific to the upstream source RPM, and will likely need to be # updated for each new version. -%global amd_ucode_version 20231111 -%global intel_ucode_version 20231114 +%global amd_ucode_version 20240909 +%global intel_ucode_version 20240813 Name: %{_cross_os}microcode Version: 0.0 From 1cda947ca3ef42734ae069fea40bf0b0ab449c63 Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Sat, 10 Aug 2024 17:37:34 +0000 Subject: [PATCH 1292/1356] documentation: add a README --- README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..b65f7003 --- /dev/null +++ b/README.md @@ -0,0 +1,23 @@ +# Bottlerocket Core Kit +This is the core kit for [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket). +It includes many common dependencies for downstream package and variant builds. + +## Contents +The core kit includes: +* Shared libraries such as glibc and libz +* Management daemons such as systemd and dbus-broker +* Agents for settings API and in-place updates + +### Availability +The [Bottlerocket core kit](https://gallery.ecr.aws/bottlerocket/bottlerocket-core-kit) is available through Amazon ECR Public. + +### Development +The core kit can be built on either an **x86_64** or an **aarch64** host. To do this you can use the following commands. +```shell +make +``` +OR +```shell +make ARCH= +``` +See the [BUILDING](https://github.com/bottlerocket-os/bottlerocket-core-kit/blob/develop/BUILDING.md) guide for more details. From 7a9de7f8e1964962c1b64ddf9abeaa11b465d4fe Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Sat, 10 Aug 2024 17:37:53 +0000 Subject: [PATCH 1293/1356] documentation: update the BUILDING --- BUILDING.md | 243 ++++++++++++---------------------------------------- README.md | 2 +- 2 files changed, 54 insertions(+), 191 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index cb0fae64..0a419d4c 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -1,244 +1,107 @@ -# Building Bottlerocket +# How to build the Bottlerocket core kit -If you'd like to build your own image instead of relying on an Amazon-provided image, follow these steps. -You can skip to the [setup guide for Kubernetes](QUICKSTART-EKS.md) or the [setup guide for Amazon ECS](QUICKSTART-ECS.md) to use an existing image in Amazon EC2. -(We're still working on other use cases!) - -## Build an image - -### Dependencies +If you'd like to build your own copy of the core kit for local development, follow these steps. +## Dependencies #### System Requirements - The build process artifacts and resulting images can consume in excess of 80GB in the local directory. - -The build process is also fairly demanding on your CPU, since we build all included software from scratch. -(The first time. Package builds are cached, and only changes are built afterward.) +The build process is also fairly demanding on your CPU, since we build all included software from scratch. This is only done the first time. 
Package builds are cached, and only changes are built afterward. The build scales well to 32+ cores. -The first time you build, the fastest machines can take about 12 minutes while slower machines with only a couple cores can take 3-4 hours. - +The first time you build, the fastest machines can take about 20 minutes while slower machines with only a couple cores can take 3-4 hours. #### Linux - The build system requires certain operating system packages to be installed. - -Ensure the following OS packages are installed: - +Ensure the following packages are installed: ##### Ubuntu - ```shell apt install build-essential openssl libssl-dev pkg-config liblz4-tool ``` - ##### Fedora - ```shell yum install make automake gcc openssl openssl-devel pkg-config lz4 perl-FindBin perl-lib ``` - #### Rust - The build system is based on the Rust language. We recommend you install the latest stable Rust using [rustup](https://rustup.rs/), either from the official site or your development host's package manager. Rust 1.51.0 or higher is required. - To organize build tasks, we use [cargo-make](https://sagiegurari.github.io/cargo-make/). To get it, run: - ```shell cargo install cargo-make ``` -#### Docker +### OCI Artifacts + +Building a kit results in building OCI artifacts, there are two ways to build these artifacts: `crane` or `docker` with the `containerd-snapshotter` feature enabled. -Bottlerocket uses [Docker](https://docs.docker.com/install/#supported-platforms) to orchestrate package and image builds. +We recommend using `crane` (and `krane`) over `docker` as it has shown better performance in our testing. -We recommend Docker 20.10.10 or later. +#### Docker +We recommend [Docker](https://docs.docker.com/install/#supported-platforms) 20.10.10 or later. The default seccomp policy of older versions of Docker do not support the `clone3` syscall in recent versions of Fedora or Ubuntu, on which the Bottlerocket SDK is based. Builds rely on Docker's integrated BuildKit support, which has received many fixes and improvements in newer versions. -The default seccomp policy of older versions of Docker do not support the `clone3` syscall in recent versions of Fedora or Ubuntu, on which the Bottlerocket SDK is based. You'll need to have Docker installed and running, with your user account added to the `docker` group. Docker's [post-installation steps for Linux](https://docs.docker.com/install/linux/linux-postinstall/) will walk you through that. - -> Note: If you're on a newer Linux distribution using the unified cgroup hierarchy with cgroups v2, you may need to disable it to work with current versions of runc. -> You'll know this is the case if you see an error like `docker: Error response from daemon: OCI runtime create failed: this version of runc doesn't work on cgroups v2: unknown.` -> Set the kernel parameter `systemd.unified_cgroup_hierarchy=0` in your boot configuration (e.g. GRUB) and reboot. - -### Build process - -To build an image, run: - -```shell -cargo make +You'll also need to enable the containerd-snapshotter and buildkit features for your docker daemon. This is required to ensure docker compatibility with OCI Images (which kits are stored in). +The following configuration is needed in your `/etc/docker/daemon.json` +```json +{ + "features": { + "buildkit": true, + "containerd-snapshotter": true + } +} ``` +#### Crane +[Crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md) is a tool for interacting with remote images and registries.. 
It does not require a daemon and thus you don't need the above Docker features to use it. Twoliter supports utilizing `crane` (or `krane`) instead of `docker` if it is installed. -This will build an image for the default variant (a recent `aws-k8s-*`, see the `BUILDSYS_VARIANT` variable in [Makefile.toml](Makefile.toml) to find the current default variant). -All packages will be built in turn, and then compiled into an `img` file in the `build/images/` directory. - -The version number in [Release.toml](Release.toml) will be used in naming the file, and will be used inside the image as the release version. -If you're planning on [publishing your build](PUBLISHING.md), you may want to change the version. +The installation instructions for [crane](https://github.com/google/go-containerregistry/tree/main/cmd/crane) should help you set it up for use with Twoliter. -To build an image for a different variant, run: +## Build the core kit -```shell -cargo make -e BUILDSYS_VARIANT=my-variant-here +Building the core kit can be done by using the makefile targets. ``` - -To build an image for a different architecture, run: - -```shell -cargo make -e BUILDSYS_ARCH=my-arch-here +make ARCH= ``` -If you want to limit the build concurrency, set `BUILDSYS_JOBS` (the default is `8`): +## Publish the Kit +After the kit has been built you can then publish the kit image to your private registry. This will allow you to consume it to build and test a variant. -```shell -cargo make -e BUILDSYS_JOBS=4 -``` - -(You can use variant and arch arguments together, too.) - -#### Package licenses +### Use a private registry for development +It is recommended that you have some form of protected container registry to use for testing. +For testing purposes you can either utilize mutable tags to allow overriding of multiple versions of a core kit as you test, or you can use immutable tags and continuously bump the core kit version via the `Twoliter.toml`. -Most packages will include license files extracted from upstream source archives. -However, in some rare cases there are multiple licenses that could apply to a package. -Bottlerocket's build system uses the `Licenses.toml` file in conjunction with the `licenses` directory to configure the licenses used for such special packages. -Here is an example of a simple `Licenses.toml` configuration file: - -```toml -[package] -spdx-id = "SPDX-ID" -licenses = [ - { path = "the-license.txt" } -] +### Configure Infra.toml +An `Infra.toml` file needs to be created and should have a definition of your vendor (container registry) in order to publish the kits you build. To do so make sure that the `Infra.toml` has the below. ``` - -In the previous example, it is expected that the file `the-license.txt` is present in `licenses`. 
-You can retrieve the licenses from a remote endpoint, or the local filesystem if you specify the `license-url` field: - -```toml -[package] -spdx-id = "SPDX-ID AND SPDX-ID-2" # Package with multiple licenses -licenses = [ - # This file is copied from a file system, and will be saved as `path` - { license-url = "file:///path/to/spdx-id-license.txt", path = "spdx-id-license.txt" }, - # This file is fetched from an https endpoint, and will be saved as `path` - { license-url = "https://localhost/spdx-id-license-v2.txt", path = "spdx-id-license-2.txt" } -] -``` - -#### NVIDIA variants - -If you want to build any of the NVIDIA variants, you can follow these steps to prepare a `Licenses.toml` file using the [License for customer use of NVIDIA software](https://www.nvidia.com/en-us/drivers/nvidia-license/): - -1. Create a `Licenses.toml` file in your Bottlerocket root directory, with the following content: - -```toml -[nvidia] -spdx-id = "LicensesRef-NVIDIA-Customer-Use" -licenses = [ - { path = "LICENSE", license-url = "https://www.nvidia.com/en-us/drivers/nvidia-license/" } -] +[vendor.] +registry = "####.dkr.ecr.us-west-2.amazonaws.com" ``` - -2. Fetch the licenses with this command: - -```shell -cargo make -e BUILDSYS_UPSTREAM_LICENSE_FETCH=true fetch-licenses +After the kit has been built locally, the kit can be published to the provided vendor in `Infra.toml`. To do this, you will need docker credentials with ECR access. You can do this with, ``` - -3. Build your image, setting the `BUILDSYS_UPSTREAM_SOURCE_FALLBACK` flag to `true`, if you haven't cached the driver's sources: - -```shell -K8S_VERSION=1.24 -cargo make \ - -e BUILDSYS_VARIANT=aws-k8s-${K8S_VERSION}-nvidia \ - -e BUILDSYS_UPSTREAM_SOURCE_FALLBACK="true" +aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ####.dkr.ecr.us-west-2.amazonaws.com ``` -### Register an AMI - -To use the image in Amazon EC2, we need to register the image as an AMI. - -To do this, you'll need to have your AWS account credentials setup on your system. -There are lots of ways to do this; one method is using [the `aws` CLI](https://aws.amazon.com/cli/) via its `configure` command with your user's access and secret keys. -If you're using an EC2 instance, the [EC2 instance's IAM role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) will be used automatically if available. - -For a simple start, pick an [EC2 region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions), then run: - -```shell -cargo make -e PUBLISH_REGIONS=your-region-here ami +Finally, publishing the core kit images can be handled by the makefile target. ``` - -Note that the task ("ami") must come **after** the arguments to `cargo make` that are specified with `-e`. - -Your new AMI ID will be printed after it's registered. - -If you built your image for a different architecture or variant, just use the same arguments here: - -```shell -cargo make -e PUBLISH_REGIONS=your-region-here -e BUILDSYS_VARIANT=my-variant-here ami +make publish VENDOR= ``` +At this point, there should be a core kit image in your private registry which can be consumed when building a variant to test and validate. -(There's a lot more detail on building and managing AMIs in the [PUBLISHING](PUBLISHING.md) guide.) - -## Use your image +## Consuming the published kit image +This section will cover building a variant to test a build of the core kit as done above. 
Please note this section does not cover the complete complexity of testing a change to Bottlerocket. For this see the [BUILDING](https://github.com/bottlerocket-os/bottlerocket/blob/develop/BUILDING.md) section in the [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket/) repository. -See any of the setup guides tailored to the various execution environments for information on running Bottlerocket images: - -* [Setup guide for Kubernetes](QUICKSTART-EKS.md) -* [Setup guide for Amazon ECS](QUICKSTART-ECS.md) -* [Setup guide for VMware](QUICKSTART-VMWARE.md) -* [Setup guide for QEMU/KVM](QUICKSTART-LOCAL.md) - -## Publish your image - -See the [PUBLISHING](PUBLISHING.md) guide for information on deploying Bottlerocket images and repositories. - -## Building out-of-tree kernel modules - -To further extend Bottlerocket, you may want to build extra kernel modules. -The specifics of building an out-of-tree module will vary by project, but the first step is to download the "kmod kit" that contains the kernel headers and toolchain you'll need to use. - -### Downloading the kmod kit - -kmod kits are included in the official Bottlerocket repos starting with Bottlerocket v1.0.6. -Let's say you want to download the kit for building x86_64 modules for v1.11.0 and variant aws-k8s-1.24. - -First, you need tuftool: -```shell -cargo install tuftool +### Configure Twoliter.toml +To consume a private copy of the Bottlerocket core kit with your changes built into it, you need to define the vendor that points to your container registry in `Twoliter.toml` and adjust the core kit dependency: ``` - -Next, you need the Bottlerocket root role, which is used by tuftool to verify the kmod kit. -This will download and verify the root role itself: -```shell -curl -O "https://cache.bottlerocket.aws/root.json" -sha512sum -c <<<"2ff1fbf99b20dd7ff5d2c84243a8e3b51701183b1f524b7d470a6b7a9b0172fbb36a0949b7e586ab7ccb6e348eb77125d6ed9fd1a638f4381e4f3f084ff38596 root.json" +[vendor.my-vendor] +registry = "####.dkr.ecr.us-west-2.amazonaws.com" +[[kit]] +name = "bottlerocket-core-kit" # Name of your ECR repo +version = "2.x.y" # your version tag you want to test +vendor = "my-vendor" ``` - -Next, set your desired parameters, and download the kmod kit: -```shell -ARCH=x86_64 -VERSION=v1.11.0 -VARIANT=aws-k8s-1.24 -OUTDIR="${VARIANT}-${VERSION}" - -tuftool download "${OUTDIR}" --target-name ${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz \ - --root ./root.json \ - --metadata-url "https://updates.bottlerocket.aws/2020-07-07/${VARIANT}/${ARCH}/" \ - --targets-url "https://updates.bottlerocket.aws/targets/" +Any time you change the vendor or version of the kit above you need to run `twoliter update` to update the `Twoliter.lock` ``` - -### Using the kmod kit - -To use the kmod kit, extract it, and update your PATH to use its toolchain: -```shell -tar xf "${VARIANT}-${ARCH}-kmod-kit-${VERSION}.tar.xz" - -export CROSS_COMPILE="${ARCH}-bottlerocket-linux-musl-" -export KERNELDIR="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/kernel-devel" -export PATH="${PWD}/${VARIANT}-${ARCH}-kmod-kit-${VERSION}/toolchain/usr/bin:${PATH}" +./tools/twoliter/twoliter update ``` - -Now you can compile modules against the kernel headers in `${KERNELDIR}`. diff --git a/README.md b/README.md index b65f7003..1f3ef5b9 100644 --- a/README.md +++ b/README.md @@ -20,4 +20,4 @@ OR ```shell make ARCH= ``` -See the [BUILDING](https://github.com/bottlerocket-os/bottlerocket-core-kit/blob/develop/BUILDING.md) guide for more details. 
+See the [BUILDING](BUILDING.md) guide for more details. From adbc7f970fabcc2c8e536f0bc7341b9efe1f6601 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 13 Sep 2024 19:58:14 +0000 Subject: [PATCH 1294/1356] install-twoliter: validate binary checksum on install --- Makefile | 11 ++++++++++- tools/install-twoliter.sh | 9 ++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8617c93e..1d0b89c0 100644 --- a/Makefile +++ b/Makefile @@ -5,10 +5,19 @@ TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo TWOLITER_VERSION ?= "0.4.5" +TWOLITER_SHA256_AARCH64 ?= "799103bcc00e1daf931e11eb58630ca7c4d93c14752c3f4dcf25594759e3c3e7" +TWOLITER_SHA256_X86_64 ?= "b0cd35c0a1257fc98992821eb5ea7a96c021dba166ee2b9d04449b9206b3d941" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket +ifeq ($(ARCH), aarch64) + TWOLITER_SHA256=$(TWOLITER_SHA256_AARCH64) +else + TWOLITER_SHA256=$(TWOLITER_SHA256_X86_64) +endif + + export GO_MODULES = ecs-gpu-init host-ctr all: build @@ -21,7 +30,7 @@ prep: --version v$(TWOLITER_VERSION) \ --directory $(TWOLITER_DIR) \ --reuse-existing-install \ - --allow-binary-install \ + --allow-binary-install $(TWOLITER_SHA256) \ --allow-from-source update: prep diff --git a/tools/install-twoliter.sh b/tools/install-twoliter.sh index 0253fad3..8125fe0d 100755 --- a/tools/install-twoliter.sh +++ b/tools/install-twoliter.sh @@ -41,7 +41,8 @@ Usage: $0 -r GIT_REPO -v TWOLITER_VERSION -d INSTALL_DIR [-e REUSE_EXISTING] [-b -d, --directory the directory to install twoliter into -e, --reuse-existing-install we will skip installation if we find the correct version installed -b, --allow-binary-install we will try to install a GitHub release-attached binary if the - host we are on is Linux. + host we are on is Linux. Takes an expected sha256 sum for the + binary as input. -s, --allow-from-source we will install from source using cargo install pointed to a git repo and rev when binary install is either not allowed or not possible @@ -96,7 +97,7 @@ while [[ $# -gt 0 ]]; do -e|--reuse-existing-install) reuse_existing="true" ;; -b|--allow-binary-install) - allow_bin="true" ;; + allow_bin="true"; shift; bin_checksum=$1 ;; -s|--allow-from-source) from_source="true" ;; -k|--skip-version-check) @@ -143,6 +144,8 @@ if [ "${allow_bin}" = "true" ] ; then twoliter_target="${host_arch}-unknown-${host_kernel}-musl" cd "${workdir}" curl -sSL "${twoliter_release}/twoliter-${twoliter_target}.tar.xz" -o "twoliter.tar.xz" + echo "Checking binary checksum..." + sha256sum -c <<< "${bin_checksum} twoliter.tar.xz" tar xf twoliter.tar.xz mv "./twoliter-${twoliter_target}/twoliter" "${dir}" exit 0 @@ -177,4 +180,4 @@ fi if [ ! -x "${dir}/twoliter" ] ; then echo "Could not install twoliter ${version}" >&2 exit 1 -fi \ No newline at end of file +fi From 6e496c8614e6be07e738c295e3e74d906493ef5d Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Mon, 16 Sep 2024 21:42:45 +0000 Subject: [PATCH 1295/1356] kernel-6.1: update to 6.1.109 Rebase to Amazon Linux upstream version 6.1.109-118.189.amzn2023. Adjust some of the modules to account for new configuration changes. 
Signed-off-by: Matthew Yeazel --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 12 +++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 4f7483ea..0bfc444d 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/f578e84fd35abf2a86cbe79936f7d773eed3ca0202ac5fa049cf01879ce9bbe3/kernel-6.1.106-116.188.amzn2023.src.rpm" -sha512 = "253f601c2df406697fe9cff2a4cbfc3fb4c098a2ea8f36b3a1ce21c7c7d207612e18422a8eb832e6f3e105a59bb62b12bba6fb2f603e7740665ae38a78292645" +url = "https://cdn.amazonlinux.com/al2023/blobstore/60b1be96cb0d00c8998e26b855b51b54e1cc82a655bb47a1d4f51c5ffbdd3148/kernel-6.1.109-118.189.amzn2023.src.rpm" +sha512 = "2a40b73e7fbc28f48b01e3d0f463e6c72660662ce498fc91c4727617201ed1714480d731c9e59e8de632cb829ba1dc6cf0a07838eb9b90e61a2b422cb17aae8b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 38150c46..219d614c 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.106 +Version: 6.1.109 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/f578e84fd35abf2a86cbe79936f7d773eed3ca0202ac5fa049cf01879ce9bbe3/kernel-6.1.106-116.188.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/60b1be96cb0d00c8998e26b855b51b54e1cc82a655bb47a1d4f51c5ffbdd3148/kernel-6.1.109-118.189.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs @@ -376,7 +376,6 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/System.map %if "%{_cross_arch}" == "x86_64" -%{_cross_kmoddir}/kernel/arch/x86/crypto/aesni-intel.ko.* %{_cross_kmoddir}/kernel/arch/x86/crypto/blowfish-x86_64.ko.* %{_cross_kmoddir}/kernel/arch/x86/crypto/camellia-aesni-avx2.ko.* %{_cross_kmoddir}/kernel/arch/x86/crypto/camellia-aesni-avx-x86_64.ko.* @@ -451,7 +450,6 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/crypto/chacha_generic.ko.* %{_cross_kmoddir}/kernel/crypto/cmac.ko.* %{_cross_kmoddir}/kernel/crypto/crc32_generic.ko.* -%{_cross_kmoddir}/kernel/crypto/cryptd.ko.* %{_cross_kmoddir}/kernel/crypto/crypto_user.ko.* %{_cross_kmoddir}/kernel/crypto/cts.ko.* %{_cross_kmoddir}/kernel/crypto/des_generic.ko.* @@ -485,12 +483,10 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/crypto/xts.ko.* %{_cross_kmoddir}/kernel/crypto/xxhash_generic.ko.* %{_cross_kmoddir}/kernel/crypto/zstd.ko.* -%if "%{_cross_arch}" == "x86_64" -%{_cross_kmoddir}/kernel/crypto/crypto_simd.ko.* -%endif %if "%{_cross_arch}" == "aarch64" %{_cross_kmoddir}/kernel/crypto/sm3.ko.* %{_cross_kmoddir}/kernel/crypto/sm4.ko.* +%{_cross_kmoddir}/kernel/crypto/cryptd.ko.* %endif %{_cross_kmoddir}/kernel/drivers/acpi/ac.ko.* %{_cross_kmoddir}/kernel/drivers/acpi/button.ko.* @@ -702,6 +698,7 @@ install -p -m 0644 %{S:302} 
%{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/drivers/pci/hotplug/acpiphp_ibm.ko.* %{_cross_kmoddir}/kernel/drivers/pci/pci-stub.ko.* %if "%{_cross_arch}" == "x86_64" +%{_cross_kmoddir}/kernel/drivers/pci/controller/pci-hyperv-intf.ko.* %{_cross_kmoddir}/kernel/drivers/pci/hotplug/cpcihp_generic.ko.* %{_cross_kmoddir}/kernel/drivers/platform/x86/wmi-bmof.ko.* %{_cross_kmoddir}/kernel/drivers/platform/x86/wmi.ko.* @@ -783,6 +780,7 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/drivers/usb/usbip/usbip-core.ko.* %{_cross_kmoddir}/kernel/drivers/usb/usbip/usbip-host.ko.* %{_cross_kmoddir}/kernel/drivers/usb/usbip/vhci-hcd.ko.* +%{_cross_kmoddir}/kernel/drivers/vfio/pci/mlx5/mlx5-vfio-pci.ko.* %{_cross_kmoddir}/kernel/drivers/vfio/pci/vfio-pci-core.ko.* %{_cross_kmoddir}/kernel/drivers/vfio/pci/vfio-pci.ko.* %{_cross_kmoddir}/kernel/drivers/vfio/vfio_iommu_type1.ko.* From 53a95f77dae00140700878fb582f7fa0aedd7efd Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 18 Sep 2024 23:46:50 +0000 Subject: [PATCH 1296/1356] kernel-5.10: update to 5.10.225 Rebase to Amazon Linux upstream version 5.10.225-213.878.amzn2. Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index c91e98e2..ea108819 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/090ee50d7c80b80f41f8e3c8c7d63efaa9592371f48bc9f769b2d52cb358a238/kernel-5.10.224-212.876.amzn2.src.rpm" -sha512 = "1a5d1066aa061b4b8cc2d97671d86c4aee727266386f0507b8a841adfc51235d3fa74eab3cfc64ca3d78397294f789ae3ee48f45c16f330929691d14ce7153c0" +url = "https://cdn.amazonlinux.com/blobstore/3351af6379ce59bc5724cf19ad4819e5d6929dafdb2925afd0c9ea0e13d3be47/kernel-5.10.225-213.878.amzn2.src.rpm" +sha512 = "96ff97176e92357e89171ebcf5eb5104e59947b92a0f0c7eb161552e2686ffac094db3d45da2bbbb9c343d94515e017f2732cf22ff84745a0c2a475a80b52abc" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index e118e3f4..ce9378d5 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.224 +Version: 5.10.225 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/090ee50d7c80b80f41f8e3c8c7d63efaa9592371f48bc9f769b2d52cb358a238/kernel-5.10.224-212.876.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/3351af6379ce59bc5724cf19ad4819e5d6929dafdb2925afd0c9ea0e13d3be47/kernel-5.10.225-213.878.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From c05b54419b2cb9f0facc10c6e806340795f46568 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 18 Sep 2024 23:47:17 +0000 Subject: [PATCH 1297/1356] kernel-5.15: update to 5.15.166 Rebase to Amazon Linux upstream version 5.15.166-111.163.amzn2. 
Signed-off-by: Arnaldo Garcia Rincon --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 1ff30224..0bec9ffb 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/5fc19dbcdad79c0964001228b7f301dc9726ba49f28248fe04f44186bb318e51/kernel-5.15.165-110.161.amzn2.src.rpm" -sha512 = "dcb77a87aa343d10936a40e155d6d3b67e78a42f04f817157731b97caa4de113564edfba37dd9ed66712081ae0df1102cd5703cd895a493e4ec7348886eb303b" +url = "https://cdn.amazonlinux.com/blobstore/1db73ff2ad4ac5d6ccf1f53e405b23ad5ee2715a6392faad73688bbb29c3374e/kernel-5.15.166-111.163.amzn2.src.rpm" +sha512 = "59de7f13b4ab203b17ed4c1fb8260db560f95ade10f31db7c609129cc3043781a76eca79ca293ff9addff88431b5c20072472e952d6d75749b93d282d4cab374" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index cf629197..d3cd1735 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.165 +Version: 5.15.166 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/5fc19dbcdad79c0964001228b7f301dc9726ba49f28248fe04f44186bb318e51/kernel-5.15.165-110.161.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/1db73ff2ad4ac5d6ccf1f53e405b23ad5ee2715a6392faad73688bbb29c3374e/kernel-5.15.166-111.163.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 8ad8406dbbcc9dcf1fe4c3109640209a3e456ffa Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Tue, 17 Sep 2024 21:30:44 +0000 Subject: [PATCH 1298/1356] twoliter: update twoliter to 0.4.6 --- .gitignore | 1 + Makefile | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index e28b1f51..0a418333 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ /licenses *.run /tests +Twoliter.override diff --git a/Makefile b/Makefile index 1d0b89c0..27e07308 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,9 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.5" -TWOLITER_SHA256_AARCH64 ?= "799103bcc00e1daf931e11eb58630ca7c4d93c14752c3f4dcf25594759e3c3e7" -TWOLITER_SHA256_X86_64 ?= "b0cd35c0a1257fc98992821eb5ea7a96c021dba166ee2b9d04449b9206b3d941" +TWOLITER_VERSION ?= "0.4.6" +TWOLITER_SHA256_AARCH64 ?= "12ac3f5a6c641e29481c79289bd07cf1c3494a65e3d283d582feb1d28d8bf2a7" +TWOLITER_SHA256_X86_64 ?= "4a2db7c4d0aac75c6b682336539ee57371cfb6dfea81689d07fc1f4a940fd5c5" KIT ?= bottlerocket-core-kit ARCH ?= $(shell uname -m) VENDOR ?= bottlerocket From 97554d3cc81569011aca3db7a5f101688192bac3 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Thu, 19 Sep 2024 22:43:44 +0000 Subject: [PATCH 1299/1356] install-twoliter: always use host arch for twoliter If, when you first installed twoliter, you were building for a host architecture that differed from your target, twoliter would use the wrong checksum when validating the installed binary. --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 27e07308..60022d57 100644 --- a/Makefile +++ b/Makefile @@ -8,10 +8,11 @@ TWOLITER_VERSION ?= "0.4.6" TWOLITER_SHA256_AARCH64 ?= "12ac3f5a6c641e29481c79289bd07cf1c3494a65e3d283d582feb1d28d8bf2a7" TWOLITER_SHA256_X86_64 ?= "4a2db7c4d0aac75c6b682336539ee57371cfb6dfea81689d07fc1f4a940fd5c5" KIT ?= bottlerocket-core-kit -ARCH ?= $(shell uname -m) +UNAME_ARCH = $(shell uname -m) +ARCH ?= $(UNAME_ARCH) VENDOR ?= bottlerocket -ifeq ($(ARCH), aarch64) +ifeq ($(UNAME_ARCH), aarch64) TWOLITER_SHA256=$(TWOLITER_SHA256_AARCH64) else TWOLITER_SHA256=$(TWOLITER_SHA256_X86_64) From 2f3fb0d2545072b300d891abea64bdebb14fa8be Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Sun, 15 Sep 2024 22:51:51 +0000 Subject: [PATCH 1300/1356] packages/kmod-6.1-nvidia: provide configuration for open-gpu This adds upon the present logic to build the open-gpu driver to provide configuration that driverdog can use to work with the open-gpu drivers. Signed-off-by: Matthew Yeazel --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 39 +++++++++++++++---- .../nvidia-open-gpu-config.toml.in | 11 ++++++ .../nvidia-open-gpu-copy-only-config.toml.in | 8 ++++ .../nvidia-tesla-tmpfiles.conf | 2 + .../kmod-6.1-nvidia/nvidia-tmpfiles.conf.in | 2 + 5 files changed, 55 insertions(+), 7 deletions(-) create mode 100644 packages/kmod-6.1-nvidia/nvidia-open-gpu-config.toml.in create mode 100644 packages/kmod-6.1-nvidia/nvidia-open-gpu-copy-only-config.toml.in diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 8e130489..fd7acbcb 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -44,8 +44,10 @@ Source204: nvidia-fabricmanager.cfg # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf Source301: nvidia-tesla-build-config.toml.in -Source302: nvidia-tesla-path.env.in -Source303: nvidia-ld.so.conf.in +Source302: nvidia-open-gpu-config.toml.in +Source303: nvidia-open-gpu-copy-only-config.toml.in +Source304: nvidia-tesla-path.env.in +Source305: nvidia-ld.so.conf.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-6.1-archive @@ -141,9 +143,24 @@ popd # Grab the list of supported devices pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/supported-gpus -jq -r '.chips[] | select(.features[] | contains("kernelopen"))' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json -# confirm "NVIDIA A10G" is in the resulting file to catch shape changes -jq -e '."open-gpu"[] | select(."devid" == "0x2237") | ."features"| index("kernelopen")' open-gpu-supported-devices.json +# We want to grab all the `kernelopen` enabled chips except for this list that is best held back to the proprietary driver +# 10de:1db1 is V100-16G (P3dn) +# 10de:1db5 is V100-32G (P3dn) +# 10de:1eb8 is T4 (G4dn) +# 10de:1eb4 is T4G (G5g) +# 10de:2237 is A10G (G5) +# 10de:27b8 is L4 (G6) +# 10de:26b9 is L40S (G6e) +jq -r '.chips[] | select(.features[] | contains("kernelopen")) | +select(.devid != "0x1DB1" +and .devid != "0x1DB5" +and .devid != "0x1DEB8" +and .devid != "0x1EB4" +and 
.devid != "0x2237" +and .devid != "0x27B8" +and .devid != "0x26B9")' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json +# confirm "NVIDIA H100" is in the resulting file to catch shape changes +jq -e '."open-gpu"[] | select(."devid" == "0x2330") | ."features"| index("kernelopen")' open-gpu-supported-devices.json popd %install @@ -182,15 +199,21 @@ install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ nvidia-tesla.toml install -m 0644 nvidia-tesla.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:302} > \ + nvidia-open-gpu.toml +install -m 0644 nvidia-open-gpu.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:303} > \ + nvidia-open-gpu-copy-only.toml +install -m 0644 nvidia-open-gpu-copy-only.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:302} > nvidia-path.env +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:304} > nvidia-path.env install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla # We need to add `_cross_libdir` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:305} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # proprietary driver @@ -315,6 +338,8 @@ popd # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu-copy-only.toml %{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf %{_cross_factorydir}/nvidia/tesla/nvidia-path.env %{_cross_datadir}/nvidia/open-gpu-supported-devices.json diff --git a/packages/kmod-6.1-nvidia/nvidia-open-gpu-config.toml.in b/packages/kmod-6.1-nvidia/nvidia-open-gpu-config.toml.in new file mode 100644 index 00000000..5ae81b71 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-open-gpu-config.toml.in @@ -0,0 +1,11 @@ +[nvidia-open-gpu] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + +[nvidia-open-gpu.kernel-modules."nvidia.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-modeset.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-uvm.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-6.1-nvidia/nvidia-open-gpu-copy-only-config.toml.in b/packages/kmod-6.1-nvidia/nvidia-open-gpu-copy-only-config.toml.in new file mode 100644 index 00000000..774867d4 --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-open-gpu-copy-only-config.toml.in @@ -0,0 +1,8 @@ +[nvidia-open-gpu-copy-only] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + 
+[nvidia-open-gpu-copy-only.kernel-modules."nvidia-drm.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu-copy-only.kernel-modules."nvidia-peermem.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf index ddcac3e4..fd0f4486 100644 --- a/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf +++ b/packages/kmod-6.1-nvidia/nvidia-tesla-tmpfiles.conf @@ -1,3 +1,5 @@ C /etc/drivers/nvidia-tesla.toml +C /etc/drivers/nvidia-open-gpu.toml +C /etc/drivers/nvidia-open-gpu-copy-only.toml C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/nvidia-path.env C /etc/ld.so.conf.d/nvidia-tesla.conf diff --git a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in index 2bee2471..3d3bbc48 100644 --- a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in @@ -1,4 +1,6 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu - - - - - +d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu 0755 root root - - C /etc/nvidia/fabricmanager.cfg - - - - d /run/nvidia 0700 root root - From 68bf4a87f03f3976a6c56d5414b2ef5d6c07d051 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Sun, 15 Sep 2024 22:55:14 +0000 Subject: [PATCH 1301/1356] packages/kmod-5.15-nvidia: provide configuration for open-gpu This adds upon the present logic to build the open-gpu driver to provide configuration that driverdog can use to work with the open-gpu drivers. 
Signed-off-by: Matthew Yeazel --- .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 39 +++++++++++++++---- .../nvidia-open-gpu-config.toml.in | 11 ++++++ .../nvidia-open-gpu-copy-only-config.toml.in | 8 ++++ .../nvidia-tesla-tmpfiles.conf | 2 + .../kmod-5.15-nvidia/nvidia-tmpfiles.conf.in | 2 + 5 files changed, 55 insertions(+), 7 deletions(-) create mode 100644 packages/kmod-5.15-nvidia/nvidia-open-gpu-config.toml.in create mode 100644 packages/kmod-5.15-nvidia/nvidia-open-gpu-copy-only-config.toml.in diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index ddec74c3..55136b0f 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -44,8 +44,10 @@ Source204: nvidia-fabricmanager.cfg # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf Source301: nvidia-tesla-build-config.toml.in -Source302: nvidia-tesla-path.env.in -Source303: nvidia-ld.so.conf.in +Source302: nvidia-open-gpu-config.toml.in +Source303: nvidia-open-gpu-copy-only-config.toml.in +Source304: nvidia-tesla-path.env.in +Source305: nvidia-ld.so.conf.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.15-archive @@ -141,9 +143,24 @@ popd # Grab the list of supported devices pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/supported-gpus -jq -r '.chips[] | select(.features[] | contains("kernelopen"))' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json -# confirm "NVIDIA A10G" is in the resulting file to catch shape changes -jq -e '."open-gpu"[] | select(."devid" == "0x2237") | ."features"| index("kernelopen")' open-gpu-supported-devices.json +# We want to grab all the `kernelopen` enabled chips except for this list that is best held back to the proprietary driver +# 10de:1db1 is V100-16G (P3dn) +# 10de:1db5 is V100-32G (P3dn) +# 10de:1eb8 is T4 (G4dn) +# 10de:1eb4 is T4G (G5g) +# 10de:2237 is A10G (G5) +# 10de:27b8 is L4 (G6) +# 10de:26b9 is L40S (G6e) +jq -r '.chips[] | select(.features[] | contains("kernelopen")) | +select(.devid != "0x1DB1" +and .devid != "0x1DB5" +and .devid != "0x1DEB8" +and .devid != "0x1EB4" +and .devid != "0x2237" +and .devid != "0x27B8" +and .devid != "0x26B9")' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json +# confirm "NVIDIA H100" is in the resulting file to catch shape changes +jq -e '."open-gpu"[] | select(."devid" == "0x2330") | ."features"| index("kernelopen")' open-gpu-supported-devices.json popd %install @@ -182,15 +199,21 @@ install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ nvidia-tesla.toml install -m 0644 nvidia-tesla.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:302} > \ + nvidia-open-gpu.toml +install -m 0644 nvidia-open-gpu.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:303} > \ + nvidia-open-gpu-copy-only.toml +install -m 0644 nvidia-open-gpu-copy-only.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 
's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:302} > nvidia-path.env +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:304} > nvidia-path.env install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla # We need to add `_cross_libdir` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} > nvidia-tesla.conf +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:305} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # proprietary driver @@ -315,6 +338,8 @@ popd # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu-copy-only.toml %{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf %{_cross_factorydir}/nvidia/tesla/nvidia-path.env %{_cross_datadir}/nvidia/open-gpu-supported-devices.json diff --git a/packages/kmod-5.15-nvidia/nvidia-open-gpu-config.toml.in b/packages/kmod-5.15-nvidia/nvidia-open-gpu-config.toml.in new file mode 100644 index 00000000..5ae81b71 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-open-gpu-config.toml.in @@ -0,0 +1,11 @@ +[nvidia-open-gpu] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + +[nvidia-open-gpu.kernel-modules."nvidia.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-modeset.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-uvm.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-5.15-nvidia/nvidia-open-gpu-copy-only-config.toml.in b/packages/kmod-5.15-nvidia/nvidia-open-gpu-copy-only-config.toml.in new file mode 100644 index 00000000..774867d4 --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-open-gpu-copy-only-config.toml.in @@ -0,0 +1,8 @@ +[nvidia-open-gpu-copy-only] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + +[nvidia-open-gpu-copy-only.kernel-modules."nvidia-drm.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu-copy-only.kernel-modules."nvidia-peermem.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf index ddcac3e4..fd0f4486 100644 --- a/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf +++ b/packages/kmod-5.15-nvidia/nvidia-tesla-tmpfiles.conf @@ -1,3 +1,5 @@ C /etc/drivers/nvidia-tesla.toml +C /etc/drivers/nvidia-open-gpu.toml +C /etc/drivers/nvidia-open-gpu-copy-only.toml C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/nvidia-path.env C /etc/ld.so.conf.d/nvidia-tesla.conf diff --git a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in index 2bee2471..3d3bbc48 100644 --- a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in @@ -1,4 +1,6 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu - - - - - +d 
__PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu 0755 root root - - C /etc/nvidia/fabricmanager.cfg - - - - d /run/nvidia 0700 root root - From 12bd73d3c6519fbcba470ceda2a8a7563431addd Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 18 Sep 2024 02:24:30 +0000 Subject: [PATCH 1302/1356] kmod-5.10-nvidia: relocate kernel module services The os package doesn't need to concern itself with NVIDIA specific loading behavior. It will provide driverdog, but the configurations read by driverdog will be included with the specific kernel modules package that provides the drivers described in the configuration. Signed-off-by: Matthew Yeazel --- .../kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 14 ++++++++++++++ .../link-tesla-kernel-modules.service.in | 18 ++++++++++++++++++ .../load-tesla-kernel-modules.service.in | 18 ++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in create mode 100644 packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 542b1473..0733338a 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -25,6 +25,8 @@ Source300: nvidia-tesla-tmpfiles.conf.in Source301: nvidia-tesla-build-config.toml.in Source302: nvidia-tesla-path.env.in Source303: nvidia-ld.so.conf.in +Source304: link-tesla-kernel-modules.service.in +Source305: load-tesla-kernel-modules.service.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.10-archive @@ -121,6 +123,14 @@ sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__| > nvidia-tesla-%{tesla_470}.conf install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +# Services to link/copy/load modules +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:304} > link-tesla-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:305} > load-tesla-kernel-modules.service +install -p -m 0644 \ + link-tesla-kernel-modules.service \ + load-tesla-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + # driver install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d @@ -189,6 +199,10 @@ popd %dir %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d %dir %{_cross_factorydir}/nvidia/tesla/%{tesla_470} +# Service files for link/copy/loading drivers +%{_cross_unitdir}/link-tesla-kernel-modules.service +%{_cross_unitdir}/load-tesla-kernel-modules.service + # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-smi diff --git a/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in b/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in new file mode 100644 index 00000000..79e5956b --- /dev/null +++ b/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in @@ -0,0 +1,18 @@ +[Unit] +Description=Link additional kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. 
This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecStart=/usr/bin/driverdog link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in b/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in new file mode 100644 index 00000000..3e412860 --- /dev/null +++ b/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in @@ -0,0 +1,18 @@ +[Unit] +Description=Load additional kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=link-tesla-kernel-modules.service +Requires=link-tesla-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecStart=/usr/bin/driverdog load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target From f2fb2a718db3c2a23760343261e24f685982a92a Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 18 Sep 2024 02:30:09 +0000 Subject: [PATCH 1303/1356] kmod-5.15-nvidia: move kernel module services into package The os package doesn't need to concern itself with NVIDIA specific loading behavior. It will provide driverdog, but the configurations read by driverdog will be included with the specific kernel modules package that provides the drivers described in the configuration. This moves the tesla and open-gpu services into the kmod-5.15-nvidia package instead. Signed-off-by: Matthew Yeazel --- .../copy-open-gpu-kernel-modules.service.in | 20 +++++++++++++++ .../kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 25 +++++++++++++++++++ .../link-tesla-kernel-modules.service.in | 19 ++++++++++++++ .../load-open-gpu-kernel-modules.service.in | 19 ++++++++++++++ .../load-tesla-kernel-modules.service.in | 19 ++++++++++++++ 5 files changed, 102 insertions(+) create mode 100644 packages/kmod-5.15-nvidia/copy-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-5.15-nvidia/link-tesla-kernel-modules.service.in create mode 100644 packages/kmod-5.15-nvidia/load-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-5.15-nvidia/load-tesla-kernel-modules.service.in diff --git a/packages/kmod-5.15-nvidia/copy-open-gpu-kernel-modules.service.in b/packages/kmod-5.15-nvidia/copy-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..2c3420b6 --- /dev/null +++ b/packages/kmod-5.15-nvidia/copy-open-gpu-kernel-modules.service.in @@ -0,0 +1,20 @@ +[Unit] +Description=Copy open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. 
+RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu link-modules +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu-copy-only link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 55136b0f..6ae4da15 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -48,6 +48,10 @@ Source302: nvidia-open-gpu-config.toml.in Source303: nvidia-open-gpu-copy-only-config.toml.in Source304: nvidia-tesla-path.env.in Source305: nvidia-ld.so.conf.in +Source306: link-tesla-kernel-modules.service.in +Source307: load-tesla-kernel-modules.service.in +Source308: copy-open-gpu-kernel-modules.service.in +Source309: load-open-gpu-kernel-modules.service.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.15-archive @@ -216,6 +220,21 @@ install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:305} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +# Services to link/copy/load modules +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:306} > link-tesla-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:307} > load-tesla-kernel-modules.service +install -p -m 0644 \ + link-tesla-kernel-modules.service \ + load-tesla-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:308} > copy-open-gpu-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:309} > load-open-gpu-kernel-modules.service +install -p -m 0644 \ + copy-open-gpu-kernel-modules.service \ + load-open-gpu-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + # proprietary driver install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d @@ -323,6 +342,12 @@ popd %dir %{_cross_datadir}/nvidia/tesla/module-objects.d %dir %{_cross_factorydir}/nvidia/tesla +# Service files for link/copy/loading drivers +%{_cross_unitdir}/link-tesla-kernel-modules.service +%{_cross_unitdir}/load-tesla-kernel-modules.service +%{_cross_unitdir}/copy-open-gpu-kernel-modules.service +%{_cross_unitdir}/load-open-gpu-kernel-modules.service + # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi diff --git a/packages/kmod-5.15-nvidia/link-tesla-kernel-modules.service.in b/packages/kmod-5.15-nvidia/link-tesla-kernel-modules.service.in new file mode 100644 index 00000000..8fc77921 --- /dev/null +++ b/packages/kmod-5.15-nvidia/link-tesla-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Link Tesla kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. 
+RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.15-nvidia/load-open-gpu-kernel-modules.service.in b/packages/kmod-5.15-nvidia/load-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..3862b3e7 --- /dev/null +++ b/packages/kmod-5.15-nvidia/load-open-gpu-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Load open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=copy-open-gpu-kernel-modules.service +Requires=copy-open-gpu-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.15-nvidia/load-tesla-kernel-modules.service.in b/packages/kmod-5.15-nvidia/load-tesla-kernel-modules.service.in new file mode 100644 index 00000000..60024004 --- /dev/null +++ b/packages/kmod-5.15-nvidia/load-tesla-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Load Tesla kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=link-tesla-kernel-modules.service +Requires=link-tesla-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target From 03e275cfc1c5dcd8858e9743fa0b58cdcc5fcb83 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 18 Sep 2024 02:35:44 +0000 Subject: [PATCH 1304/1356] kmod-6.1-nvidia: move kernel module services into package The os package doesn't need to concern itself with NVIDIA specific loading behavior. It will provide driverdog, but the configurations read by driverdog will be included with the specific kernel modules package that provides the drivers described in the configuration. This moves the tesla and open-gpu services into the kmod-6.1-nvidia package instead. 
Signed-off-by: Matthew Yeazel --- .../copy-open-gpu-kernel-modules.service.in | 20 +++++++++++++++ packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 25 +++++++++++++++++++ .../link-tesla-kernel-modules.service.in | 19 ++++++++++++++ .../load-open-gpu-kernel-modules.service.in | 19 ++++++++++++++ .../load-tesla-kernel-modules.service.in | 19 ++++++++++++++ 5 files changed, 102 insertions(+) create mode 100644 packages/kmod-6.1-nvidia/copy-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-6.1-nvidia/link-tesla-kernel-modules.service.in create mode 100644 packages/kmod-6.1-nvidia/load-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-6.1-nvidia/load-tesla-kernel-modules.service.in diff --git a/packages/kmod-6.1-nvidia/copy-open-gpu-kernel-modules.service.in b/packages/kmod-6.1-nvidia/copy-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..2c3420b6 --- /dev/null +++ b/packages/kmod-6.1-nvidia/copy-open-gpu-kernel-modules.service.in @@ -0,0 +1,20 @@ +[Unit] +Description=Copy open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu link-modules +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu-copy-only link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index fd7acbcb..1e4a2d8a 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -48,6 +48,10 @@ Source302: nvidia-open-gpu-config.toml.in Source303: nvidia-open-gpu-copy-only-config.toml.in Source304: nvidia-tesla-path.env.in Source305: nvidia-ld.so.conf.in +Source306: link-tesla-kernel-modules.service.in +Source307: load-tesla-kernel-modules.service.in +Source308: copy-open-gpu-kernel-modules.service.in +Source309: load-open-gpu-kernel-modules.service.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-6.1-archive @@ -216,6 +220,21 @@ install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:305} > nvidia-tesla.conf install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +# Services to link/copy/load modules +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:306} > link-tesla-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:307} > load-tesla-kernel-modules.service +install -p -m 0644 \ + link-tesla-kernel-modules.service \ + load-tesla-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:308} > copy-open-gpu-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:309} > load-open-gpu-kernel-modules.service +install -p -m 0644 \ + copy-open-gpu-kernel-modules.service \ + load-open-gpu-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + # proprietary driver install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d 
install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d @@ -323,6 +342,12 @@ popd %dir %{_cross_datadir}/nvidia/tesla/module-objects.d %dir %{_cross_factorydir}/nvidia/tesla +# Service files for link/copy/loading drivers +%{_cross_unitdir}/link-tesla-kernel-modules.service +%{_cross_unitdir}/load-tesla-kernel-modules.service +%{_cross_unitdir}/copy-open-gpu-kernel-modules.service +%{_cross_unitdir}/load-open-gpu-kernel-modules.service + # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi diff --git a/packages/kmod-6.1-nvidia/link-tesla-kernel-modules.service.in b/packages/kmod-6.1-nvidia/link-tesla-kernel-modules.service.in new file mode 100644 index 00000000..8fc77921 --- /dev/null +++ b/packages/kmod-6.1-nvidia/link-tesla-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Link Tesla kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-6.1-nvidia/load-open-gpu-kernel-modules.service.in b/packages/kmod-6.1-nvidia/load-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..3862b3e7 --- /dev/null +++ b/packages/kmod-6.1-nvidia/load-open-gpu-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Load open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=copy-open-gpu-kernel-modules.service +Requires=copy-open-gpu-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-6.1-nvidia/load-tesla-kernel-modules.service.in b/packages/kmod-6.1-nvidia/load-tesla-kernel-modules.service.in new file mode 100644 index 00000000..60024004 --- /dev/null +++ b/packages/kmod-6.1-nvidia/load-tesla-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Load Tesla kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=link-tesla-kernel-modules.service +Requires=link-tesla-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target From 39c53c1425f8dcd740ebb1a1e9706327211dd067 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Sat, 13 Jul 2024 14:14:39 +0000 Subject: [PATCH 1305/1356] kernel-5.10, -5.15: 
conflict with erofs image feature Some erofs filesystem features are only available in newer kernels, and older kernels may be missing fixes. Prevent older kernels from being used with erofs root filesystems, just to be safe. Signed-off-by: Ben Cressey --- packages/kernel-5.10/kernel-5.10.spec | 3 +++ packages/kernel-5.15/kernel-5.15.spec | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index ce9378d5..5882a688 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -45,6 +45,9 @@ Requires: %{name}-devel = %{version}-%{release} # The 5.10 kernel is not FIPS certified. Conflicts: %{_cross_os}image-feature(fips) +# Using EROFS for the root partition requires a 6.1+ kernel. +Conflicts: %{_cross_os}image-feature(erofs-root-partition) + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index d3cd1735..6d04c85e 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -40,6 +40,9 @@ Requires: %{name}-devel = %{version}-%{release} # The 5.15 kernel is not FIPS certified. Conflicts: %{_cross_os}image-feature(fips) +# Using EROFS for the root partition requires a 6.1+ kernel. +Conflicts: %{_cross_os}image-feature(erofs-root-partition) + %global kernel_sourcedir %{_cross_usrsrc}/kernels %global kernel_libdir %{_cross_libdir}/modules/%{version} From 3276bc9dbc1434918ed3f0417539a9e4e8745c62 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Wed, 3 Jul 2024 16:50:52 +0000 Subject: [PATCH 1306/1356] kernel-6.1: enable EROFS as a built-in filesystem erofs must be a built-in filesystem in order to be used for the root filesystem. lz4 decompression, which was previously available as a module, is now also built-in to support decompressing erofs filesystems. Signed-off-by: Ben Cressey --- packages/kernel-6.1/config-bottlerocket | 3 ++- packages/kernel-6.1/kernel-6.1.spec | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 0091416c..375b43f5 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -1,8 +1,9 @@ # Because Bottlerocket does not have an initramfs, modules required to mount # the root filesystem must be set to y. 
-# The root filesystem is ext4 +# The root filesystem is ext4 or erofs CONFIG_EXT4_FS=y +CONFIG_EROFS_FS=y # btrfs support for compatibility CONFIG_BTRFS_FS=m diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 219d614c..61725381 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -923,7 +923,6 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/lib/crypto/libpoly1305.ko.* %{_cross_kmoddir}/kernel/lib/lru_cache.ko.* %{_cross_kmoddir}/kernel/lib/lz4/lz4_compress.ko.* -%{_cross_kmoddir}/kernel/lib/lz4/lz4_decompress.ko.* %{_cross_kmoddir}/kernel/lib/lz4/lz4hc_compress.ko.* %{_cross_kmoddir}/kernel/lib/raid6/raid6_pq.ko.* %{_cross_kmoddir}/kernel/lib/reed_solomon/reed_solomon.ko.* From 92c0db324bf80103915d0ca8321dd5a6451724ac Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 2 Aug 2024 20:37:34 +0000 Subject: [PATCH 1307/1356] kernel-6.1: also provide uncompressed devel files When using erofs as the root filesystem, we want to avoid other forms of compression, such as the squashfs used for the kernel development files. Include the uncompressed version of these files when the erofs image feature is set, and override the configuration for the mount unit to do a simple bind mount instead. Signed-off-by: Ben Cressey --- packages/kernel-6.1/kernel-6.1.spec | 34 ++++++++++++++++++- ...b-kernel-devel-lower.mount.drop-in.conf.in | 4 +++ 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 packages/kernel-6.1/var-lib-kernel-devel-lower.mount.drop-in.conf.in diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 61725381..a287695a 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -16,6 +16,9 @@ Source200: check-fips-modules.drop-in.conf.in Source201: fipsmodules-x86_64 Source202: fipsmodules-aarch64 +# Adjust kernel-devel mount behavior if not squashfs. +Source210: var-lib-kernel-devel-lower.mount.drop-in.conf.in + # Bootconfig snippets to adjust the default kernel command line for the platform. Source300: bootconfig-aws.conf Source301: bootconfig-vmware.conf @@ -70,10 +73,24 @@ Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) %package devel Summary: Configured Linux kernel source for module building +Requires: (%{name}-devel-squashed if %{_cross_os}image-feature(no-erofs-root-partition)) +Requires: (%{name}-devel-unpacked if %{_cross_os}image-feature(erofs-root-partition)) %description devel %{summary}. +%package devel-squashed +Summary: Configured Linux kernel source for module building (squashed) + +%description devel-squashed +%{summary}. + +%package devel-unpacked +Summary: Configured Linux kernel source for module building (unpacked) + +%description devel-unpacked +%{summary}. + %package archive Summary: Archived Linux kernel source for module building @@ -265,6 +282,10 @@ mkdir -p src_squashfs/%{version} tar c -T kernel_devel_files | tar x -C src_squashfs/%{version} mksquashfs src_squashfs kernel-devel.squashfs ${SQUASHFS_OPTS} +# Create an uncompressed set of kernel-devel files in the standard location. +install -d %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel/%{version} +tar c -T kernel_devel_files | tar x -C %{buildroot}%{_cross_datadir}/bottlerocket/kernel-devel/%{version} + # Create a tarball of the same files, for use outside the running system. 
# In theory we could extract these files with `unsquashfs`, but we do not want # to require it to be installed on the build host, and it errors out when run @@ -300,6 +321,11 @@ for fipsmod in $(cat %{_sourcedir}/fipsmodules-%{_cross_arch}) ; do (( i+=1 )) done +LOWERPATH=$(systemd-escape --path %{_cross_sharedstatedir}/kernel-devel/.overlay/lower) +mkdir -p %{buildroot}%{_cross_unitdir}/"${LOWERPATH}.mount.d" +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:210} \ + > %{buildroot}%{_cross_unitdir}/"${LOWERPATH}.mount.d"/no-squashfs.conf + # Install platform-specific bootconfig snippets. install -d %{buildroot}%{_cross_bootconfigdir} install -p -m 0644 %{S:300} %{buildroot}%{_cross_bootconfigdir}/05-aws.conf @@ -338,10 +364,16 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %files devel %dir %{_cross_ksrcdir} -%{_cross_datadir}/bottlerocket/kernel-devel.squashfs %{_cross_kmoddir}/source %{_cross_kmoddir}/build +%files devel-squashed +%{_cross_datadir}/bottlerocket/kernel-devel.squashfs + +%files devel-unpacked +%{_cross_datadir}/bottlerocket/kernel-devel +%{_cross_unitdir}/*kernel*devel*.mount.d/no-squashfs.conf + %files archive %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz diff --git a/packages/kernel-6.1/var-lib-kernel-devel-lower.mount.drop-in.conf.in b/packages/kernel-6.1/var-lib-kernel-devel-lower.mount.drop-in.conf.in new file mode 100644 index 00000000..a56abe6c --- /dev/null +++ b/packages/kernel-6.1/var-lib-kernel-devel-lower.mount.drop-in.conf.in @@ -0,0 +1,4 @@ +[Mount] +What=PREFIX/share/bottlerocket/kernel-devel +Type=none +Options=rbind,rshared From c876572f859047051e848ed624c82ee7809c5c91 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Fri, 27 Sep 2024 02:44:30 +0000 Subject: [PATCH 1308/1356] kernel-6.1: fix kernel-devel requirement Older versions of Twoliter might not define either the positive or negative versions of the erofs image feature, which would result in neither of the kernel-devel subpackages getting installed. Fix this by always installing the squashed version unless the positive form of the erofs image feature is present. Signed-off-by: Ben Cressey --- packages/kernel-6.1/kernel-6.1.spec | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index a287695a..9a115c95 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -73,8 +73,7 @@ Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) %package devel Summary: Configured Linux kernel source for module building -Requires: (%{name}-devel-squashed if %{_cross_os}image-feature(no-erofs-root-partition)) -Requires: (%{name}-devel-unpacked if %{_cross_os}image-feature(erofs-root-partition)) +Requires: (%{name}-devel-unpacked if %{_cross_os}image-feature(erofs-root-partition) else %{name}-devel-squashed) %description devel %{summary}. 
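
A note on the `Requires: (... if ... else ...)` line in the patch above (an editorial sketch, not part of any patch in this series): it is an RPM boolean ("rich") dependency, which the commit uses so that exactly one kernel-devel subpackage is selected even when older Twoliter versions define neither the positive nor the negative form of the erofs image feature. As a rough illustration of how such a dependency resolves, using placeholder package and feature names rather than the real ones:

# Hypothetical spec fragment illustrating an "if ... else ..." rich dependency.
# "image-feature(foo)" stands in for a conditional provide such as
# %{_cross_os}image-feature(erofs-root-partition); all names here are placeholders.
Requires: (example-devel-unpacked if image-feature(foo) else example-devel-squashed)
# provide present in the transaction  -> example-devel-unpacked is installed
# provide absent                      -> example-devel-squashed is installed,
#                                        with no separate "no-foo" provide required

This matches the fallback described in the commit message: the squashed variant remains the default unless the positive form of the feature is present.
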
From 16df5863d11ac690f57850fb23de270000b426d7 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 30 Sep 2024 16:51:01 +0000 Subject: [PATCH 1309/1356] kmod-6.1-neuron: update to 2.18.12.0 Signed-off-by: Ben Cressey --- .../0001-kbuild-do-not-outline-atomics-for-arm64.patch | 8 ++++---- packages/kmod-6.1-neuron/Cargo.toml | 4 ++-- packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch index 5ce125cf..c52edefb 100644 --- a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch +++ b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch @@ -5,13 +5,13 @@ Subject: [PATCH] kbuild: do not outline atomics for arm64 Signed-off-by: Ben Cressey --- - usr/src/aws-neuronx-2.17.17.0/Kbuild | 1 + + usr/src/aws-neuronx-2.18.12.0/Kbuild | 1 + 1 file changed, 1 insertion(+) -diff --git a/usr/src/aws-neuronx-2.17.17.0/Kbuild b/usr/src/aws-neuronx-2.17.17.0/Kbuild +diff --git a/usr/src/aws-neuronx-2.18.12.0/Kbuild b/usr/src/aws-neuronx-2.18.12.0/Kbuild index 11f8490..6535608 100644 ---- a/usr/src/aws-neuronx-2.17.17.0/Kbuild -+++ b/usr/src/aws-neuronx-2.17.17.0/Kbuild +--- a/usr/src/aws-neuronx-2.18.12.0/Kbuild ++++ b/usr/src/aws-neuronx-2.18.12.0/Kbuild @@ -16,3 +16,4 @@ neuron-objs += v3/notific.o v3/neuron_dhal_v3.o ccflags-y += -O3 -Wall -Werror -Wno-declaration-after-statement -Wunused-macros -Wunused-local-typedefs diff --git a/packages/kmod-6.1-neuron/Cargo.toml b/packages/kmod-6.1-neuron/Cargo.toml index 054c490e..63cb6b0b 100644 --- a/packages/kmod-6.1-neuron/Cargo.toml +++ b/packages/kmod-6.1-neuron/Cargo.toml @@ -13,8 +13,8 @@ package-name = "kmod-6.1-neuron" releases-url = "https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html" [[package.metadata.build-package.external-files]] -url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.17.17.0.noarch.rpm" -sha512 = "8ff07280fc2864677d9401d56939f8b8f8cd59dc1ae9df53d49aced500cd62c3071a61639e9f5381b557d29cc7a2b72be71020f90880680a59d10a1f8b28580b" +url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" +sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" [build-dependencies] kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index 6b98c1a3..b69fdce4 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -1,5 +1,5 @@ Name: %{_cross_os}kmod-6.1-neuron -Version: 2.17.17.0 +Version: 2.18.12.0 Release: 1%{?dist} Summary: Neuron drivers for the 6.1 kernel License: GPL-2.0-only From 20c9779bac5c5e55c5828ffe5b78c06b8a35c6f2 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 1 Oct 2024 21:30:04 +0000 Subject: [PATCH 1310/1356] kernel-6.1: exclude more object files from devel Now that an unpacked set of kernel-devel files is present in the buildroot, object files will be found by the `/usr/lib/rpm/brp-strip` invocation that tries to ensure all files are stripped. This can lead to build failures if the host and target arch don't match, and if the target arch requires a host tool to be built. 
The object files will be built for the host, and the target's `strip` command will not recognize the format. In any case, these object files shouldn't be included, as they may need to be rebuilt to match the running host architecture at the time the kernel-devel files are used. Fixes: 76af8bef ("kernel-6.1: also provide uncompressed devel files") Signed-off-by: Ben Cressey --- packages/kernel-6.1/kernel-6.1.spec | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 9a115c95..ebcc43fc 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -236,20 +236,20 @@ chmod 600 System.map \( -name module.lds -o -name vmlinux.lds.S -o -name Platform -o -name \*.tbl \) \ -print - find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d -print + find arch/%{_cross_karch}/{include,lib}/ -type f ! -name \*.o ! -name \*.o.d ! -name \*.a -print echo arch/%{_cross_karch}/kernel/asm-offsets.s echo lib/vdso/gettimeofday.c for d in \ arch/%{_cross_karch}/tools \ arch/%{_cross_karch}/kernel/vdso ; do - [ -d "${d}" ] && find "${d}/" -type f -print + [ -d "${d}" ] && find "${d}/" -type f ! -name \*.o -print done find include -type f -print find scripts -type f ! -name \*.l ! -name \*.y ! -name \*.o -print - find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o -print + find tools/{arch/%{_cross_karch},include,objtool,scripts}/ -type f ! -name \*.o ! -name \*.a -print echo tools/build/fixdep.c find tools/lib/subcmd -type f -print find tools/lib/{ctype,hweight,rbtree,string,str_error_r}.c From be3f194347c051b53ab690918aa82c85396b268d Mon Sep 17 00:00:00 2001 From: Kush Upadhyay Date: Thu, 3 Oct 2024 02:16:11 +0000 Subject: [PATCH 1311/1356] kernel-5.15: update to 5.15.167 Rebase to Amazon Linux upstream version 5.15.167-112.165.amzn2. Signed-off-by: Kush Upadhyay --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 0bec9ffb..e2a73cbd 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/1db73ff2ad4ac5d6ccf1f53e405b23ad5ee2715a6392faad73688bbb29c3374e/kernel-5.15.166-111.163.amzn2.src.rpm" -sha512 = "59de7f13b4ab203b17ed4c1fb8260db560f95ade10f31db7c609129cc3043781a76eca79ca293ff9addff88431b5c20072472e952d6d75749b93d282d4cab374" +url = "https://cdn.amazonlinux.com/blobstore/7d9322ae0af16962b5b12f984ec34e1644fe111beb5933bf6d75a1cf7f267201/kernel-5.15.167-112.165.amzn2.src.rpm" +sha512 = "0d39cdc4a4bdcb66aa3f950af2f41ccb9d9f267524b3e5af7def532db68920321081684d2b37f1b0a1e24195b6d77a68f88dc92f5a47bccd3d12b665d2c36e7b" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 6d04c85e..1929962c 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.166 +Version: 5.15.167 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/1db73ff2ad4ac5d6ccf1f53e405b23ad5ee2715a6392faad73688bbb29c3374e/kernel-5.15.166-111.163.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/7d9322ae0af16962b5b12f984ec34e1644fe111beb5933bf6d75a1cf7f267201/kernel-5.15.167-112.165.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From c788f9bd594df83793c1600ad502958f8b150c95 Mon Sep 17 00:00:00 2001 From: Kush Upadhyay Date: Thu, 3 Oct 2024 02:16:35 +0000 Subject: [PATCH 1312/1356] kernel-5.10: update to 5.10.226 Rebase to Amazon Linux upstream version 5.10.226-214.879.amzn2. Signed-off-by: Kush Upadhyay --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index ea108819..49afaa82 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/3351af6379ce59bc5724cf19ad4819e5d6929dafdb2925afd0c9ea0e13d3be47/kernel-5.10.225-213.878.amzn2.src.rpm" -sha512 = "96ff97176e92357e89171ebcf5eb5104e59947b92a0f0c7eb161552e2686ffac094db3d45da2bbbb9c343d94515e017f2732cf22ff84745a0c2a475a80b52abc" +url = "https://cdn.amazonlinux.com/blobstore/11c2c91624bd7d42460b998b80573fa17717ab1fe853099730a373452bde11a2/kernel-5.10.226-214.879.amzn2.src.rpm" +sha512 = "2609956c60e622c8f6eefcc0279fe2d67958f27ef12e64f3b3515a4c51b3a224160eeeffcc219314b7aa3172c52f41dc6504aba46449eba10cae34846af66101" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index 5882a688..f618711a 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.225 +Version: 5.10.226 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/3351af6379ce59bc5724cf19ad4819e5d6929dafdb2925afd0c9ea0e13d3be47/kernel-5.10.225-213.878.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/11c2c91624bd7d42460b998b80573fa17717ab1fe853099730a373452bde11a2/kernel-5.10.226-214.879.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 07909d5961797a9d91bdd8d44f4532fa8017b166 Mon Sep 17 00:00:00 2001 From: "Sean P. 
Kelly" Date: Mon, 7 Oct 2024 20:58:48 +0000 Subject: [PATCH 1313/1356] twoliter: update twoliter to 0.4.7 --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 60022d57..c3e599c4 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,9 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.6" -TWOLITER_SHA256_AARCH64 ?= "12ac3f5a6c641e29481c79289bd07cf1c3494a65e3d283d582feb1d28d8bf2a7" -TWOLITER_SHA256_X86_64 ?= "4a2db7c4d0aac75c6b682336539ee57371cfb6dfea81689d07fc1f4a940fd5c5" +TWOLITER_VERSION ?= "0.4.7" +TWOLITER_SHA256_AARCH64 ?= "82fce1895f93946a9e03d71ecaa9a73d0f4681d08cf540be0b7bab3eacbdc41e" +TWOLITER_SHA256_X86_64 ?= "3b5773bea848aa94c5501fa3cf03dc0c788916c7a3da7b989495cd2cdb8b49ec" KIT ?= bottlerocket-core-kit UNAME_ARCH = $(shell uname -m) ARCH ?= $(UNAME_ARCH) From 3f057261559752d86aa9449773c4d494112a4f6c Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Tue, 27 Aug 2024 16:41:18 -0700 Subject: [PATCH 1314/1356] kmod-5.10-nvidia: add nvidia-persistenced Whenever the NVIDIA device resources are no longer in use, the NVIDIA kernel driver will tear down the device state. `nvidia-persistenced` activates persistence mode, which keeps the device files open which prevents the kernel from removing the device state. This is desirable in applications that may suffer performance hits due to repeated device initialization. The NVIDIA device drivers ship with templates for running `nvidia-persistenced` as a systemd unit. This change uses that template. The `nvidia-persistenced` documentation advises that while the systemd unit can run as root, the unit should provide a non-root user for `nvidia-persistenced` to run under. See the documentation included with the NVIDIA driver for more information about `nvidia-persistenced`. 
--- .../kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 18 ++++++++++++++++++ .../nvidia-persistenced.service.in | 10 ++++++++++ packages/kmod-5.10-nvidia/nvidia-sysusers.conf | 1 + .../kmod-5.10-nvidia/nvidia-tmpfiles.conf.in | 1 + 4 files changed, 30 insertions(+) create mode 100644 packages/kmod-5.10-nvidia/nvidia-persistenced.service.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-sysusers.conf diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 0733338a..458cd5b5 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -19,6 +19,8 @@ Source2: NVidiaEULAforAWS.pdf # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf +Source203: nvidia-sysusers.conf +Source204: nvidia-persistenced.service.in # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf.in @@ -105,6 +107,7 @@ install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} install -d %{buildroot}%{tesla_470_libdir} install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_470} +install -d %{buildroot}%{_cross_sysusersdir} sed -e 's|__NVIDIA_VERSION__|%{tesla_470}|' %{S:300} > nvidia-tesla-%{tesla_470}.conf install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_tmpfilesdir}/ @@ -158,10 +161,18 @@ install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{te install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %endif +# Users +install -m 0644 %{S:203} %{buildroot}%{_cross_sysusersdir}/nvidia.conf + +# Systemd units +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}|' %{S:204} > nvidia-persistenced.service +install -m 0644 nvidia-persistenced.service %{buildroot}%{_cross_unitdir} + # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added install -m 755 *.so* %{buildroot}/%{tesla_470_libdir}/ @@ -206,6 +217,7 @@ popd # Binaries %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-persistenced # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_470}.toml @@ -229,6 +241,12 @@ popd # tmpfiles %{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_470}.conf +# sysuser files +%{_cross_sysusersdir}/nvidia.conf + +# systemd units +%{_cross_unitdir}/nvidia-persistenced.service + # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities diff --git a/packages/kmod-5.10-nvidia/nvidia-persistenced.service.in b/packages/kmod-5.10-nvidia/nvidia-persistenced.service.in new file mode 100644 index 00000000..62663025 --- /dev/null +++ 
b/packages/kmod-5.10-nvidia/nvidia-persistenced.service.in @@ -0,0 +1,10 @@ +[Unit] +Description=NVIDIA Persistence Daemon +After=load-tesla-kernel-modules.service load-open-gpu-kernel-modules.service + +[Service] +Type=forking +ExecStart=__NVIDIA_BINDIR__/nvidia-persistenced --user nvidia --verbose + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.10-nvidia/nvidia-sysusers.conf b/packages/kmod-5.10-nvidia/nvidia-sysusers.conf new file mode 100644 index 00000000..43ceba0e --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-sysusers.conf @@ -0,0 +1 @@ +u nvidia - "nvidia-persistenced user" \ No newline at end of file diff --git a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in index d4763f28..f44152b3 100644 --- a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in @@ -1,2 +1,3 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - +D /var/run/nvidia-persistenced 0755 nvidia nvidia - - \ No newline at end of file From 3ae157100b437f8b68953c9739c21eca32eda10a Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Tue, 3 Sep 2024 20:20:55 +0000 Subject: [PATCH 1315/1356] kmod-5.15-nvidia: add nvidia-persistenced --- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 17 +++++++++++++++++ .../nvidia-persistenced.service | 10 ++++++++++ packages/kmod-5.15-nvidia/nvidia-sysusers.conf | 1 + .../kmod-5.15-nvidia/nvidia-tmpfiles.conf.in | 1 + 4 files changed, 29 insertions(+) create mode 100644 packages/kmod-5.15-nvidia/nvidia-persistenced.service create mode 100644 packages/kmod-5.15-nvidia/nvidia-sysusers.conf diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 6ae4da15..5f6e7e2d 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -40,6 +40,8 @@ Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf Source203: nvidia-fabricmanager.service Source204: nvidia-fabricmanager.cfg +Source205: nvidia-sysusers.conf +Source206: nvidia-persistenced.service # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf @@ -173,6 +175,7 @@ install -d %{buildroot}%{_cross_libdir} install -d %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_unitdir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} +install -d %{buildroot}%{_cross_sysusersdir} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) sed \ @@ -279,10 +282,17 @@ install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/ %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif +# Users +install -m 0644 %{S:205} %{buildroot}%{_cross_sysusersdir}/nvidia.conf + +# Systemd units +install -m 0644 %{S:206} %{buildroot}%{_cross_unitdir} + # We install all the libraries, and filter them out in the 'files' section, so we can 
catch # when new libraries are added install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ @@ -353,6 +363,7 @@ popd %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi %{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager %{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-persistenced # nvswitch topologies %dir %{_cross_datadir}/nvidia/tesla/nvswitch @@ -386,6 +397,12 @@ popd # tmpfiles %{_cross_tmpfilesdir}/nvidia-tesla.conf +# sysuser files +%{_cross_sysusersdir}/nvidia.conf + +# systemd units +%{_cross_unitdir}/nvidia-persistenced.service + # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities diff --git a/packages/kmod-5.15-nvidia/nvidia-persistenced.service b/packages/kmod-5.15-nvidia/nvidia-persistenced.service new file mode 100644 index 00000000..f245599c --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-persistenced.service @@ -0,0 +1,10 @@ +[Unit] +Description=NVIDIA Persistence Daemon +After=load-tesla-kernel-modules.service load-open-gpu-kernel-modules.service + +[Service] +Type=forking +ExecStart=/usr/libexec/nvidia/tesla/bin/nvidia-persistenced --user nvidia --verbose + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.15-nvidia/nvidia-sysusers.conf b/packages/kmod-5.15-nvidia/nvidia-sysusers.conf new file mode 100644 index 00000000..43ceba0e --- /dev/null +++ b/packages/kmod-5.15-nvidia/nvidia-sysusers.conf @@ -0,0 +1 @@ +u nvidia - "nvidia-persistenced user" \ No newline at end of file diff --git a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in index 3d3bbc48..37d0fb79 100644 --- a/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.15-nvidia/nvidia-tmpfiles.conf.in @@ -4,3 +4,4 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/op d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu 0755 root root - - C /etc/nvidia/fabricmanager.cfg - - - - d /run/nvidia 0700 root root - +D /var/run/nvidia-persistenced 0755 nvidia nvidia - - \ No newline at end of file From 98e2fb3f04817e7d3ab47102bf771012fffa3e10 Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Tue, 3 Sep 2024 20:31:03 +0000 Subject: [PATCH 1316/1356] kmod-6.1-nvidia: add nvidia-persistenced --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 17 +++++++++++++++++ .../kmod-6.1-nvidia/nvidia-persistenced.service | 10 ++++++++++ packages/kmod-6.1-nvidia/nvidia-sysusers.conf | 1 + .../kmod-6.1-nvidia/nvidia-tmpfiles.conf.in | 1 + 4 files changed, 29 insertions(+) create mode 100644 packages/kmod-6.1-nvidia/nvidia-persistenced.service create mode 100644 packages/kmod-6.1-nvidia/nvidia-sysusers.conf diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 1e4a2d8a..89e1d78e 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -40,6 +40,8 @@ Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf Source203: nvidia-fabricmanager.service Source204: nvidia-fabricmanager.cfg +Source205: nvidia-sysusers.conf +Source206: nvidia-persistenced.service # NVIDIA tesla conf files from 300 to 399 Source300: nvidia-tesla-tmpfiles.conf @@ -173,6 +175,7 @@ install -d %{buildroot}%{_cross_libdir} install -d %{buildroot}%{_cross_tmpfilesdir} install 
-d %{buildroot}%{_cross_unitdir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} +install -d %{buildroot}%{_cross_sysusersdir} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) sed \ @@ -279,10 +282,17 @@ install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/ %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif +# Users +install -m 0644 %{S:205} %{buildroot}%{_cross_sysusersdir}/nvidia.conf + +# Systemd units +install -m 0644 %{S:206} %{buildroot}%{_cross_unitdir} + # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ @@ -353,6 +363,7 @@ popd %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi %{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager %{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-persistenced # nvswitch topologies %dir %{_cross_datadir}/nvidia/tesla/nvswitch @@ -386,6 +397,12 @@ popd # tmpfiles %{_cross_tmpfilesdir}/nvidia-tesla.conf +# sysuser files +%{_cross_sysusersdir}/nvidia.conf + +# systemd units +%{_cross_unitdir}/nvidia-persistenced.service + # We only install the libraries required by all the DRIVER_CAPABILITIES, described here: # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities diff --git a/packages/kmod-6.1-nvidia/nvidia-persistenced.service b/packages/kmod-6.1-nvidia/nvidia-persistenced.service new file mode 100644 index 00000000..f245599c --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-persistenced.service @@ -0,0 +1,10 @@ +[Unit] +Description=NVIDIA Persistence Daemon +After=load-tesla-kernel-modules.service load-open-gpu-kernel-modules.service + +[Service] +Type=forking +ExecStart=/usr/libexec/nvidia/tesla/bin/nvidia-persistenced --user nvidia --verbose + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-6.1-nvidia/nvidia-sysusers.conf b/packages/kmod-6.1-nvidia/nvidia-sysusers.conf new file mode 100644 index 00000000..43ceba0e --- /dev/null +++ b/packages/kmod-6.1-nvidia/nvidia-sysusers.conf @@ -0,0 +1 @@ +u nvidia - "nvidia-persistenced user" \ No newline at end of file diff --git a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in index 3d3bbc48..37d0fb79 100644 --- a/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-6.1-nvidia/nvidia-tmpfiles.conf.in @@ -4,3 +4,4 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/op d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu 0755 root root - - C /etc/nvidia/fabricmanager.cfg - - - - d /run/nvidia 0700 root root - +D /var/run/nvidia-persistenced 0755 nvidia nvidia - - \ No newline at end of file From 7276c78110904636e0e68f0b4d43342d3337c0c6 Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Tue, 1 Oct 2024 16:42:31 +0000 Subject: [PATCH 1317/1356] kmod-5.10-nvidia: add nvidia-modprobe --- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 3 
+++ 1 file changed, 3 insertions(+) diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 458cd5b5..a12a815a 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -88,6 +88,7 @@ install -d %{buildroot}%{_cross_libexecdir} install -d %{buildroot}%{_cross_libdir} install -d %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_bindir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) @@ -162,6 +163,7 @@ install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bi install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 4755 nvidia-modprobe %{buildroot}%{_cross_bindir} %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} %endif @@ -218,6 +220,7 @@ popd %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-debugdump %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-smi %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-persistenced +%{_cross_bindir}/nvidia-modprobe # Configuration files %{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_470}.toml From 2094ee3c1cf0fa399f8466907c5bf1dc4743fc69 Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Tue, 1 Oct 2024 16:19:08 +0000 Subject: [PATCH 1318/1356] kmod-5.15-nvidia: add nvidia-modprobe --- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 5f6e7e2d..271d7ca4 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -176,6 +176,7 @@ install -d %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_unitdir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} install -d %{buildroot}%{_cross_sysusersdir} +install -d %{buildroot}%{_cross_bindir} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) sed \ @@ -283,6 +284,7 @@ install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bi install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/ +install -m 4755 nvidia-modprobe %{buildroot}%{_cross_bindir} %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif @@ -364,6 +366,7 @@ popd %{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager %{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-persistenced +%{_cross_bindir}/nvidia-modprobe # nvswitch topologies %dir %{_cross_datadir}/nvidia/tesla/nvswitch From c96fa7b3f7115355e8e1ec54759069c54f7b344c Mon Sep 17 00:00:00 2001 From: Isaac Feldman Date: Mon, 30 Sep 2024 21:53:34 +0000 Subject: [PATCH 1319/1356] kmod-6.1-nvidia: add 
nvidia-modprobe --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 89e1d78e..bb708f2a 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -176,6 +176,7 @@ install -d %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_unitdir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} install -d %{buildroot}%{_cross_sysusersdir} +install -d %{buildroot}%{_cross_bindir} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) sed \ @@ -283,6 +284,7 @@ install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bi install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/ +install -m 4755 nvidia-modprobe %{buildroot}%{_cross_bindir} %if "%{_cross_arch}" == "x86_64" install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif @@ -364,6 +366,7 @@ popd %{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager %{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-persistenced +%{_cross_bindir}/nvidia-modprobe # nvswitch topologies %dir %{_cross_datadir}/nvidia/tesla/nvswitch From d82e8313ed16d6b953efd46893fe7b5adba155bf Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 4 Oct 2024 02:33:16 +0000 Subject: [PATCH 1320/1356] kmod-5.10-nvidia: move to R535 branch from R470 The R470 branch is end of life. In order to keep variants using the 5.10 kernel on a supported NVIDIA driver, this commit moves the kmod package for 5.10 to build the R535 branch and brings the driver in line with the other two kernel kmod packages in packaging style. 
Signed-off-by: Matthew Yeazel --- packages/kmod-5.10-nvidia/.gitignore | 2 + packages/kmod-5.10-nvidia/Cargo.toml | 23 +- .../copy-open-gpu-kernel-modules.service.in | 20 + .../kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 520 ++++++++++++------ .../link-tesla-kernel-modules.service.in | 5 +- .../load-open-gpu-kernel-modules.service.in | 19 + .../load-tesla-kernel-modules.service.in | 5 +- .../kmod-5.10-nvidia/nvidia-fabricmanager.cfg | 34 ++ .../nvidia-fabricmanager.service | 16 + .../kmod-5.10-nvidia/nvidia-ld.so.conf.in | 2 +- .../nvidia-open-gpu-config.toml.in | 11 + .../nvidia-open-gpu-copy-only-config.toml.in | 8 + ...service.in => nvidia-persistenced.service} | 2 +- .../nvidia-tesla-tmpfiles.conf | 5 + .../nvidia-tesla-tmpfiles.conf.in | 3 - .../kmod-5.10-nvidia/nvidia-tmpfiles.conf.in | 6 +- 16 files changed, 499 insertions(+), 182 deletions(-) create mode 100644 packages/kmod-5.10-nvidia/copy-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-5.10-nvidia/load-open-gpu-kernel-modules.service.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-fabricmanager.cfg create mode 100644 packages/kmod-5.10-nvidia/nvidia-fabricmanager.service create mode 100644 packages/kmod-5.10-nvidia/nvidia-open-gpu-config.toml.in create mode 100644 packages/kmod-5.10-nvidia/nvidia-open-gpu-copy-only-config.toml.in rename packages/kmod-5.10-nvidia/{nvidia-persistenced.service.in => nvidia-persistenced.service} (69%) create mode 100644 packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf delete mode 100644 packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in diff --git a/packages/kmod-5.10-nvidia/.gitignore b/packages/kmod-5.10-nvidia/.gitignore index 0bcfb52f..db8b415b 100644 --- a/packages/kmod-5.10-nvidia/.gitignore +++ b/packages/kmod-5.10-nvidia/.gitignore @@ -1 +1,3 @@ NVidiaEULAforAWS.pdf +COPYING +*.rpm diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 07cc9b72..785a9812 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -17,13 +17,28 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.256.02/NVIDIA-Linux-x86_64-470.256.02.run" -sha512 = "a837946dd24d7945c1962a695f1f31965f3ceb6927f52cd08fd51b8db138b7a888bbeab69243f5c8468a7bd7ccd47f5dbdb48a1ca81264866c1ebb7d88628f88" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" +sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/470.256.02/NVIDIA-Linux-aarch64-470.256.02.run" -sha512 = "38eee5933355c34ca816a2ac0fbc4f55c19c20e1322891bfc98cb6b37d99a31218eea9314877ab0e3cf3ac6eb61f9d9d4d09d0af304b689f18b4efa721b65d5c" +url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" +sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" +force-upstream = true + +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" +sha512 = 
"c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" +force-upstream = true + +[[package.metadata.build-package.external-files]] +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" +sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" +force-upstream = true + +[[package.metadata.build-package.external-files]] +url = "https://raw.githubusercontent.com/NVIDIA/open-gpu-kernel-modules/535/COPYING" +sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3629c432d4d9304c730b5d1a942228a5bcc74a03ab1c411c77c758cd938" force-upstream = true [build-dependencies] diff --git a/packages/kmod-5.10-nvidia/copy-open-gpu-kernel-modules.service.in b/packages/kmod-5.10-nvidia/copy-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..2c3420b6 --- /dev/null +++ b/packages/kmod-5.10-nvidia/copy-open-gpu-kernel-modules.service.in @@ -0,0 +1,20 @@ +[Unit] +Description=Copy open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +# Rerunning this service after the system is fully loaded will override +# the already linked kernel modules. This doesn't affect the running system, +# since kernel modules are linked early in the boot sequence, but we still +# disable manual restarts to prevent unnecessary kernel modules rewrites. +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu link-modules +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu-copy-only link-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index a12a815a..5af1c33f 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,6 +1,19 @@ -%global tesla_470 470.256.02 -%global tesla_470_libdir %{_cross_libdir}/nvidia/tesla/%{tesla_470} -%global tesla_470_bindir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +%global tesla_major 535 +%global tesla_minor 183 +%global tesla_patch 06 +%global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} +%if "%{?_cross_arch}" == "aarch64" +%global fm_arch sbsa +%else +%global fm_arch %{_cross_arch} +%endif + +# With the split of the firmware binary from firmware/gsp.bin to firmware/gsp_ga10x.bin +# and firmware/gsp_tu10x.bin the file format changed from executable to relocatable. +# The __spec_install_post macro will by default try to strip all binary files. +# Unfortunately the strip used is not compatible with the new file format. +# Redefine strip, so that these firmware binaries do not derail the build. 
+%global __strip /usr/bin/true Name: %{_cross_os}kmod-5.10-nvidia Version: 1.0.0 @@ -11,24 +24,36 @@ Summary: NVIDIA drivers for the 5.10 kernel License: Apache-2.0 OR MIT URL: http://www.nvidia.com/ -# NVIDIA .run scripts from 0 to 199 -Source0: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-x86_64-%{tesla_470}.run -Source1: https://us.download.nvidia.com/tesla/%{tesla_470}/NVIDIA-Linux-aarch64-%{tesla_470}.run +# NVIDIA archives from 0 to 199 +# NVIDIA .run scripts for kernel and userspace drivers +Source0: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-x86_64-%{tesla_ver}.run +Source1: https://us.download.nvidia.com/tesla/%{tesla_ver}/NVIDIA-Linux-aarch64-%{tesla_ver}.run Source2: NVidiaEULAforAWS.pdf +Source3: COPYING + +# fabricmanager for NVSwitch +Source10: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-%{tesla_ver}-1.x86_64.rpm +Source11: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-%{tesla_ver}-1.aarch64.rpm # Common NVIDIA conf files from 200 to 299 Source200: nvidia-tmpfiles.conf.in Source202: nvidia-dependencies-modules-load.conf -Source203: nvidia-sysusers.conf -Source204: nvidia-persistenced.service.in +Source203: nvidia-fabricmanager.service +Source204: nvidia-fabricmanager.cfg +Source205: nvidia-sysusers.conf +Source206: nvidia-persistenced.service # NVIDIA tesla conf files from 300 to 399 -Source300: nvidia-tesla-tmpfiles.conf.in +Source300: nvidia-tesla-tmpfiles.conf Source301: nvidia-tesla-build-config.toml.in -Source302: nvidia-tesla-path.env.in -Source303: nvidia-ld.so.conf.in -Source304: link-tesla-kernel-modules.service.in -Source305: load-tesla-kernel-modules.service.in +Source302: nvidia-open-gpu-config.toml.in +Source303: nvidia-open-gpu-copy-only-config.toml.in +Source304: nvidia-tesla-path.env.in +Source305: nvidia-ld.so.conf.in +Source306: link-tesla-kernel-modules.service.in +Source307: load-tesla-kernel-modules.service.in +Source308: copy-open-gpu-kernel-modules.service.in +Source309: load-open-gpu-kernel-modules.service.in BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.10-archive @@ -36,33 +61,71 @@ BuildRequires: %{_cross_os}kernel-5.10-archive %description %{summary}. -%package tesla-470 -Summary: NVIDIA 470 Tesla driver -Version: %{tesla_470} +%package fabricmanager +Summary: NVIDIA fabricmanager config and service files +Requires: %{name}-tesla(fabricmanager) + +%description fabricmanager +%{summary}. + +%package open-gpu-%{tesla_major} +Summary: NVIDIA %{tesla_major} Open GPU driver +Version: %{tesla_ver} +License: MIT OR GPL-2.0-only +Requires: %{_cross_os}variant-platform(aws) + +%description open-gpu-%{tesla_major} +%{summary}. + +%package tesla-%{tesla_major} +Summary: NVIDIA %{tesla_major} Tesla driver +Version: %{tesla_ver} License: LicenseRef-NVIDIA-AWS-EULA Requires: %{_cross_os}variant-platform(aws) Requires: %{name} +Requires: %{name}-fabricmanager +Provides: %{name}-tesla(fabricmanager) +Requires: %{name}-open-gpu-%{tesla_major} -%description tesla-470 +%description tesla-%{tesla_major} %{summary} %prep # Extract nvidia sources with `-x`, otherwise the script will try to install # the driver in the current run -sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_470}.run -x +sh %{_sourcedir}/NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}.run -x + +# Extract fabricmanager from the rpm via cpio rather than `%%setup` since the +# correct source is architecture-dependent. 
+mkdir fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +rpm2cpio %{_sourcedir}/nvidia-fabric-manager-%{tesla_ver}-1.%{_cross_arch}.rpm | cpio -idmV -D fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive # Add the license. install -p -m 0644 %{S:2} . +install -p -m 0644 %{S:3} . %global kernel_sources %{_builddir}/kernel-devel tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -%build -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_470}/kernel +%define _kernel_version %(ls %{kernel_sources}/include/config/kernel.release) +%global _cross_kmoddir %{_cross_libdir}/modules/%{_kernel_version} # This recipe was based in the NVIDIA yum/dnf specs: # https://github.com/NVIDIA/yum-packaging-precompiled-kmod +# Begin open driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel-open + +# We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if +# we don't set this flag the compilation fails +make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld + +# end open driver build +popd + +# Begin proprietary driver build +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/kernel + # We set IGNORE_CC_MISMATCH even though we are using the same compiler used to compile the kernel, if # we don't set this flag the compilation fails make %{?_smp_mflags} ARCH=%{_cross_karch} IGNORE_CC_MISMATCH=1 SYSSRC=%{kernel_sources} CC=%{_cross_target}-gcc LD=%{_cross_target}-ld @@ -81,6 +144,29 @@ rm nvidia{,-modeset,-peermem}.o # don't include any linked module in the base image rm nvidia{,-modeset,-peermem,-drm}.ko +# End proprietary driver build +popd + +# Grab the list of supported devices +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver}/supported-gpus +# We want to grab all the `kernelopen` enabled chips except for this list that is best held back to the proprietary driver +# 10de:1db1 is V100-16G (P3dn) +# 10de:1db5 is V100-32G (P3dn) +# 10de:1eb8 is T4 (G4dn) +# 10de:1eb4 is T4G (G5g) +# 10de:2237 is A10G (G5) +# 10de:27b8 is L4 (G6) +# 10de:26b9 is L40S (G6e) +jq -r '.chips[] | select(.features[] | contains("kernelopen")) | +select(.devid != "0x1DB1" +and .devid != "0x1DB5" +and .devid != "0x1DEB8" +and .devid != "0x1EB4" +and .devid != "0x2237" +and .devid != "0x27B8" +and .devid != "0x26B9")' supported-gpus.json | jq -s '{"open-gpu": .}' > open-gpu-supported-devices.json +# confirm "NVIDIA H100" is in the resulting file to catch shape changes +jq -e '."open-gpu"[] | select(."devid" == "0x2330") | ."features"| index("kernelopen")' open-gpu-supported-devices.json popd %install @@ -88,8 +174,9 @@ install -d %{buildroot}%{_cross_libexecdir} install -d %{buildroot}%{_cross_libdir} install -d %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_unitdir} -install -d %{buildroot}%{_cross_bindir} install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/{drivers,ld.so.conf.d} +install -d %{buildroot}%{_cross_sysusersdir} +install -d %{buildroot}%{_cross_bindir} KERNEL_VERSION=$(cat %{kernel_sources}/include/config/kernel.release) sed \ @@ -101,94 +188,147 @@ install -p -m 0644 nvidia.conf %{buildroot}%{_cross_tmpfilesdir} install -d %{buildroot}%{_cross_libdir}/modules-load.d install -p -m 0644 %{S:202} %{buildroot}%{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -# Begin NVIDIA tesla 470 -pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_470} -# We install bins and libs in a versioned directory to prevent collisions with future drivers versions -install -d 
%{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -install -d %{buildroot}%{tesla_470_libdir} -install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_470} -install -d %{buildroot}%{_cross_sysusersdir} - -sed -e 's|__NVIDIA_VERSION__|%{tesla_470}|' %{S:300} > nvidia-tesla-%{tesla_470}.conf -install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_tmpfilesdir}/ -sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/|' %{S:301} > \ - nvidia-tesla-%{tesla_470}.toml -install -m 0644 nvidia-tesla-%{tesla_470}.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +# NVIDIA fabric manager service unit and config +install -p -m 0644 %{S:203} %{buildroot}%{_cross_unitdir} +install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia +install -p -m 0644 %{S:204} %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg + +# Begin NVIDIA tesla driver +pushd NVIDIA-Linux-%{_cross_arch}-%{tesla_ver} +# Proprietary driver +install -d %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -d %{buildroot}%{_cross_libdir}/nvidia/tesla +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install -d %{buildroot}%{_cross_factorydir}/nvidia/tesla +install -d %{buildroot}%{_cross_factorydir}/nvidia/open-gpu +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers + +install -m 0644 %{S:300} %{buildroot}%{_cross_tmpfilesdir}/nvidia-tesla.conf +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/tesla/module-objects.d/|' %{S:301} > \ + nvidia-tesla.toml +install -m 0644 nvidia-tesla.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:302} > \ + nvidia-open-gpu.toml +install -m 0644 nvidia-open-gpu.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers +sed -e 's|__NVIDIA_MODULES__|%{_cross_datadir}/nvidia/open-gpu/drivers/|' %{S:303} > \ + nvidia-open-gpu-copy-only.toml +install -m 0644 nvidia-open-gpu-copy-only.toml %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/drivers # Install nvidia-path environment file, will be used as a drop-in for containerd.service since # libnvidia-container locates and mounts helper binaries into the containers from either # `PATH` or `NVIDIA_PATH` -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}|' %{S:302} > nvidia-path.env -install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla/%{tesla_470} -# We need to add `_cross_libdir/tesla_470` to the paths loaded by the ldconfig service +sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin|' %{S:304} > nvidia-path.env +install -m 0644 nvidia-path.env %{buildroot}%{_cross_factorydir}/nvidia/tesla +# We need to add `_cross_libdir` to the paths loaded by the ldconfig service # because libnvidia-container uses the `ldcache` file created by the service, to locate and mount the # libraries into the containers -sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:303} | sed -e 's|__NVIDIA_VERSION__|%{tesla_470}|' \ - > nvidia-tesla-%{tesla_470}.conf -install -m 0644 nvidia-tesla-%{tesla_470}.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ +sed -e 's|__LIBDIR__|%{_cross_libdir}|' %{S:305} > nvidia-tesla.conf +install -m 0644 nvidia-tesla.conf %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/ # Services to 
link/copy/load modules -sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:304} > link-tesla-kernel-modules.service -sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:305} > load-tesla-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:306} > link-tesla-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:307} > load-tesla-kernel-modules.service install -p -m 0644 \ link-tesla-kernel-modules.service \ load-tesla-kernel-modules.service \ %{buildroot}%{_cross_unitdir} -# driver -install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-kernel.o +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:308} > copy-open-gpu-kernel-modules.service +sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:309} > load-open-gpu-kernel-modules.service +install -p -m 0644 \ + copy-open-gpu-kernel-modules.service \ + load-open-gpu-kernel-modules.service \ + %{buildroot}%{_cross_unitdir} + +# proprietary driver +install kernel/nvidia.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia/nv-kernel.o_binary %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-uvm.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-uvm.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # modeset -install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-modeset.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-interface.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-modeset/nv-modeset-kernel.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # peermem -install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-peermem.mod.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-peermem/nvidia-peermem.o %{buildroot}%{_cross_datadir}/nvidia/tesla/module-objects.d # drm -install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d +install kernel/nvidia-drm.mod.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d +install kernel/nvidia-drm.o %{buildroot}/%{_cross_datadir}/nvidia/tesla/module-objects.d + +# open driver +install -d %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +install kernel-open/nvidia.ko 
%{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# uvm +install kernel-open/nvidia-uvm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# modeset +install kernel-open/nvidia-modeset.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# peermem +install kernel-open/nvidia-peermem.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ + +# drm +install kernel-open/nvidia-drm.ko %{buildroot}%{_cross_datadir}/nvidia/open-gpu/drivers/ +# end open driver # Binaries -install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-smi %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-debugdump %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-control %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-cuda-mps-server %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -m 755 nvidia-persistenced %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/ install -m 4755 nvidia-modprobe %{buildroot}%{_cross_bindir} %if "%{_cross_arch}" == "x86_64" -install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} +install -m 755 nvidia-ngx-updater %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin %endif # Users -install -m 0644 %{S:203} %{buildroot}%{_cross_sysusersdir}/nvidia.conf +install -m 0644 %{S:205} %{buildroot}%{_cross_sysusersdir}/nvidia.conf # Systemd units -sed -e 's|__NVIDIA_BINDIR__|%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}|' %{S:204} > nvidia-persistenced.service -install -m 0644 nvidia-persistenced.service %{buildroot}%{_cross_unitdir} +install -m 0644 %{S:206} %{buildroot}%{_cross_unitdir} # We install all the libraries, and filter them out in the 'files' section, so we can catch # when new libraries are added -install -m 755 *.so* %{buildroot}/%{tesla_470_libdir}/ +install -m 755 *.so* %{buildroot}/%{_cross_libdir}/nvidia/tesla/ # This library has the same SONAME as libEGL.so.1.1.0, this will cause collisions while # the symlinks are created. For now, we only symlink libEGL.so.1.1.0. -EXCLUDED_LIBS="libEGL.so.%{tesla_470}" +EXCLUDED_LIBS="libEGL.so.%{tesla_ver}" for lib in $(find . -maxdepth 1 -type f -name 'lib*.so.*' -printf '%%P\n'); do [[ "${EXCLUDED_LIBS}" =~ "${lib}" ]] && continue soname="$(%{_cross_target}-readelf -d "${lib}" | awk '/SONAME/{print $5}' | tr -d '[]')" [ -n "${soname}" ] || continue [ "${lib}" == "${soname}" ] && continue - ln -s "${lib}" %{buildroot}/%{tesla_470_libdir}/"${soname}" + ln -s "${lib}" %{buildroot}/%{_cross_libdir}/nvidia/tesla/"${soname}" +done + +# Include the firmware file for GSP support +install -d %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_ga10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} +install -p -m 0644 firmware/gsp_tu10x.bin %{buildroot}%{_cross_libdir}/firmware/nvidia/%{tesla_ver} + +# Include the open driver supported devices file for runtime matching of the driver. 
This is consumed by ghostdog to match the driver to this list +install -p -m 0644 supported-gpus/open-gpu-supported-devices.json %{buildroot}%{_cross_datadir}/nvidia/open-gpu-supported-devices.json + +popd + +# Begin NVIDIA fabric manager binaries and topologies +pushd fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive +install -p -m 0755 usr/bin/nv-fabricmanager %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin +install -p -m 0755 usr/bin/nvswitch-audit %{buildroot}%{_cross_libexecdir}/nvidia/tesla/bin + +install -d %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch +for t in usr/share/nvidia/nvswitch/*_topology ; do + install -p -m 0644 "${t}" %{buildroot}%{_cross_datadir}/nvidia/tesla/nvswitch done popd @@ -200,49 +340,65 @@ popd %dir %{_cross_datadir}/nvidia %dir %{_cross_libdir}/modules-load.d %dir %{_cross_factorydir}%{_cross_sysconfdir}/drivers +%dir %{_cross_factorydir}%{_cross_sysconfdir}/nvidia %{_cross_tmpfilesdir}/nvidia.conf -%{_cross_libdir}/systemd/system/ %{_cross_libdir}/modules-load.d/nvidia-dependencies.conf -%files tesla-470 +%files tesla-%{tesla_major} %license NVidiaEULAforAWS.pdf -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_470} -%dir %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470} -%dir %{tesla_470_libdir} -%dir %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d -%dir %{_cross_factorydir}/nvidia/tesla/%{tesla_470} +%license fabricmanager-linux-%{fm_arch}-%{tesla_ver}-archive/usr/share/doc/nvidia-fabricmanager/third-party-notices.txt +%dir %{_cross_datadir}/nvidia/tesla +%dir %{_cross_libexecdir}/nvidia/tesla/bin +%dir %{_cross_libdir}/nvidia/tesla +%dir %{_cross_libdir}/firmware/nvidia/%{tesla_ver} +%dir %{_cross_datadir}/nvidia/tesla/module-objects.d +%dir %{_cross_factorydir}/nvidia/tesla # Service files for link/copy/loading drivers %{_cross_unitdir}/link-tesla-kernel-modules.service %{_cross_unitdir}/load-tesla-kernel-modules.service +%{_cross_unitdir}/copy-open-gpu-kernel-modules.service +%{_cross_unitdir}/load-open-gpu-kernel-modules.service # Binaries -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-debugdump -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-smi -%{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-persistenced +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-debugdump +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-smi +%{_cross_libexecdir}/nvidia/tesla/bin/nv-fabricmanager +%{_cross_libexecdir}/nvidia/tesla/bin/nvswitch-audit +%{_cross_libexecdir}/nvidia/tesla/bin/nvidia-persistenced %{_cross_bindir}/nvidia-modprobe +# nvswitch topologies +%dir %{_cross_datadir}/nvidia/tesla/nvswitch +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxa100_hgxa100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgx2_hgx2_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh100_hgxh100_topology +%{_cross_datadir}/nvidia/tesla/nvswitch/dgxh800_hgxh800_topology + # Configuration files -%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla-%{tesla_470}.toml -%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla-%{tesla_470}.conf -%{_cross_factorydir}/nvidia/tesla/%{tesla_470}/nvidia-path.env +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-tesla.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu.toml +%{_cross_factorydir}%{_cross_sysconfdir}/drivers/nvidia-open-gpu-copy-only.toml +%{_cross_factorydir}%{_cross_sysconfdir}/ld.so.conf.d/nvidia-tesla.conf +%{_cross_factorydir}/nvidia/tesla/nvidia-path.env +%{_cross_datadir}/nvidia/open-gpu-supported-devices.json # driver 
-%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-interface.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-kernel.o # uvm -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-uvm.mod.o -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-uvm.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-uvm.o # modeset -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-modeset-interface.o -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nv-modeset-kernel.o -%{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-modeset.mod.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-interface.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nv-modeset-kernel.o +%{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-modeset.mod.o # tmpfiles -%{_cross_tmpfilesdir}/nvidia-tesla-%{tesla_470}.conf +%{_cross_tmpfilesdir}/nvidia-tesla.conf # sysuser files %{_cross_sysusersdir}/nvidia.conf @@ -254,103 +410,131 @@ popd # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities # Utility libs -%{tesla_470_libdir}/libnvidia-ml.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-ml.so.1 -%{tesla_470_libdir}/libnvidia-cfg.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-cfg.so.1 -%{tesla_470_libdir}/libnvidia-nvvm.so.4.0.0 -%{tesla_470_libdir}/libnvidia-nvvm.so.4 +%{_cross_libdir}/nvidia/tesla/libnvidia-api.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ml.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-cfg.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.4 +%{_cross_libdir}/nvidia/tesla/libnvidia-nvvm.so.%{tesla_ver} # Compute libs -%{tesla_470_libdir}/libcuda.so.%{tesla_470} -%{tesla_470_libdir}/libcuda.so.1 -%{tesla_470_libdir}/libnvidia-opencl.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-opencl.so.1 -%{tesla_470_libdir}/libnvidia-ptxjitcompiler.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-ptxjitcompiler.so.1 -%{tesla_470_libdir}/libnvidia-allocator.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-allocator.so.1 -%{tesla_470_libdir}/libOpenCL.so.1.0.0 -%{tesla_470_libdir}/libOpenCL.so.1 +%{_cross_libdir}/nvidia/tesla/libcuda.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libcuda.so.1 +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libcudadebugger.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-opencl.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ptxjitcompiler.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-allocator.so.1 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1.0.0 +%{_cross_libdir}/nvidia/tesla/libOpenCL.so.1 %if "%{_cross_arch}" == "x86_64" -%{tesla_470_libdir}/libnvidia-compiler.so.%{tesla_470} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-pkcs11-openssl3.so.%{tesla_ver} %endif # Video libs 
-%{tesla_470_libdir}/libvdpau_nvidia.so.%{tesla_470} -%{tesla_470_libdir}/libvdpau_nvidia.so.1 -%{tesla_470_libdir}/libnvidia-encode.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-encode.so.1 -%{tesla_470_libdir}/libnvidia-opticalflow.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-opticalflow.so.1 -%{tesla_470_libdir}/libnvcuvid.so.%{tesla_470} -%{tesla_470_libdir}/libnvcuvid.so.1 +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libvdpau_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-encode.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-opticalflow.so.1 +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvcuvid.so.1 # Graphics libs -%{tesla_470_libdir}/libnvidia-eglcore.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-glcore.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-tls.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-glsi.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-rtcore.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-fbc.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-fbc.so.1 -%{tesla_470_libdir}/libnvoptix.so.%{tesla_470} -%{tesla_470_libdir}/libnvoptix.so.1 -%{tesla_470_libdir}/libnvidia-vulkan-producer.so.%{tesla_470} -%if "%{_cross_arch}" == "x86_64" -%{tesla_470_libdir}/libnvidia-ifr.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-ifr.so.1 -%endif +%{_cross_libdir}/nvidia/tesla/libnvidia-eglcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-tls.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-glsi.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-rtcore.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-fbc.so.1 +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvoptix.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-vulkan-producer.so.%{tesla_ver} # Graphics GLVND libs -%{tesla_470_libdir}/libnvidia-glvkspirv.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-cbl.so.%{tesla_470} -%{tesla_470_libdir}/libGLX_nvidia.so.%{tesla_470} -%{tesla_470_libdir}/libGLX_nvidia.so.0 -%{tesla_470_libdir}/libEGL_nvidia.so.%{tesla_470} -%{tesla_470_libdir}/libEGL_nvidia.so.0 -%{tesla_470_libdir}/libGLESv2_nvidia.so.%{tesla_470} -%{tesla_470_libdir}/libGLESv2_nvidia.so.2 -%{tesla_470_libdir}/libGLESv1_CM_nvidia.so.%{tesla_470} -%{tesla_470_libdir}/libGLESv1_CM_nvidia.so.1 +%{_cross_libdir}/nvidia/tesla/libnvidia-glvkspirv.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLX_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libEGL_nvidia.so.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLESv2_nvidia.so.2 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM_nvidia.so.1 # Graphics compat -%{tesla_470_libdir}/libEGL.so.1.1.0 -%{tesla_470_libdir}/libEGL.so.1 -%{tesla_470_libdir}/libEGL.so.%{tesla_470} -%{tesla_470_libdir}/libGL.so.1.7.0 -%{tesla_470_libdir}/libGL.so.1 -%{tesla_470_libdir}/libGLESv1_CM.so.1.2.0 -%{tesla_470_libdir}/libGLESv1_CM.so.1 -%{tesla_470_libdir}/libGLESv2.so.2.1.0 -%{tesla_470_libdir}/libGLESv2.so.2 +%{_cross_libdir}/nvidia/tesla/libEGL.so.1.1.0 
+%{_cross_libdir}/nvidia/tesla/libEGL.so.1 +%{_cross_libdir}/nvidia/tesla/libEGL.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libGL.so.1.7.0 +%{_cross_libdir}/nvidia/tesla/libGL.so.1 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1.2.0 +%{_cross_libdir}/nvidia/tesla/libGLESv1_CM.so.1 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2.1.0 +%{_cross_libdir}/nvidia/tesla/libGLESv2.so.2 # NGX -%if "%{_cross_arch}" == "x86_64" -%{tesla_470_libdir}/libnvidia-ngx.so.%{tesla_470} -%{tesla_470_libdir}/libnvidia-ngx.so.1 -%endif +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.%{tesla_ver} +%{_cross_libdir}/nvidia/tesla/libnvidia-ngx.so.1 + +# Firmware +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_ga10x.bin +%{_cross_libdir}/firmware/nvidia/%{tesla_ver}/gsp_tu10x.bin # Neither nvidia-peermem nor nvidia-drm are included in driver container images, we exclude them # for now, and we will add them if requested -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-peermem.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-peermem.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-drm.mod.o -%exclude %{_cross_datadir}/nvidia/tesla/%{tesla_470}/module-objects.d/nvidia-drm.o -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-cuda-mps-control -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-cuda-mps-server +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-peermem.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.mod.o +%exclude %{_cross_datadir}/nvidia/tesla/module-objects.d/nvidia-drm.o +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-control +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-cuda-mps-server %if "%{_cross_arch}" == "x86_64" -%exclude %{_cross_libexecdir}/nvidia/tesla/bin/%{tesla_470}/nvidia-ngx-updater +%exclude %{_cross_libexecdir}/nvidia/tesla/bin/nvidia-ngx-updater %endif # None of these libraries are required by libnvidia-container, so they # won't be used by a containerized workload -%exclude %{tesla_470_libdir}/libGLX.so.0 -%exclude %{tesla_470_libdir}/libGLdispatch.so.0 -%exclude %{tesla_470_libdir}/libOpenGL.so.0 -%exclude %{tesla_470_libdir}/libglxserver_nvidia.so.%{tesla_470} -%exclude %{tesla_470_libdir}/libnvidia-egl-wayland.so.1.1.7 -%exclude %{tesla_470_libdir}/libnvidia-gtk2.so.%{tesla_470} -%exclude %{tesla_470_libdir}/libnvidia-gtk3.so.%{tesla_470} -%exclude %{tesla_470_libdir}/nvidia_drv.so -%exclude %{tesla_470_libdir}/libnvidia-egl-wayland.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libGLX.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libGLdispatch.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libOpenGL.so.0 +%exclude %{_cross_libdir}/nvidia/tesla/libglxserver_nvidia.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk2.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-gtk3.so.%{tesla_ver} +%exclude %{_cross_libdir}/nvidia/tesla/nvidia_drv.so +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-gbm.so.1.1.0 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-egl-wayland.so.1.1.11 +%exclude %{_cross_libdir}/nvidia/tesla/libnvidia-wayland-client.so.%{tesla_ver} + +%files open-gpu-%{tesla_major} +%license COPYING +%dir %{_cross_datadir}/nvidia/open-gpu/drivers 
+%dir %{_cross_factorydir}/nvidia/open-gpu + +# driver +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia.ko + +# uvm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-uvm.ko + +# modeset +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-modeset.ko + +# drm +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-drm.ko + +# peermem +%{_cross_datadir}/nvidia/open-gpu/drivers/nvidia-peermem.ko + +%files fabricmanager +%{_cross_factorydir}%{_cross_sysconfdir}/nvidia/fabricmanager.cfg +%{_cross_unitdir}/nvidia-fabricmanager.service diff --git a/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in b/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in index 79e5956b..8fc77921 100644 --- a/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in +++ b/packages/kmod-5.10-nvidia/link-tesla-kernel-modules.service.in @@ -1,5 +1,5 @@ [Unit] -Description=Link additional kernel modules +Description=Link Tesla kernel modules RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels # Rerunning this service after the system is fully loaded will override # the already linked kernel modules. This doesn't affect the running system, @@ -10,7 +10,8 @@ RefuseManualStop=true [Service] Type=oneshot -ExecStart=/usr/bin/driverdog link-modules +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla link-modules RemainAfterExit=true StandardError=journal+console diff --git a/packages/kmod-5.10-nvidia/load-open-gpu-kernel-modules.service.in b/packages/kmod-5.10-nvidia/load-open-gpu-kernel-modules.service.in new file mode 100644 index 00000000..3862b3e7 --- /dev/null +++ b/packages/kmod-5.10-nvidia/load-open-gpu-kernel-modules.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Load open GPU kernel modules +RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels +After=copy-open-gpu-kernel-modules.service +Requires=copy-open-gpu-kernel-modules.service +# Disable manual restarts to prevent loading kernel modules +# that weren't linked by the running system +RefuseManualStart=true +RefuseManualStop=true + +[Service] +Type=oneshot +ExecCondition=/usr/bin/ghostdog match-nvidia-driver open-gpu +ExecStart=/usr/bin/driverdog --modules-set nvidia-open-gpu load-modules +RemainAfterExit=true +StandardError=journal+console + +[Install] +RequiredBy=preconfigured.target diff --git a/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in b/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in index 3e412860..60024004 100644 --- a/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in +++ b/packages/kmod-5.10-nvidia/load-tesla-kernel-modules.service.in @@ -1,5 +1,5 @@ [Unit] -Description=Load additional kernel modules +Description=Load Tesla kernel modules RequiresMountsFor=PREFIX/lib/modules PREFIX/src/kernels After=link-tesla-kernel-modules.service Requires=link-tesla-kernel-modules.service @@ -10,7 +10,8 @@ RefuseManualStop=true [Service] Type=oneshot -ExecStart=/usr/bin/driverdog load-modules +ExecCondition=/usr/bin/ghostdog match-nvidia-driver tesla +ExecStart=/usr/bin/driverdog --modules-set nvidia-tesla load-modules RemainAfterExit=true StandardError=journal+console diff --git a/packages/kmod-5.10-nvidia/nvidia-fabricmanager.cfg b/packages/kmod-5.10-nvidia/nvidia-fabricmanager.cfg new file mode 100644 index 00000000..f8dc08ea --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-fabricmanager.cfg @@ -0,0 +1,34 @@ +# Modern, systemd-aware settings: +# - Log to journal via stderr +# - Keep running in the foreground +LOG_LEVEL=4 
+LOG_FILE_NAME= +DAEMONIZE=0 + +# Use Unix domain sockets instead of localhost ports. +UNIX_SOCKET_PATH=/run/nvidia/fabricmanager.sock +FM_CMD_UNIX_SOCKET_PATH=/run/nvidia/fabricmanager-cmd.sock + +# Start Fabric Manager in bare metal or full pass through virtualization mode. +FABRIC_MODE=0 +FABRIC_MODE_RESTART=0 + +# Terminate on NVSwitch and GPU config failure. +FM_STAY_RESIDENT_ON_FAILURES=0 + +# When there is a GPU to NVSwitch NVLink failure, remove the GPU with the failure +# from NVLink P2P capability. +ACCESS_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch to NVSwitch NVLink failure, exit Fabric Manager. +TRUNK_LINK_FAILURE_MODE=0 + +# When there is an NVSwitch failure or an NVSwitch is excluded, abort Fabric Manager. +NVSWITCH_FAILURE_MODE=0 + +# When Fabric Manager service is stopped or terminated, abort all running CUDA jobs. +ABORT_CUDA_JOBS_ON_FM_EXIT=1 + +# Path to topology and database files. +TOPOLOGY_FILE_PATH=/usr/share/nvidia/tesla/nvswitch +DATABASE_PATH=/usr/share/nvidia/tesla/nvswitch diff --git a/packages/kmod-5.10-nvidia/nvidia-fabricmanager.service b/packages/kmod-5.10-nvidia/nvidia-fabricmanager.service new file mode 100644 index 00000000..62ae1368 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-fabricmanager.service @@ -0,0 +1,16 @@ +[Unit] +Description=NVIDIA fabric manager service + +[Service] +ExecStart=/usr/libexec/nvidia/tesla/bin/nv-fabricmanager -c /etc/nvidia/fabricmanager.cfg +Type=simple +TimeoutSec=0 +RestartSec=5 +Restart=always +RemainAfterExit=true +StandardError=journal+console +SuccessExitStatus=255 +LimitCORE=infinity + +[Install] +WantedBy=multi-user.target diff --git a/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in b/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in index a07b0ccb..f992bf22 100644 --- a/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in +++ b/packages/kmod-5.10-nvidia/nvidia-ld.so.conf.in @@ -1 +1 @@ -__LIBDIR__/nvidia/tesla/__NVIDIA_VERSION__/ +__LIBDIR__/nvidia/tesla/ diff --git a/packages/kmod-5.10-nvidia/nvidia-open-gpu-config.toml.in b/packages/kmod-5.10-nvidia/nvidia-open-gpu-config.toml.in new file mode 100644 index 00000000..5ae81b71 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-open-gpu-config.toml.in @@ -0,0 +1,11 @@ +[nvidia-open-gpu] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + +[nvidia-open-gpu.kernel-modules."nvidia.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-modeset.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu.kernel-modules."nvidia-uvm.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-5.10-nvidia/nvidia-open-gpu-copy-only-config.toml.in b/packages/kmod-5.10-nvidia/nvidia-open-gpu-copy-only-config.toml.in new file mode 100644 index 00000000..774867d4 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-open-gpu-copy-only-config.toml.in @@ -0,0 +1,8 @@ +[nvidia-open-gpu-copy-only] +lib-modules-path = "kernel/drivers/extra/video/nvidia/open-gpu" + +[nvidia-open-gpu-copy-only.kernel-modules."nvidia-drm.ko"] +copy-source = "__NVIDIA_MODULES__" + +[nvidia-open-gpu-copy-only.kernel-modules."nvidia-peermem.ko"] +copy-source = "__NVIDIA_MODULES__" diff --git a/packages/kmod-5.10-nvidia/nvidia-persistenced.service.in b/packages/kmod-5.10-nvidia/nvidia-persistenced.service similarity index 69% rename from packages/kmod-5.10-nvidia/nvidia-persistenced.service.in rename to packages/kmod-5.10-nvidia/nvidia-persistenced.service index 62663025..f245599c 100644 --- a/packages/kmod-5.10-nvidia/nvidia-persistenced.service.in +++ 
b/packages/kmod-5.10-nvidia/nvidia-persistenced.service @@ -4,7 +4,7 @@ After=load-tesla-kernel-modules.service load-open-gpu-kernel-modules.service [Service] Type=forking -ExecStart=__NVIDIA_BINDIR__/nvidia-persistenced --user nvidia --verbose +ExecStart=/usr/libexec/nvidia/tesla/bin/nvidia-persistenced --user nvidia --verbose [Install] RequiredBy=preconfigured.target diff --git a/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf b/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf new file mode 100644 index 00000000..fd0f4486 --- /dev/null +++ b/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf @@ -0,0 +1,5 @@ +C /etc/drivers/nvidia-tesla.toml +C /etc/drivers/nvidia-open-gpu.toml +C /etc/drivers/nvidia-open-gpu-copy-only.toml +C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/nvidia-path.env +C /etc/ld.so.conf.d/nvidia-tesla.conf diff --git a/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in deleted file mode 100644 index f208e1d2..00000000 --- a/packages/kmod-5.10-nvidia/nvidia-tesla-tmpfiles.conf.in +++ /dev/null @@ -1,3 +0,0 @@ -C /etc/drivers/nvidia-tesla-__NVIDIA_VERSION__.toml -C /etc/containerd/nvidia.env - - - - /usr/share/factory/nvidia/tesla/__NVIDIA_VERSION__/nvidia-path.env -C /etc/ld.so.conf.d/nvidia-tesla-__NVIDIA_VERSION__.conf diff --git a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in index f44152b3..e58fe143 100644 --- a/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in +++ b/packages/kmod-5.10-nvidia/nvidia-tmpfiles.conf.in @@ -1,3 +1,7 @@ R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla - - - - - d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/tesla 0755 root root - - -D /var/run/nvidia-persistenced 0755 nvidia nvidia - - \ No newline at end of file +R __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu - - - - - +d __PREFIX__/lib/modules/__KERNEL_VERSION__/kernel/drivers/extra/video/nvidia/open-gpu 0755 root root - - +C /etc/nvidia/fabricmanager.cfg - - - - +d /run/nvidia 0700 root root - +D /var/run/nvidia-persistenced 0755 nvidia nvidia - - From 0fcadbd12026070b3d70685a09c18340ed72c4bc Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Fri, 4 Oct 2024 22:42:33 +0000 Subject: [PATCH 1321/1356] packages: set Epoch of 1 if package version < core kit release version For all packages whose version is < 4.0.0, set an Epoch of 1 in the package spec. This is to appease new app inventory requirements for building Bottlerocket variants such that package version comparisons using rpmvercmp are not broken. 
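As a rough illustration (not part of the patch; assumes a build host with rpmdevtools installed), rpm compares Epoch before Version, so a package that keeps its small upstream version still sorts above a 0-epoch 4.x version once Epoch: 1 is set:

    # hypothetical comparison mirroring the grub case below
    rpmdev-vercmp 1:2.06-1 0:4.0.0-1    # should report 1:2.06-1 as the newer EVR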
See https://github.com/bottlerocket-os/bottlerocket/discussions/4063 for full details Signed-off-by: Gavin Inglis --- packages/grub/grub.spec | 1 + packages/microcode/microcode.spec | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index a4a7e314..9efc1c94 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -12,6 +12,7 @@ Name: %{_cross_os}grub Version: 2.06 Release: 1%{?dist} +Epoch: 1 Summary: Bootloader with support for Linux and more License: GPL-3.0-or-later AND Unicode-DFS-2015 URL: https://www.gnu.org/software/grub/ diff --git a/packages/microcode/microcode.spec b/packages/microcode/microcode.spec index edbe01e9..5a8ad03a 100644 --- a/packages/microcode/microcode.spec +++ b/packages/microcode/microcode.spec @@ -9,6 +9,7 @@ Name: %{_cross_os}microcode Version: 0.0 Release: 1%{?dist} +Epoch: 1 Summary: Microcode for AMD and Intel processors License: LicenseRef-scancode-amd-linux-firmware-export AND LicenseRef-scancode-intel-mcu-2018 From 23423068ee5637abbfada1c5a6887b8db46e6214 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 7 Oct 2024 19:45:40 +0000 Subject: [PATCH 1322/1356] kmod-*-nvidia: drop dependency on glibc Signed-off-by: Ben Cressey --- packages/kmod-5.10-nvidia/Cargo.toml | 1 - packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 1 - packages/kmod-5.15-nvidia/Cargo.toml | 1 - packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 1 - packages/kmod-6.1-nvidia/Cargo.toml | 1 - packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 1 - 6 files changed, 6 deletions(-) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml b/packages/kmod-5.10-nvidia/Cargo.toml index 785a9812..4e737c30 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -42,5 +42,4 @@ sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3 force-upstream = true [build-dependencies] -glibc = { path = "../glibc" } kernel-5_10 = { path = "../kernel-5.10" } diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 5af1c33f..8826063b 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -55,7 +55,6 @@ Source307: load-tesla-kernel-modules.service.in Source308: copy-open-gpu-kernel-modules.service.in Source309: load-open-gpu-kernel-modules.service.in -BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.10-archive %description diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index 59c1332b..ca5c3df3 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -42,5 +42,4 @@ sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3 force-upstream = true [build-dependencies] -glibc = { path = "../glibc" } kernel-5_15 = { path = "../kernel-5.15" } diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 271d7ca4..c05a3eb6 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -55,7 +55,6 @@ Source307: load-tesla-kernel-modules.service.in Source308: copy-open-gpu-kernel-modules.service.in Source309: load-open-gpu-kernel-modules.service.in -BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-5.15-archive %description diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 
3c53998f..1463b716 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -42,5 +42,4 @@ sha512 = "f9cee68cbb12095af4b4e92d01c210461789ef41c70b64efefd6719d0b88468b7a67a3 force-upstream = true [build-dependencies] -glibc = { path = "../glibc" } kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index bb708f2a..f4dcbd7b 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -55,7 +55,6 @@ Source307: load-tesla-kernel-modules.service.in Source308: copy-open-gpu-kernel-modules.service.in Source309: load-open-gpu-kernel-modules.service.in -BuildRequires: %{_cross_os}glibc-devel BuildRequires: %{_cross_os}kernel-6.1-archive %description From c396c3a903028b1c6757d4f0e670ed123f94f3fa Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Fri, 11 Oct 2024 22:51:00 +0000 Subject: [PATCH 1323/1356] twoliter: update twoliter to 0.5.0 --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index c3e599c4..58354c1b 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,9 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.4.7" -TWOLITER_SHA256_AARCH64 ?= "82fce1895f93946a9e03d71ecaa9a73d0f4681d08cf540be0b7bab3eacbdc41e" -TWOLITER_SHA256_X86_64 ?= "3b5773bea848aa94c5501fa3cf03dc0c788916c7a3da7b989495cd2cdb8b49ec" +TWOLITER_VERSION ?= "0.5.0" +TWOLITER_SHA256_AARCH64 ?= "cec8d30377f5cb38ee1d3bc99bb8aaf3958213b38be6a75d09a8bc5fcd3da590" +TWOLITER_SHA256_X86_64 ?= "d580180969f8b34b1af5d2524ff024e90432f09f991fc044444019da20a027a8" KIT ?= bottlerocket-core-kit UNAME_ARCH = $(shell uname -m) ARCH ?= $(UNAME_ARCH) From 8f13ecf077124fe12c2f8c6f95b0689b5f82b891 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 8 Oct 2024 16:53:37 +0000 Subject: [PATCH 1324/1356] actions: install clang for aws-lc-rs builds So that aws-lc-rs compiles in the actions' environment, given that the rust bindings are generated with bindgen which depends on libclang Signed-off-by: Arnaldo Garcia Rincon --- .github/actions/setup-node/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/setup-node/action.yml b/.github/actions/setup-node/action.yml index 58a9b92d..501e8721 100644 --- a/.github/actions/setup-node/action.yml +++ b/.github/actions/setup-node/action.yml @@ -3,7 +3,7 @@ description: "Performs setup for caching and other common needs." runs: using: "composite" steps: - - run: sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool + - run: sudo apt -y install build-essential openssl libssl-dev pkg-config liblz4-tool clang shell: bash - uses: actions/cache/restore@v4 # Restore most recent cache if available. From b557e3129d5fa8d0c73ccdf73368bb089c79d0c2 Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Tue, 15 Oct 2024 21:37:14 +0000 Subject: [PATCH 1325/1356] kernel-6.1: update to 6.1.112 Rebase to Amazon Linux upstream version 6.1.112-122.189.amzn2023. 
Signed-off-by: Yutong Sun --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 0bfc444d..9267e63b 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/60b1be96cb0d00c8998e26b855b51b54e1cc82a655bb47a1d4f51c5ffbdd3148/kernel-6.1.109-118.189.amzn2023.src.rpm" -sha512 = "2a40b73e7fbc28f48b01e3d0f463e6c72660662ce498fc91c4727617201ed1714480d731c9e59e8de632cb829ba1dc6cf0a07838eb9b90e61a2b422cb17aae8b" +url = "https://cdn.amazonlinux.com/al2023/blobstore/b88530d26f68ef4d2080a189cb3ff1b722a7298e63a286d4bbb86116075ba469/kernel-6.1.112-122.189.amzn2023.src.rpm" +sha512 = "77c1bea98a14f611bd59e2058495e7d6a8a117f3a378087c19ef5bbf8379d0f4e775490052578a625347f255a364a878357b3ddcea962063f0802aab09dd40b6" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index ebcc43fc..65610308 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.109 +Version: 6.1.112 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/60b1be96cb0d00c8998e26b855b51b54e1cc82a655bb47a1d4f51c5ffbdd3148/kernel-6.1.109-118.189.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/b88530d26f68ef4d2080a189cb3ff1b722a7298e63a286d4bbb86116075ba469/kernel-6.1.112-122.189.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 69bb4145327d37b4d89d6f5f489349ea06e33dab Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Tue, 15 Oct 2024 21:37:46 +0000 Subject: [PATCH 1326/1356] kernel-5.15: update to 5.15.167 Rebase to Amazon Linux upstream version 5.15.167-112.166.amzn2. Signed-off-by: Yutong Sun --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index e2a73cbd..ac445be6 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/7d9322ae0af16962b5b12f984ec34e1644fe111beb5933bf6d75a1cf7f267201/kernel-5.15.167-112.165.amzn2.src.rpm" -sha512 = "0d39cdc4a4bdcb66aa3f950af2f41ccb9d9f267524b3e5af7def532db68920321081684d2b37f1b0a1e24195b6d77a68f88dc92f5a47bccd3d12b665d2c36e7b" +url = "https://cdn.amazonlinux.com/blobstore/6e5bde865f2f534b3e5c1ae2c3065e711f6c55b7fa5e4f91f0dc55894b1ad844/kernel-5.15.167-112.166.amzn2.src.rpm" +sha512 = "90ca9a2ee14e34a34ddcd6d8d24904bccc5d9ce8560c211fad1a01ed451996aecc3d6bb5b0982fddcbb426b552701d809cfc68506bf3123f77c16a72b844a8dc" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 1929962c..8f477284 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/7d9322ae0af16962b5b12f984ec34e1644fe111beb5933bf6d75a1cf7f267201/kernel-5.15.167-112.165.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/6e5bde865f2f534b3e5c1ae2c3065e711f6c55b7fa5e4f91f0dc55894b1ad844/kernel-5.15.167-112.166.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 546e0286e26334762525ef960d9ac1a067064ce2 Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Tue, 15 Oct 2024 21:38:20 +0000 Subject: [PATCH 1327/1356] kernel-5.10: update to 5.10.226 Rebase to Amazon Linux upstream version 5.10.226-214.880.amzn2. Signed-off-by: Yutong Sun --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 49afaa82..f2f4e011 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/11c2c91624bd7d42460b998b80573fa17717ab1fe853099730a373452bde11a2/kernel-5.10.226-214.879.amzn2.src.rpm" -sha512 = "2609956c60e622c8f6eefcc0279fe2d67958f27ef12e64f3b3515a4c51b3a224160eeeffcc219314b7aa3172c52f41dc6504aba46449eba10cae34846af66101" +url = "https://cdn.amazonlinux.com/blobstore/c7942aadb77fa921637155fdd357a91a8deaf85f3d024fb5b5371052c8309426/kernel-5.10.226-214.880.amzn2.src.rpm" +sha512 = "2992e8cb9662a8e53ca5f9cfb56dad8706fc95147cef4bb15c312406eb55cdbbc19f7584808c1b641c0b094428a3e701e15d83e545d6b9e61d107fc95b7e98f4" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index f618711a..ab67fd5f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/11c2c91624bd7d42460b998b80573fa17717ab1fe853099730a373452bde11a2/kernel-5.10.226-214.879.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/c7942aadb77fa921637155fdd357a91a8deaf85f3d024fb5b5371052c8309426/kernel-5.10.226-214.880.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. 
From 43b85845ed738ab135fb01c7c7fc4cb4acb28b6a Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Tue, 22 Oct 2024 21:38:24 +0000 Subject: [PATCH 1328/1356] packages: add Epoch to remaining packages that require it Signed-off-by: Gavin Inglis --- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 1 + packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 1 + packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 1 + packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 1 + packages/libkcapi/libkcapi.spec | 1 + 5 files changed, 5 insertions(+) diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 8826063b..70f62ade 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -18,6 +18,7 @@ Name: %{_cross_os}kmod-5.10-nvidia Version: 1.0.0 Release: 1%{?dist} +Epoch: 1 Summary: NVIDIA drivers for the 5.10 kernel # We use these licences because we only ship our own software in the main package, # each subpackage includes the LICENSE file provided by the Licenses.toml file diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index c05a3eb6..fdf771e5 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -18,6 +18,7 @@ Name: %{_cross_os}kmod-5.15-nvidia Version: 1.0.0 Release: 1%{?dist} +Epoch: 1 Summary: NVIDIA drivers for the 5.15 kernel # We use these licences because we only ship our own software in the main package, # each subpackage includes the LICENSE file provided by the Licenses.toml file diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec index b69fdce4..ab1a7293 100644 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec @@ -1,6 +1,7 @@ Name: %{_cross_os}kmod-6.1-neuron Version: 2.18.12.0 Release: 1%{?dist} +Epoch: 1 Summary: Neuron drivers for the 6.1 kernel License: GPL-2.0-only URL: https://awsdocs-neuron.readthedocs-hosted.com/en/latest/ diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index f4dcbd7b..9fdd6b91 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -18,6 +18,7 @@ Name: %{_cross_os}kmod-6.1-nvidia Version: 1.0.0 Release: 1%{?dist} +Epoch: 1 Summary: NVIDIA drivers for the 6.1 kernel # We use these licences because we only ship our own software in the main package, # each subpackage includes the LICENSE file provided by the Licenses.toml file diff --git a/packages/libkcapi/libkcapi.spec b/packages/libkcapi/libkcapi.spec index f68fc293..fdc73889 100644 --- a/packages/libkcapi/libkcapi.spec +++ b/packages/libkcapi/libkcapi.spec @@ -21,6 +21,7 @@ ln -s .libkcapi.so.%{version}.hmac .libkcapi.so.1.hmac\ Name: %{_cross_os}libkcapi Version: 1.5.0 Release: 1%{?dist} +Epoch: 1 Summary: Library for kernel crypto API License: BSD-3-Clause OR GPL-2.0-only URL: https://www.chronox.de/libkcapi/html/index.html From fe231deac163230dd5db82b1971cb2c00ca5b4a7 Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Tue, 22 Oct 2024 23:26:42 +0000 Subject: [PATCH 1329/1356] kmod-5.10-nvidia: update driver to 535.216.01 --- packages/kmod-5.10-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/kmod-5.10-nvidia/Cargo.toml 
b/packages/kmod-5.10-nvidia/Cargo.toml index 4e737c30..9aef3c63 100644 --- a/packages/kmod-5.10-nvidia/Cargo.toml +++ b/packages/kmod-5.10-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" -sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-x86_64-535.216.01.run" +sha512 = "3b4ae3584368fcc5f81a680dd8588d8b9e48f43dafe2490f5414ed258fa8c9799ebd40d2fd115e20bd02648eeb3e5c6dff39562d89353580fa679d011cebf6f8" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" -sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-aarch64-535.216.01.run" +sha512 = "f68794249bf18ba626c6a665880721c8cc0dada6c7c1d8b15bf17174a4cac35ca2ab534fff2410c8bc0326c48f6ab913b6d9a92630505eeb768e02610a7772d9" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" -sha512 = "c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.216.01-1.x86_64.rpm" +sha512 = "9208004779a57418cef4e0eacfad549e01fc3e193cda24a4f809325fee3a74910350c7752372d5dba7b74e9d5bf9da5807bc8de2bedade6dbe23b270c3047dfe" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" -sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.216.01-1.aarch64.rpm" +sha512 = "1f553e4627953cceef8f630d2be907829f8b78b789ffee7691ace541f759bdb07016e364c20e1d5779ce463f0b48448cea292f58a9899523ec840bb5a0c37b0e" force-upstream = true [[package.metadata.build-package.external-files]] diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 70f62ade..3e8d085c 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 183 -%global tesla_patch 06 +%global tesla_minor 216 +%global tesla_patch 01 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From bb3d468530e732a4abf10abe36407177b069b283 Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Tue, 22 Oct 2024 23:27:49 +0000 Subject: [PATCH 1330/1356] kmod-5.15-nvidia: update driver to 535.216.01 --- packages/kmod-5.15-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 4 ++-- 2 files 
changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/kmod-5.15-nvidia/Cargo.toml b/packages/kmod-5.15-nvidia/Cargo.toml index ca5c3df3..21574524 100644 --- a/packages/kmod-5.15-nvidia/Cargo.toml +++ b/packages/kmod-5.15-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" -sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-x86_64-535.216.01.run" +sha512 = "3b4ae3584368fcc5f81a680dd8588d8b9e48f43dafe2490f5414ed258fa8c9799ebd40d2fd115e20bd02648eeb3e5c6dff39562d89353580fa679d011cebf6f8" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" -sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-aarch64-535.216.01.run" +sha512 = "f68794249bf18ba626c6a665880721c8cc0dada6c7c1d8b15bf17174a4cac35ca2ab534fff2410c8bc0326c48f6ab913b6d9a92630505eeb768e02610a7772d9" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" -sha512 = "c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.216.01-1.x86_64.rpm" +sha512 = "9208004779a57418cef4e0eacfad549e01fc3e193cda24a4f809325fee3a74910350c7752372d5dba7b74e9d5bf9da5807bc8de2bedade6dbe23b270c3047dfe" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" -sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.216.01-1.aarch64.rpm" +sha512 = "1f553e4627953cceef8f630d2be907829f8b78b789ffee7691ace541f759bdb07016e364c20e1d5779ce463f0b48448cea292f58a9899523ec840bb5a0c37b0e" force-upstream = true [[package.metadata.build-package.external-files]] diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index fdf771e5..01474826 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 183 -%global tesla_patch 06 +%global tesla_minor 216 +%global tesla_patch 01 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From 859cd1b14b9f7a129eced63486e3270197067f6a Mon Sep 17 00:00:00 2001 From: Vighnesh Maheshwari Date: Tue, 22 Oct 2024 23:29:58 +0000 Subject: [PATCH 1331/1356] kmod-6.1-nvidia: update driver to 535.216.01 --- 
packages/kmod-6.1-nvidia/Cargo.toml | 16 ++++++++-------- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/kmod-6.1-nvidia/Cargo.toml b/packages/kmod-6.1-nvidia/Cargo.toml index 1463b716..a3fc0f1d 100644 --- a/packages/kmod-6.1-nvidia/Cargo.toml +++ b/packages/kmod-6.1-nvidia/Cargo.toml @@ -17,23 +17,23 @@ url = "https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf" sha512 = "e1926fe99afc3ab5b2f2744fcd53b4046465aefb2793e2e06c4a19455a3fde895e00af1415ff1a5804c32e6a2ed0657e475de63da6c23a0e9c59feeef52f3f58" [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-x86_64-535.183.06.run" -sha512 = "424950ef303ea39499e96f8c90c1e0c83aee12309779d4f335769ef554ad4f7c38e98f69c64b408adc85a7cf51ea600d85222792402b9c6b7941f1af066d2a33" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-x86_64-535.216.01.run" +sha512 = "3b4ae3584368fcc5f81a680dd8588d8b9e48f43dafe2490f5414ed258fa8c9799ebd40d2fd115e20bd02648eeb3e5c6dff39562d89353580fa679d011cebf6f8" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://us.download.nvidia.com/tesla/535.183.06/NVIDIA-Linux-aarch64-535.183.06.run" -sha512 = "bb305f1703557461b0a0a29066c304658d9684841104c6f4d9ff44f9db90fee14ae619cd2fe3242823a5fe3a69b168b8174b163740014b15cdef36db88ba2d96" +url = "https://us.download.nvidia.com/tesla/535.216.01/NVIDIA-Linux-aarch64-535.216.01.run" +sha512 = "f68794249bf18ba626c6a665880721c8cc0dada6c7c1d8b15bf17174a4cac35ca2ab534fff2410c8bc0326c48f6ab913b6d9a92630505eeb768e02610a7772d9" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.183.06-1.x86_64.rpm" -sha512 = "c3d98878363f857b2963665a0e485cb7b1afeaabd0040a970478d00ffb870ab4130ab9dfe1b7a40d1b38734636ebccec39fd1b3fc8c06abc5c07470f749b6025" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/nvidia-fabric-manager-535.216.01-1.x86_64.rpm" +sha512 = "9208004779a57418cef4e0eacfad549e01fc3e193cda24a4f809325fee3a74910350c7752372d5dba7b74e9d5bf9da5807bc8de2bedade6dbe23b270c3047dfe" force-upstream = true [[package.metadata.build-package.external-files]] -url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.183.06-1.aarch64.rpm" -sha512 = "6a646cd7ea11e668f7dbe6f6bb22516107a856e3c3755f8693c91d4bed706b8b3667b853f07e84c2d0da4de7ab1107337b6a1493879d75d8c201bfe9da071b32" +url = "https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa/nvidia-fabric-manager-535.216.01-1.aarch64.rpm" +sha512 = "1f553e4627953cceef8f630d2be907829f8b78b789ffee7691ace541f759bdb07016e364c20e1d5779ce463f0b48448cea292f58a9899523ec840bb5a0c37b0e" force-upstream = true [[package.metadata.build-package.external-files]] diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index 9fdd6b91..d500071b 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -1,6 +1,6 @@ %global tesla_major 535 -%global tesla_minor 183 -%global tesla_patch 06 +%global tesla_minor 216 +%global tesla_patch 01 %global tesla_ver %{tesla_major}.%{tesla_minor}.%{tesla_patch} %if "%{?_cross_arch}" == "aarch64" %global fm_arch sbsa From b1ae9bca0ddab3e5667de9cc176ce97c75c951d8 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Tue, 15 Oct 2024 18:41:41 
+0000 Subject: [PATCH 1332/1356] sources: vend aws-smithy-experimental Vend `aws-smithy-experimental` so that we can use the APIs required to support Proxies in AWS SDK clients Signed-off-by: Arnaldo Garcia Rincon --- COPYRIGHT | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/COPYRIGHT b/COPYRIGHT index d670cbd2..17ae9447 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -17,3 +17,9 @@ operating system images. Contains modified hyper-proxy files [mod.rs, stream.rs, tunnel.rs] from https://github.com/tafia/hyper-proxy 2021-09-20. Copyright (c) 2017 Johann Tuffe. Licensed under the MIT License. + +=^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= + +Contains aws-smithy-experimental from +https://github.com/smithy-lang/smithy-rs/tree/release-2024-10-09. +Licensed under the Apache-2.0 License. From 74eed331fc75129abaaf4a85d71be23428c6d739 Mon Sep 17 00:00:00 2001 From: "Patrick J.P. Culp" Date: Sat, 26 Oct 2024 21:22:14 +0000 Subject: [PATCH 1333/1356] pluto: add proxy support for FIPS Replaces `hyper-proxy` with `hyper-http-proxy`. Adds support for hyper client creation using a proxy connector in `aws-smithy-experimental`. Signed-off-by: Patrick J.P. Culp --- COPYRIGHT | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/COPYRIGHT b/COPYRIGHT index 17ae9447..522b0c77 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -14,12 +14,6 @@ operating system images. =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= -Contains modified hyper-proxy files [mod.rs, stream.rs, tunnel.rs] from -https://github.com/tafia/hyper-proxy 2021-09-20. -Copyright (c) 2017 Johann Tuffe. Licensed under the MIT License. - -=^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= - -Contains aws-smithy-experimental from +Contains modified aws-smithy-experimental file(s) [hyper_1_0.rs] from https://github.com/smithy-lang/smithy-rs/tree/release-2024-10-09. Licensed under the Apache-2.0 License. From 6dbdf02d370e26544ae42e130be7625e4aa15c0d Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Mon, 28 Oct 2024 18:20:30 +0000 Subject: [PATCH 1334/1356] libkcapi: switch to Amazon Linux 2023 as upstream This aligns libkcapi with the version specified in the FIPS 140-3 security policy for the Amazon Linux 2023 Kernel Cryptographic API. Bump the epoch to ensure that the "older" 1.4.0 version is preferred over the "newer" 1.5.0 version from past core kit releases. Trim the set of installed files down to just the ones referenced by the security policy: `sha512hmac` and `libkcapi.so.1.4.0`. 
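For reference, the HMAC self-check files produced by the install-post helpers in the spec below can be reproduced by hand; this is only a sketch of what those macros expand to, run from hypothetical buildroot directories after stripping:

    # spec's %openssl_sha512_hmac applied to the hasher binary
    openssl sha512 -hmac FIPS-FTW-RHT2009 -hex sha512hmac | awk '{ print $2 }' > .sha512hmac.hmac
    # spec's %openssl_sha256_hmac applied to the shared library
    openssl sha256 -hmac orboDeJITITejsirpADONivirpUkvarP -hex libkcapi.so.1.4.0 | awk '{ print $2 }' > .libkcapi.so.1.4.0.hmac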
Signed-off-by: Ben Cressey --- packages/libkcapi/Cargo.toml | 7 ++----- packages/libkcapi/libkcapi.spec | 35 +++++++++++++++------------------ 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/packages/libkcapi/Cargo.toml b/packages/libkcapi/Cargo.toml index cd7ad6a2..5e14eb5d 100644 --- a/packages/libkcapi/Cargo.toml +++ b/packages/libkcapi/Cargo.toml @@ -8,12 +8,9 @@ build = "../build.rs" [lib] path = "../packages.rs" -[package.metadata.build-package] -releases-url = "https://github.com/smuellerDD/libkcapi/releases" - [[package.metadata.build-package.external-files]] -url = "https://github.com/smuellerDD/libkcapi/archive/v1.5.0/libkcapi-1.5.0.tar.gz" -sha512 = "510d0606cdc9479a77ed07bd3ac59b07c3996402a85cee012e6836d0a31cb06f5b7f715cdb76f3745784aab3154595caec4537b4c774236a139ebfe6e1a8be9b" +url = "https://cdn.amazonlinux.com/al2023/blobstore/0eef74b3b4eb1ec321bab80f867aee89b94dc9fc95571da58ea5bba7a70e6224/libkcapi-1.4.0-105.amzn2023.0.1.src.rpm" +sha512 = "6498147434059343f1ccdd7efadcd425ad7074e41b4e019fc995129d5df326b781e0a61a4324e1ce8d6771162d1612b754ce24625fb3b1458811f6bde8f638c9" [build-dependencies] glibc = { path = "../glibc" } diff --git a/packages/libkcapi/libkcapi.spec b/packages/libkcapi/libkcapi.spec index fdc73889..170a156a 100644 --- a/packages/libkcapi/libkcapi.spec +++ b/packages/libkcapi/libkcapi.spec @@ -1,7 +1,6 @@ -# libkcapi since 85bce6035b (1.5.0) uses sha512hmac with the same key for all -# self-checks. Earlier versions used sha256hmac with a different key to check -# the shared library. +# Helper functions that use OpenSSL to compute an HMAC with specified keys. %global openssl_sha512_hmac openssl sha512 -hmac FIPS-FTW-RHT2009 -hex +%global openssl_sha256_hmac openssl sha256 -hmac orboDeJITITejsirpADONivirpUkvarP -hex # We need to compute the HMAC after the binaries have been stripped. %define __spec_install_post\ @@ -9,23 +8,22 @@ %{__arch_install_post}\ %{__os_install_post}\ cd %{buildroot}/%{_cross_bindir}\ -%openssl_sha512_hmac kcapi-hasher\\\ - | awk '{ print $2 }' > .kcapi-hasher.hmac\ -ln -s .kcapi-hasher.hmac .sha512hmac.hmac\ +%openssl_sha512_hmac sha512hmac\\\ + | awk '{ print $2 }' > .sha512hmac.hmac\ cd %{buildroot}/%{_cross_libdir}\ -%openssl_sha512_hmac libkcapi.so.%{version}\\\ +%openssl_sha256_hmac libkcapi.so.%{version}\\\ | awk '{ print $2 }' > .libkcapi.so.%{version}.hmac\ ln -s .libkcapi.so.%{version}.hmac .libkcapi.so.1.hmac\ %{nil} Name: %{_cross_os}libkcapi -Version: 1.5.0 +Version: 1.4.0 Release: 1%{?dist} -Epoch: 1 +Epoch: 2 Summary: Library for kernel crypto API License: BSD-3-Clause OR GPL-2.0-only URL: https://www.chronox.de/libkcapi/html/index.html -Source0: https://github.com/smuellerDD/libkcapi/archive/v%{version}/libkcapi-%{version}.tar.gz +Source0: https://cdn.amazonlinux.com/al2023/blobstore/0eef74b3b4eb1ec321bab80f867aee89b94dc9fc95571da58ea5bba7a70e6224/libkcapi-1.4.0-105.amzn2023.0.1.src.rpm BuildRequires: %{_cross_os}glibc-devel %description @@ -39,12 +37,14 @@ Requires: %{name} %{summary}. %prep -%autosetup -n libkcapi-%{version} -p1 +rpm2cpio %{SOURCE0} | cpio -iu libkcapi-%{version}.tar.xz +tar -xof libkcapi-%{version}.tar.xz; rm libkcapi-%{version}.tar.xz +%setup -TDn libkcapi-%{version} %build autoreconf -fi %cross_configure \ - --enable-static \ + --disable-static \ --enable-shared \ --enable-kcapi-hasher \ @@ -55,7 +55,10 @@ autoreconf -fi %install %make_install -ln -s kcapi-hasher %{buildroot}%{_cross_bindir}/sha512hmac +# Remove all binaries except `sha512hmac`. 
+find %{buildroot}%{_cross_bindir} -type f ! -name 'sha512hmac' -delete + +# Clean up HMAC signatures, which will be regenerated. find %{buildroot} -type f -name '*.hmac' -delete %files @@ -63,16 +66,10 @@ find %{buildroot} -type f -name '*.hmac' -delete %{_cross_attribution_file} %{_cross_libdir}/*.so.* %{_cross_libdir}/.*.so.*.hmac -%{_cross_bindir}/kcapi-hasher -%{_cross_bindir}/.kcapi-hasher.hmac %{_cross_bindir}/sha512hmac %{_cross_bindir}/.sha512hmac.hmac -%exclude %{_cross_libexecdir}/libkcapi -%exclude %{_cross_mandir} - %files devel -%{_cross_libdir}/*.a %{_cross_libdir}/*.so %{_cross_includedir}/kcapi.h %{_cross_pkgconfigdir}/*.pc From 835b3037c7f60df11b77ba40889f25cab7e8e81b Mon Sep 17 00:00:00 2001 From: Kyle Sessions Date: Wed, 30 Oct 2024 00:00:05 +0000 Subject: [PATCH 1335/1356] kernel-6.1: add patch to fix io statistics for cgroup v1 Signed-off-by: Kyle Sessions --- ...ttle-Fix-io-statistics-for-cgroup-v1.patch | 95 +++++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 3 + 2 files changed, 98 insertions(+) create mode 100644 packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch diff --git a/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch b/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch new file mode 100644 index 00000000..3bd4d39f --- /dev/null +++ b/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch @@ -0,0 +1,95 @@ +From 4c5b35e202a3dd2c0d0bf0715c695ef3cc6d902e Mon Sep 17 00:00:00 2001 +From: Jinke Han +Date: Mon, 8 May 2023 01:06:31 +0800 +Subject: [PATCH] blk-throttle: Fix io statistics for cgroup v1 + +After commit f382fb0bcef4 ("block: remove legacy IO schedulers"), +blkio.throttle.io_serviced and blkio.throttle.io_service_bytes become +the only stable io stats interface of cgroup v1, and these statistics +are done in the blk-throttle code. But the current code only counts the +bios that are actually throttled. When the user does not add the throttle +limit, the io stats for cgroup v1 has nothing. I fix it according to the +statistical method of v2, and made it count all ios accurately. 
+ +Fixes: a7b36ee6ba29 ("block: move blk-throtl fast path inline") +Tested-by: Andrea Righi +Signed-off-by: Jinke Han +Acked-by: Muchun Song +Acked-by: Tejun Heo +Link: https://lore.kernel.org/r/20230507170631.89607-1-hanjinke.666@bytedance.com +Signed-off-by: Jens Axboe +[bcressey: + - backport to 6.1 + - adjust context in blk_cgroup_bio_start + - avoid changes from 3b8cc629 ("blk-cgroup: Optimize blkcg_rstat_flush()")] +Signed-off-by: Ben Cressey +--- + block/blk-cgroup.c | 6 ++++-- + block/blk-throttle.c | 6 ------ + block/blk-throttle.h | 9 +++++++++ + 3 files changed, 13 insertions(+), 8 deletions(-) + +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index 1b7fd1fc2f33..435432c4a62e 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1969,6 +1969,9 @@ void blk_cgroup_bio_start(struct bio *bio) + struct blkg_iostat_set *bis; + unsigned long flags; + ++ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) ++ return; ++ + cpu = get_cpu(); + bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); + flags = u64_stats_update_begin_irqsave(&bis->sync); +@@ -1984,8 +1987,7 @@ void blk_cgroup_bio_start(struct bio *bio) + bis->cur.ios[rwd]++; + + u64_stats_update_end_irqrestore(&bis->sync, flags); +- if (cgroup_subsys_on_dfl(io_cgrp_subsys)) +- cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); ++ cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); + put_cpu(); + } + +diff --git a/block/blk-throttle.c b/block/blk-throttle.c +index 62a3f62316df..ab847abe30b0 100644 +--- a/block/blk-throttle.c ++++ b/block/blk-throttle.c +@@ -2176,12 +2176,6 @@ bool __blk_throtl_bio(struct bio *bio) + + rcu_read_lock(); + +- if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { +- blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, +- bio->bi_iter.bi_size); +- blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); +- } +- + spin_lock_irq(&q->queue_lock); + + throtl_update_latency_buckets(td); +diff --git a/block/blk-throttle.h b/block/blk-throttle.h +index ef4b7a4de987..d1ccbfe9f797 100644 +--- a/block/blk-throttle.h ++++ b/block/blk-throttle.h +@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio) + struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg); + int rw = bio_data_dir(bio); + ++ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { ++ if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { ++ bio_set_flag(bio, BIO_CGROUP_ACCT); ++ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, ++ bio->bi_iter.bi_size); ++ } ++ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); ++ } ++ + /* iops limit is always counted */ + if (tg->has_rules_iops[rw]) + return true; +-- +2.45.1 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 65610308..0e4ba1d8 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -36,6 +36,9 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch +# Fix cgroup v1 I/O statistics in blk-throttle to count all I/Os, not just throttled ones, aligning with cgroup v2 methods. +Patch1100: 1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From baf40aa13c3f90eafb6eaa4895361c16778ed224 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Thu, 31 Oct 2024 17:27:00 +0000 Subject: [PATCH 1336/1356] kernel-6.1: update to 6.1.112 Rebase to Amazon Linux upstream version 6.1.112-124.190.amzn2023. 
Signed-off-by: Martin Harriman --- packages/kernel-6.1/Cargo.toml | 4 ++-- packages/kernel-6.1/kernel-6.1.spec | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 9267e63b..28a1943d 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/b88530d26f68ef4d2080a189cb3ff1b722a7298e63a286d4bbb86116075ba469/kernel-6.1.112-122.189.amzn2023.src.rpm" -sha512 = "77c1bea98a14f611bd59e2058495e7d6a8a117f3a378087c19ef5bbf8379d0f4e775490052578a625347f255a364a878357b3ddcea962063f0802aab09dd40b6" +url = "https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm" +sha512 = "f7c78716a78d453a0eaaae45f6aaf00a466d7b65e7def0c887f5ff267726b668f725b8a359770a93dbee477313cf297fa6b641580c073aa76dcefda39c19f5c2" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 0e4ba1d8..9084c680 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -7,7 +7,7 @@ Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/b88530d26f68ef4d2080a189cb3ff1b722a7298e63a286d4bbb86116075ba469/kernel-6.1.112-122.189.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs From 41f0c450a6f2f608135816d264929a0ffc6ea899 Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Thu, 31 Oct 2024 23:21:20 +0000 Subject: [PATCH 1337/1356] kernel-5.10: update to 5.10.227 Rebase to Amazon Linux upstream version 5.10.227-219.884.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index f2f4e011..ebf20eda 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/blobstore/c7942aadb77fa921637155fdd357a91a8deaf85f3d024fb5b5371052c8309426/kernel-5.10.226-214.880.amzn2.src.rpm" -sha512 = "2992e8cb9662a8e53ca5f9cfb56dad8706fc95147cef4bb15c312406eb55cdbbc19f7584808c1b641c0b094428a3e701e15d83e545d6b9e61d107fc95b7e98f4" +url = "https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm" +sha512 = "6729b3ef34c451685d29b736a1a98a6487d158707f503ae35cb0427daf034a442d8768e3cc0db3781bb4b17f316fd53811a211892be52c7f8d2feadf7773611c" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index ab67fd5f..de63d572 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.226 +Version: 5.10.227 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/c7942aadb77fa921637155fdd357a91a8deaf85f3d024fb5b5371052c8309426/kernel-5.10.226-214.880.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From 13ea9995d8f72f409e0619f4ee1e31a367b662de Mon Sep 17 00:00:00 2001 From: Martin Harriman Date: Thu, 31 Oct 2024 23:21:44 +0000 Subject: [PATCH 1338/1356] kernel-5.15: update to 5.15.168 Rebase to Amazon Linux upstream version 5.15.168-114.166.amzn2. Signed-off-by: Martin Harriman --- packages/kernel-5.15/Cargo.toml | 4 ++-- packages/kernel-5.15/kernel-5.15.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index ac445be6..bcedef8d 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/6e5bde865f2f534b3e5c1ae2c3065e711f6c55b7fa5e4f91f0dc55894b1ad844/kernel-5.15.167-112.166.amzn2.src.rpm" -sha512 = "90ca9a2ee14e34a34ddcd6d8d24904bccc5d9ce8560c211fad1a01ed451996aecc3d6bb5b0982fddcbb426b552701d809cfc68506bf3123f77c16a72b844a8dc" +url = "https://cdn.amazonlinux.com/blobstore/9cea3dae03703f3c4c78fcb1302eeee5fe4c07ebf53d783cf3aaf7e4f30a6d39/kernel-5.15.168-114.166.amzn2.src.rpm" +sha512 = "5b0b0e2640bb04d4868b8820781029d8148c7939802c1b4edcf580533848afe70f7c6372e6e2306dfc017d2b32120a446ada15b105f7b2fe766b9382f83937d3" [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 8f477284..d15c5932 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.15 -Version: 5.15.167 +Version: 5.15.168 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-srpm-url.sh to get this. 
-Source0: https://cdn.amazonlinux.com/blobstore/6e5bde865f2f534b3e5c1ae2c3065e711f6c55b7fa5e4f91f0dc55894b1ad844/kernel-5.15.167-112.166.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/9cea3dae03703f3c4c78fcb1302eeee5fe4c07ebf53d783cf3aaf7e4f30a6d39/kernel-5.15.168-114.166.amzn2.src.rpm Source100: config-bottlerocket # Help out-of-tree module builds run `make prepare` automatically. From b140da1e889016af5a000eb51fa4633c7b4a5cc7 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 30 Oct 2024 23:32:42 +0000 Subject: [PATCH 1339/1356] kmod-5.10-nvidia: update SPDX for open gpu drivers Signed-off-by: Matthew Yeazel --- packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec index 3e8d085c..7e051afb 100644 --- a/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec +++ b/packages/kmod-5.10-nvidia/kmod-5.10-nvidia.spec @@ -71,7 +71,7 @@ Requires: %{name}-tesla(fabricmanager) %package open-gpu-%{tesla_major} Summary: NVIDIA %{tesla_major} Open GPU driver Version: %{tesla_ver} -License: MIT OR GPL-2.0-only +License: MIT AND GPL-2.0-only Requires: %{_cross_os}variant-platform(aws) %description open-gpu-%{tesla_major} From ce5f454e65a1c9dbc410cbc5dcedf45c06b7265a Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 30 Oct 2024 23:33:30 +0000 Subject: [PATCH 1340/1356] kmod-5.15-nvidia: update SPDX for open gpu drivers Signed-off-by: Matthew Yeazel --- packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec index 01474826..5da7a238 100644 --- a/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec +++ b/packages/kmod-5.15-nvidia/kmod-5.15-nvidia.spec @@ -71,7 +71,7 @@ Requires: %{name}-tesla(fabricmanager) %package open-gpu-%{tesla_major} Summary: NVIDIA %{tesla_major} Open GPU driver Version: %{tesla_ver} -License: MIT OR GPL-2.0-only +License: MIT AND GPL-2.0-only Requires: %{_cross_os}variant-platform(aws) %description open-gpu-%{tesla_major} From a0f17d77d7b507567e0f84e4b940cfc25d29c834 Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Wed, 30 Oct 2024 23:33:47 +0000 Subject: [PATCH 1341/1356] kmod-6.1-nvidia: update SPDX for open gpu drivers Signed-off-by: Matthew Yeazel --- packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec index d500071b..b3b447f8 100644 --- a/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec +++ b/packages/kmod-6.1-nvidia/kmod-6.1-nvidia.spec @@ -71,7 +71,7 @@ Requires: %{name}-tesla(fabricmanager) %package open-gpu-%{tesla_major} Summary: NVIDIA %{tesla_major} Open GPU driver Version: %{tesla_ver} -License: MIT OR GPL-2.0-only +License: MIT AND GPL-2.0-only Requires: %{_cross_os}variant-platform(aws) %description open-gpu-%{tesla_major} From baff1c6700d203dfa306dd15e1d2e043c429f218 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 17 Oct 2024 20:11:32 +0000 Subject: [PATCH 1342/1356] kernel: build external neuron kmod Build the external Neuron kmod as part of the kernel build, so it can be signed with the ephemeral module signing key. That allows it to be loaded at runtime when kernel lockdown is in effect. 
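A quick way to confirm the result on a running host (illustrative only; assumes an aws variant with the Neuron module already loaded) is to check that the module carries the signature metadata a lockdown kernel requires:

    # signer/sig_key fields are only present when the kmod was signed at build time
    modinfo neuron | grep -E '^(signer|sig_key|sig_hashalgo)'
    cat /sys/kernel/security/lockdown    # e.g. none [integrity] confidentiality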
Since autoload doesn't work for this module, add a custom instance of the modprobe unit that only runs if Neuron hardware is detected, and run it as part of sysinit.target. Signed-off-by: Ben Cressey --- packages/kernel-5.10/.gitignore | 1 + packages/kernel-5.10/Cargo.toml | 7 +- packages/kernel-5.10/kernel-5.10.spec | 64 ++++++++++++++++++- ...-srpm-url.sh => latest-kernel-srpm-url.sh} | 0 .../kernel-5.10/latest-neuron-srpm-url.sh | 9 +++ .../modprobe@neuron.service.drop-in.conf | 7 ++ .../neuron-sysinit.target.drop-in.conf | 2 + packages/kernel-5.15/.gitignore | 1 + packages/kernel-5.15/Cargo.toml | 7 +- packages/kernel-5.15/kernel-5.15.spec | 60 ++++++++++++++++- ...-srpm-url.sh => latest-kernel-srpm-url.sh} | 0 .../kernel-5.15/latest-neuron-srpm-url.sh | 9 +++ .../modprobe@neuron.service.drop-in.conf | 7 ++ .../neuron-sysinit.target.drop-in.conf | 2 + packages/kernel-6.1/.gitignore | 1 + packages/kernel-6.1/Cargo.toml | 7 +- packages/kernel-6.1/kernel-6.1.spec | 59 ++++++++++++++++- ...-srpm-url.sh => latest-kernel-srpm-url.sh} | 0 packages/kernel-6.1/latest-neuron-srpm-url.sh | 9 +++ .../modprobe@neuron.service.drop-in.conf | 7 ++ .../neuron-sysinit.target.drop-in.conf | 2 + 21 files changed, 255 insertions(+), 6 deletions(-) create mode 100644 packages/kernel-5.10/.gitignore rename packages/kernel-5.10/{latest-srpm-url.sh => latest-kernel-srpm-url.sh} (100%) create mode 100755 packages/kernel-5.10/latest-neuron-srpm-url.sh create mode 100644 packages/kernel-5.10/modprobe@neuron.service.drop-in.conf create mode 100644 packages/kernel-5.10/neuron-sysinit.target.drop-in.conf create mode 100644 packages/kernel-5.15/.gitignore rename packages/kernel-5.15/{latest-srpm-url.sh => latest-kernel-srpm-url.sh} (100%) create mode 100755 packages/kernel-5.15/latest-neuron-srpm-url.sh create mode 100644 packages/kernel-5.15/modprobe@neuron.service.drop-in.conf create mode 100644 packages/kernel-5.15/neuron-sysinit.target.drop-in.conf create mode 100644 packages/kernel-6.1/.gitignore rename packages/kernel-6.1/{latest-srpm-url.sh => latest-kernel-srpm-url.sh} (100%) create mode 100755 packages/kernel-6.1/latest-neuron-srpm-url.sh create mode 100644 packages/kernel-6.1/modprobe@neuron.service.drop-in.conf create mode 100644 packages/kernel-6.1/neuron-sysinit.target.drop-in.conf diff --git a/packages/kernel-5.10/.gitignore b/packages/kernel-5.10/.gitignore new file mode 100644 index 00000000..e7a9c134 --- /dev/null +++ b/packages/kernel-5.10/.gitignore @@ -0,0 +1 @@ +*.rpm diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index ebf20eda..7b617906 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -12,9 +12,14 @@ package-name = "kernel-5.10" path = "../packages.rs" [[package.metadata.build-package.external-files]] -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm" sha512 = "6729b3ef34c451685d29b736a1a98a6487d158707f503ae35cb0427daf034a442d8768e3cc0db3781bb4b17f316fd53811a211892be52c7f8d2feadf7773611c" +[[package.metadata.build-package.external-files]] +# Use latest-neuron-srpm-url.sh to get this. 
+url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" +sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" + [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index de63d572..e613c55e 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -6,10 +6,16 @@ Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm +# Use latest-neuron-srpm-url.sh to get this. +Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm Source100: config-bottlerocket +# Neuron-related drop-ins. +Source220: neuron-sysinit.target.drop-in.conf +Source221: modprobe@neuron.service.drop-in.conf + # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Enable INITRAMFS_FORCE config option for our use case. @@ -42,6 +48,11 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# Pull in platform-dependent modules. +%if "%{_cross_arch}" == "x86_64" +Requires: (%{name}-modules-neuron if (%{_cross_os}variant-platform(aws) without %{_cross_os}variant-flavor(nvidia))) +%endif + # The 5.10 kernel is not FIPS certified. Conflicts: %{_cross_os}image-feature(fips) @@ -72,6 +83,18 @@ Summary: Modules for the Linux kernel %description modules %{summary}. +%if "%{_cross_arch}" == "x86_64" +%package modules-neuron +Summary: Modules for the Linux kernel with Neuron hardware +Requires: %{name} +Requires: %{_cross_os}ghostdog +Requires: %{_cross_os}variant-platform(aws) +Conflicts: %{_cross_os}variant-flavor(nvidia) + +%description modules-neuron +%{summary}. 
+%endif + %package headers Summary: Header files for the Linux kernel for use by glibc @@ -110,6 +133,13 @@ scripts/kconfig/merge_config.sh \ rm -f ../config-* ../*.patch +%if "%{_cross_arch}" == "x86_64" +cd %{_builddir} +rpm2cpio %{SOURCE1} | cpio -idmu './usr/src/aws-neuronx-*' +find usr/src/ -mindepth 1 -maxdepth 1 -type d -exec mv {} neuron \; +rm -r usr +%endif + %global kmake \ make -s\\\ ARCH="%{_cross_karch}"\\\ @@ -125,10 +155,22 @@ make -s\\\ %kmake %{?_smp_mflags} %{_cross_kimage} %kmake %{?_smp_mflags} modules +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron +%endif + %install %kmake %{?_smp_mflags} headers_install %kmake %{?_smp_mflags} modules_install +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron modules_install V=1 +mv \ + %{buildroot}%{kernel_libdir}/extra/%{_builddir}/neuron/neuron.ko* \ + %{buildroot}%{kernel_libdir}/extra +rm -rf %{buildroot}%{kernel_libdir}/extra/%{_builddir} +%endif + install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz install -m 0644 .config %{buildroot}/boot/config @@ -237,6 +279,18 @@ rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source +# Install a copy of System.map so that module dependencies can be regenerated. +install -p -m 0600 System.map %{buildroot}%{kernel_libdir} + +%if "%{_cross_arch}" == "x86_64" +# Add Neuron-related drop-ins to load the module when the hardware is present. +mkdir -p %{buildroot}%{_cross_unitdir}/sysinit.target.d +install -p -m 0644 %{S:220} %{buildroot}%{_cross_unitdir}/sysinit.target.d/neuron.conf + +mkdir -p %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d +install -p -m 0644 %{S:221} %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif + %files %license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note %{_cross_attribution_file} @@ -246,6 +300,14 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %files modules %dir %{_cross_libdir}/modules %{_cross_libdir}/modules/* +%exclude %{kernel_libdir}/extra/neuron.ko.gz + +%if "%{_cross_arch}" == "x86_64" +%files modules-neuron +%{kernel_libdir}/extra/neuron.ko.gz +%{_cross_unitdir}/sysinit.target.d/neuron.conf +%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif %files headers %dir %{_cross_includedir}/asm diff --git a/packages/kernel-5.10/latest-srpm-url.sh b/packages/kernel-5.10/latest-kernel-srpm-url.sh similarity index 100% rename from packages/kernel-5.10/latest-srpm-url.sh rename to packages/kernel-5.10/latest-kernel-srpm-url.sh diff --git a/packages/kernel-5.10/latest-neuron-srpm-url.sh b/packages/kernel-5.10/latest-neuron-srpm-url.sh new file mode 100755 index 00000000..5bb6c85e --- /dev/null +++ b/packages/kernel-5.10/latest-neuron-srpm-url.sh @@ -0,0 +1,9 @@ +#!/bin/sh +cmd=" +dnf install -q -y --releasever=latest yum-utils && +dnf download -q --repofrompath neuron,https://yum.repos.neuron.amazonaws.com --repo=neuron --urls aws-neuronx-dkms +" +docker run --rm amazonlinux:2023 bash -c "${cmd}" \ + | grep '^http' \ + | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. 
\ + | sed 's_:/_://_' diff --git a/packages/kernel-5.10/modprobe@neuron.service.drop-in.conf b/packages/kernel-5.10/modprobe@neuron.service.drop-in.conf new file mode 100644 index 00000000..e9174355 --- /dev/null +++ b/packages/kernel-5.10/modprobe@neuron.service.drop-in.conf @@ -0,0 +1,7 @@ +[Unit] +ConditionPathExists=!/etc/.neuron-modprobe-done + +[Service] +ExecCondition=/usr/bin/touch /etc/.neuron-modprobe-done +ExecCondition=/usr/bin/ghostdog neuron-present +RemainAfterExit=true diff --git a/packages/kernel-5.10/neuron-sysinit.target.drop-in.conf b/packages/kernel-5.10/neuron-sysinit.target.drop-in.conf new file mode 100644 index 00000000..11c78234 --- /dev/null +++ b/packages/kernel-5.10/neuron-sysinit.target.drop-in.conf @@ -0,0 +1,2 @@ +[Unit] +Wants=modprobe@neuron.service diff --git a/packages/kernel-5.15/.gitignore b/packages/kernel-5.15/.gitignore new file mode 100644 index 00000000..e7a9c134 --- /dev/null +++ b/packages/kernel-5.15/.gitignore @@ -0,0 +1 @@ +*.rpm diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index bcedef8d..386865d1 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -12,9 +12,14 @@ package-name = "kernel-5.15" path = "../packages.rs" [[package.metadata.build-package.external-files]] -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/9cea3dae03703f3c4c78fcb1302eeee5fe4c07ebf53d783cf3aaf7e4f30a6d39/kernel-5.15.168-114.166.amzn2.src.rpm" sha512 = "5b0b0e2640bb04d4868b8820781029d8148c7939802c1b4edcf580533848afe70f7c6372e6e2306dfc017d2b32120a446ada15b105f7b2fe766b9382f83937d3" +[[package.metadata.build-package.external-files]] +# Use latest-neuron-srpm-url.sh to get this. +url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" +sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" + [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index d15c5932..2abe2e80 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -6,10 +6,16 @@ Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/blobstore/9cea3dae03703f3c4c78fcb1302eeee5fe4c07ebf53d783cf3aaf7e4f30a6d39/kernel-5.15.168-114.166.amzn2.src.rpm +# Use latest-neuron-srpm-url.sh to get this. +Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm Source100: config-bottlerocket +# Neuron-related drop-ins. +Source220: neuron-sysinit.target.drop-in.conf +Source221: modprobe@neuron.service.drop-in.conf + # Help out-of-tree module builds run `make prepare` automatically. Patch1001: 1001-Makefile-add-prepare-target-for-external-modules.patch # Expose tools/* targets for out-of-tree module builds. @@ -37,6 +43,11 @@ Requires: %{_cross_os}microcode-licenses Requires: %{name}-modules = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} +# Pull in platform-dependent modules. +%if "%{_cross_arch}" == "x86_64" +Requires: (%{name}-modules-neuron if (%{_cross_os}variant-platform(aws) without %{_cross_os}variant-flavor(nvidia))) +%endif + # The 5.15 kernel is not FIPS certified. 
Conflicts: %{_cross_os}image-feature(fips) @@ -67,6 +78,18 @@ Summary: Modules for the Linux kernel %description modules %{summary}. +%if "%{_cross_arch}" == "x86_64" +%package modules-neuron +Summary: Modules for the Linux kernel with Neuron hardware +Requires: %{name} +Requires: %{_cross_os}ghostdog +Requires: %{_cross_os}variant-platform(aws) +Conflicts: %{_cross_os}variant-flavor(nvidia) + +%description modules-neuron +%{summary}. +%endif + %package headers Summary: Header files for the Linux kernel for use by glibc @@ -105,6 +128,13 @@ scripts/kconfig/merge_config.sh \ rm -f ../config-* ../*.patch +%if "%{_cross_arch}" == "x86_64" +cd %{_builddir} +rpm2cpio %{SOURCE1} | cpio -idmu './usr/src/aws-neuronx-*' +find usr/src/ -mindepth 1 -maxdepth 1 -type d -exec mv {} neuron \; +rm -r usr +%endif + %global kmake \ make -s\\\ ARCH="%{_cross_karch}"\\\ @@ -120,10 +150,18 @@ make -s\\\ %kmake %{?_smp_mflags} %{_cross_kimage} %kmake %{?_smp_mflags} modules +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron +%endif + %install %kmake %{?_smp_mflags} headers_install %kmake %{?_smp_mflags} modules_install +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron modules_install +%endif + install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz install -m 0644 .config %{buildroot}/boot/config @@ -232,6 +270,18 @@ rm -f %{buildroot}%{kernel_libdir}/build %{buildroot}%{kernel_libdir}/source ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/build ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source +# Install a copy of System.map so that module dependencies can be regenerated. +install -p -m 0600 System.map %{buildroot}%{kernel_libdir} + +%if "%{_cross_arch}" == "x86_64" +# Add Neuron-related drop-ins to load the module when the hardware is present. 
+mkdir -p %{buildroot}%{_cross_unitdir}/sysinit.target.d +install -p -m 0644 %{S:220} %{buildroot}%{_cross_unitdir}/sysinit.target.d/neuron.conf + +mkdir -p %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d +install -p -m 0644 %{S:221} %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif + %files %license COPYING LICENSES/preferred/GPL-2.0 LICENSES/exceptions/Linux-syscall-note %{_cross_attribution_file} @@ -241,6 +291,14 @@ ln -sf %{_usrsrc}/kernels/%{version} %{buildroot}%{kernel_libdir}/source %files modules %dir %{_cross_libdir}/modules %{_cross_libdir}/modules/* +%exclude %{kernel_libdir}/extra/neuron.ko.gz + +%if "%{_cross_arch}" == "x86_64" +%files modules-neuron +%{kernel_libdir}/extra/neuron.ko.gz +%{_cross_unitdir}/sysinit.target.d/neuron.conf +%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif %files headers %dir %{_cross_includedir}/asm diff --git a/packages/kernel-5.15/latest-srpm-url.sh b/packages/kernel-5.15/latest-kernel-srpm-url.sh similarity index 100% rename from packages/kernel-5.15/latest-srpm-url.sh rename to packages/kernel-5.15/latest-kernel-srpm-url.sh diff --git a/packages/kernel-5.15/latest-neuron-srpm-url.sh b/packages/kernel-5.15/latest-neuron-srpm-url.sh new file mode 100755 index 00000000..5bb6c85e --- /dev/null +++ b/packages/kernel-5.15/latest-neuron-srpm-url.sh @@ -0,0 +1,9 @@ +#!/bin/sh +cmd=" +dnf install -q -y --releasever=latest yum-utils && +dnf download -q --repofrompath neuron,https://yum.repos.neuron.amazonaws.com --repo=neuron --urls aws-neuronx-dkms +" +docker run --rm amazonlinux:2023 bash -c "${cmd}" \ + | grep '^http' \ + | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ + | sed 's_:/_://_' diff --git a/packages/kernel-5.15/modprobe@neuron.service.drop-in.conf b/packages/kernel-5.15/modprobe@neuron.service.drop-in.conf new file mode 100644 index 00000000..e9174355 --- /dev/null +++ b/packages/kernel-5.15/modprobe@neuron.service.drop-in.conf @@ -0,0 +1,7 @@ +[Unit] +ConditionPathExists=!/etc/.neuron-modprobe-done + +[Service] +ExecCondition=/usr/bin/touch /etc/.neuron-modprobe-done +ExecCondition=/usr/bin/ghostdog neuron-present +RemainAfterExit=true diff --git a/packages/kernel-5.15/neuron-sysinit.target.drop-in.conf b/packages/kernel-5.15/neuron-sysinit.target.drop-in.conf new file mode 100644 index 00000000..11c78234 --- /dev/null +++ b/packages/kernel-5.15/neuron-sysinit.target.drop-in.conf @@ -0,0 +1,2 @@ +[Unit] +Wants=modprobe@neuron.service diff --git a/packages/kernel-6.1/.gitignore b/packages/kernel-6.1/.gitignore new file mode 100644 index 00000000..e7a9c134 --- /dev/null +++ b/packages/kernel-6.1/.gitignore @@ -0,0 +1 @@ +*.rpm diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 28a1943d..baf1944b 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -12,9 +12,14 @@ package-name = "kernel-6.1" path = "../packages.rs" [[package.metadata.build-package.external-files]] -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm" sha512 = "f7c78716a78d453a0eaaae45f6aaf00a466d7b65e7def0c887f5ff267726b668f725b8a359770a93dbee477313cf297fa6b641580c073aa76dcefda39c19f5c2" +[[package.metadata.build-package.external-files]] +# Use latest-neuron-srpm-url.sh to get this. 
+url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" +sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" + [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 9084c680..ea7f8fc9 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -6,8 +6,11 @@ Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ -# Use latest-srpm-url.sh to get this. +# Use latest-kernel-srpm-url.sh to get this. Source0: https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm +# Use latest-neuron-srpm-url.sh to get this. +Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm + Source100: config-bottlerocket # This list of FIPS modules is extracted from /etc/fipsmodules in the initramfs @@ -19,6 +22,10 @@ Source202: fipsmodules-aarch64 # Adjust kernel-devel mount behavior if not squashfs. Source210: var-lib-kernel-devel-lower.mount.drop-in.conf.in +# Neuron-related drop-ins. +Source220: neuron-sysinit.target.drop-in.conf +Source221: modprobe@neuron.service.drop-in.conf + # Bootconfig snippets to adjust the default kernel command line for the platform. Source300: bootconfig-aws.conf Source301: bootconfig-vmware.conf @@ -64,6 +71,9 @@ Requires: (%{name}-bootconfig-metal if %{_cross_os}variant-platform(vmware)) # Pull in platform-dependent modules. Requires: (%{name}-modules-metal if %{_cross_os}variant-platform(metal)) +%if "%{_cross_arch}" == "x86_64" +Requires: (%{name}-modules-neuron if (%{_cross_os}variant-platform(aws) without %{_cross_os}variant-flavor(nvidia))) +%endif # Pull in FIPS-related files if needed. Requires: (%{name}-fips if %{_cross_os}image-feature(fips)) @@ -129,6 +139,22 @@ Summary: Modules for the Linux kernel on bare metal %description modules-metal %{summary}. +%if "%{_cross_arch}" == "x86_64" +%package modules-neuron +Summary: Modules for the Linux kernel with Neuron hardware +Requires: %{name} +Requires: %{_cross_os}ghostdog +Requires: %{_cross_os}variant-platform(aws) +Conflicts: %{_cross_os}variant-flavor(nvidia) + +# Previously the neuron kmod was in a separate package, so provide that +# name for backwards compatibility. +Provides: %{_cross_os}kmod-6.1-neuron + +%description modules-neuron +%{summary}. 
+%endif + %package headers Summary: Header files for the Linux kernel for use by glibc @@ -175,6 +201,13 @@ scripts/kconfig/merge_config.sh \ rm -f ../config-* ../*.patch +%if "%{_cross_arch}" == "x86_64" +cd %{_builddir} +rpm2cpio %{SOURCE1} | cpio -idmu './usr/src/aws-neuronx-*' +find usr/src/ -mindepth 1 -maxdepth 1 -type d -exec mv {} neuron \; +rm -r usr +%endif + %global kmake \ make -s\\\ ARCH="%{_cross_karch}"\\\ @@ -190,10 +223,18 @@ make -s\\\ %kmake %{?_smp_mflags} %{_cross_kimage} %kmake %{?_smp_mflags} modules +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron +%endif + %install %kmake %{?_smp_mflags} headers_install %kmake %{?_smp_mflags} modules_install +%if "%{_cross_arch}" == "x86_64" +%kmake %{?_smp_mflags} M=%{_builddir}/neuron modules_install +%endif + install -d %{buildroot}/boot install -T -m 0755 arch/%{_cross_karch}/boot/%{_cross_kimage} %{buildroot}/boot/vmlinuz install -m 0644 .config %{buildroot}/boot/config @@ -328,6 +369,15 @@ mkdir -p %{buildroot}%{_cross_unitdir}/"${LOWERPATH}.mount.d" sed -e 's|PREFIX|%{_cross_prefix}|g' %{S:210} \ > %{buildroot}%{_cross_unitdir}/"${LOWERPATH}.mount.d"/no-squashfs.conf +%if "%{_cross_arch}" == "x86_64" +# Add Neuron-related drop-ins to load the module when the hardware is present. +mkdir -p %{buildroot}%{_cross_unitdir}/sysinit.target.d +install -p -m 0644 %{S:220} %{buildroot}%{_cross_unitdir}/sysinit.target.d/neuron.conf + +mkdir -p %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d +install -p -m 0644 %{S:221} %{buildroot}%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif + # Install platform-specific bootconfig snippets. install -d %{buildroot}%{_cross_bootconfigdir} install -p -m 0644 %{S:300} %{buildroot}%{_cross_bootconfigdir}/05-aws.conf @@ -1376,4 +1426,11 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/drivers/net/mdio.ko.gz %{_cross_kmoddir}/kernel/drivers/scsi/snic/snic.ko.gz +%if "%{_cross_arch}" == "x86_64" +%files modules-neuron +%{_cross_kmoddir}/extra/neuron.ko.gz +%{_cross_unitdir}/sysinit.target.d/neuron.conf +%{_cross_unitdir}/modprobe@neuron.service.d/neuron.conf +%endif + %changelog diff --git a/packages/kernel-6.1/latest-srpm-url.sh b/packages/kernel-6.1/latest-kernel-srpm-url.sh similarity index 100% rename from packages/kernel-6.1/latest-srpm-url.sh rename to packages/kernel-6.1/latest-kernel-srpm-url.sh diff --git a/packages/kernel-6.1/latest-neuron-srpm-url.sh b/packages/kernel-6.1/latest-neuron-srpm-url.sh new file mode 100755 index 00000000..5bb6c85e --- /dev/null +++ b/packages/kernel-6.1/latest-neuron-srpm-url.sh @@ -0,0 +1,9 @@ +#!/bin/sh +cmd=" +dnf install -q -y --releasever=latest yum-utils && +dnf download -q --repofrompath neuron,https://yum.repos.neuron.amazonaws.com --repo=neuron --urls aws-neuronx-dkms +" +docker run --rm amazonlinux:2023 bash -c "${cmd}" \ + | grep '^http' \ + | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. 
\ + | sed 's_:/_://_' diff --git a/packages/kernel-6.1/modprobe@neuron.service.drop-in.conf b/packages/kernel-6.1/modprobe@neuron.service.drop-in.conf new file mode 100644 index 00000000..e9174355 --- /dev/null +++ b/packages/kernel-6.1/modprobe@neuron.service.drop-in.conf @@ -0,0 +1,7 @@ +[Unit] +ConditionPathExists=!/etc/.neuron-modprobe-done + +[Service] +ExecCondition=/usr/bin/touch /etc/.neuron-modprobe-done +ExecCondition=/usr/bin/ghostdog neuron-present +RemainAfterExit=true diff --git a/packages/kernel-6.1/neuron-sysinit.target.drop-in.conf b/packages/kernel-6.1/neuron-sysinit.target.drop-in.conf new file mode 100644 index 00000000..11c78234 --- /dev/null +++ b/packages/kernel-6.1/neuron-sysinit.target.drop-in.conf @@ -0,0 +1,2 @@ +[Unit] +Wants=modprobe@neuron.service From 95651e514effe6a66aac3b6fbe342a6a6d172fc0 Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 17 Oct 2024 20:19:29 +0000 Subject: [PATCH 1343/1356] packages: drop kmod-6.1-neuron The Neuron driver is now built as part of the other kernel builds. Signed-off-by: Ben Cressey --- packages/kmod-6.1-neuron/.gitignore | 1 - ...ild-do-not-outline-atomics-for-arm64.patch | 22 ------ packages/kmod-6.1-neuron/Cargo.toml | 20 ----- packages/kmod-6.1-neuron/kmod-6.1-neuron.spec | 77 ------------------- packages/kmod-6.1-neuron/latest-srpm-url.sh | 9 --- .../kmod-6.1-neuron/neuron-modules-load.conf | 1 - .../neuron-systemd-modules-load.drop-in.conf | 2 - 7 files changed, 132 deletions(-) delete mode 100644 packages/kmod-6.1-neuron/.gitignore delete mode 100644 packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch delete mode 100644 packages/kmod-6.1-neuron/Cargo.toml delete mode 100644 packages/kmod-6.1-neuron/kmod-6.1-neuron.spec delete mode 100755 packages/kmod-6.1-neuron/latest-srpm-url.sh delete mode 100644 packages/kmod-6.1-neuron/neuron-modules-load.conf delete mode 100644 packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf diff --git a/packages/kmod-6.1-neuron/.gitignore b/packages/kmod-6.1-neuron/.gitignore deleted file mode 100644 index e7a9c134..00000000 --- a/packages/kmod-6.1-neuron/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.rpm diff --git a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch b/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch deleted file mode 100644 index c52edefb..00000000 --- a/packages/kmod-6.1-neuron/0001-kbuild-do-not-outline-atomics-for-arm64.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 5c77e9c0db55dd35f162ec41fa5a62856121f5da Mon Sep 17 00:00:00 2001 -From: Ben Cressey -Date: Fri, 28 Jun 2024 02:06:55 +0000 -Subject: [PATCH] kbuild: do not outline atomics for arm64 - -Signed-off-by: Ben Cressey ---- - usr/src/aws-neuronx-2.18.12.0/Kbuild | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/usr/src/aws-neuronx-2.18.12.0/Kbuild b/usr/src/aws-neuronx-2.18.12.0/Kbuild -index 11f8490..6535608 100644 ---- a/usr/src/aws-neuronx-2.18.12.0/Kbuild -+++ b/usr/src/aws-neuronx-2.18.12.0/Kbuild -@@ -16,3 +16,4 @@ neuron-objs += v3/notific.o v3/neuron_dhal_v3.o - - ccflags-y += -O3 -Wall -Werror -Wno-declaration-after-statement -Wunused-macros -Wunused-local-typedefs - ccflags-y += -I$(src)/ -+ccflags-$(CONFIG_ARM64) += -mno-outline-atomics --- -2.45.1 - diff --git a/packages/kmod-6.1-neuron/Cargo.toml b/packages/kmod-6.1-neuron/Cargo.toml deleted file mode 100644 index 63cb6b0b..00000000 --- a/packages/kmod-6.1-neuron/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "kmod-6_1-neuron" -version = "0.1.0" 
-edition = "2021" -publish = false -build = "../build.rs" - -[lib] -path = "../packages.rs" - -[package.metadata.build-package] -package-name = "kmod-6.1-neuron" -releases-url = "https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/runtime/aws-neuronx-dkms/index.html" - -[[package.metadata.build-package.external-files]] -url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" -sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" - -[build-dependencies] -kernel-6_1 = { path = "../kernel-6.1" } diff --git a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec b/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec deleted file mode 100644 index ab1a7293..00000000 --- a/packages/kmod-6.1-neuron/kmod-6.1-neuron.spec +++ /dev/null @@ -1,77 +0,0 @@ -Name: %{_cross_os}kmod-6.1-neuron -Version: 2.18.12.0 -Release: 1%{?dist} -Epoch: 1 -Summary: Neuron drivers for the 6.1 kernel -License: GPL-2.0-only -URL: https://awsdocs-neuron.readthedocs-hosted.com/en/latest/ - -Source0: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-%{version}.noarch.rpm -Source1: neuron-modules-load.conf -Source2: neuron-systemd-modules-load.drop-in.conf -Patch0001: 0001-kbuild-do-not-outline-atomics-for-arm64.patch - -BuildRequires: %{_cross_os}kernel-6.1-archive - -%description -%{summary}. - -%package devel -Summary: Files for development using the Neuron drivers -Requires: %{name} - -%description devel -%{summary}. - -%prep -rpm2cpio %{SOURCE0} | cpio -idmv -tar -xf %{_cross_datadir}/bottlerocket/kernel-devel.tar.xz -%autopatch -p1 - -%global neuron_sources usr/src/aws-neuronx-%{version} -%global kernel_sources %{_builddir}/kernel-devel - -%build -pushd %{_builddir}/%{neuron_sources} -%make_build \ - -C %{kernel_sources} \ - M=${PWD} \ - ARCH=%{_cross_karch} \ - CROSS_COMPILE=%{_cross_target}- \ - INSTALL_MOD_STRIP=1 \ - %{nil} -gzip -9 neuron.ko -popd - -%install -pushd %{_builddir}/%{neuron_sources} -export KVER="$(cat %{kernel_sources}/include/config/kernel.release)" -export KMODDIR="%{_cross_libdir}/modules/${KVER}/extra" -install -d "%{buildroot}${KMODDIR}" -install -p -m 0644 neuron.ko.gz "%{buildroot}${KMODDIR}" -popd - -# Install modules-load.d drop-in to autoload required kernel modules -install -d %{buildroot}%{_cross_libdir}/modules-load.d -install -p -m 0644 %{S:1} %{buildroot}%{_cross_libdir}/modules-load.d/neuron.conf - -# Install systemd-modules-load drop-in to ensure that depmod runs. 
-install -d %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d -install -p -m 0644 %{S:2} %{buildroot}%{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf - -# Install the shared header file -install -d %{buildroot}%{_cross_includedir}/share -install -p -m 0644 %{_builddir}/%{neuron_sources}/share/neuron_driver_shared.h %{buildroot}/%{_cross_includedir}/share/neuron_driver_shared.h -install -p -m 0644 %{_builddir}/%{neuron_sources}/neuron_ioctl.h %{buildroot}/%{_cross_includedir}/neuron_ioctl.h - -%files -%license %{neuron_sources}/LICENSE -%{_cross_attribution_file} -%{_cross_libdir}/modules/*/extra/neuron.ko.gz -%{_cross_libdir}/modules-load.d/neuron.conf -%{_cross_unitdir}/systemd-modules-load.service.d/neuron.conf - -%files devel -%dir %{_cross_includedir}/share/ -%{_cross_includedir}/share/neuron_driver_shared.h -%{_cross_includedir}/neuron_ioctl.h diff --git a/packages/kmod-6.1-neuron/latest-srpm-url.sh b/packages/kmod-6.1-neuron/latest-srpm-url.sh deleted file mode 100755 index 5bb6c85e..00000000 --- a/packages/kmod-6.1-neuron/latest-srpm-url.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -cmd=" -dnf install -q -y --releasever=latest yum-utils && -dnf download -q --repofrompath neuron,https://yum.repos.neuron.amazonaws.com --repo=neuron --urls aws-neuronx-dkms -" -docker run --rm amazonlinux:2023 bash -c "${cmd}" \ - | grep '^http' \ - | xargs --max-args=1 --no-run-if-empty realpath --canonicalize-missing --relative-to=. \ - | sed 's_:/_://_' diff --git a/packages/kmod-6.1-neuron/neuron-modules-load.conf b/packages/kmod-6.1-neuron/neuron-modules-load.conf deleted file mode 100644 index aba019c2..00000000 --- a/packages/kmod-6.1-neuron/neuron-modules-load.conf +++ /dev/null @@ -1 +0,0 @@ -neuron diff --git a/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf b/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf deleted file mode 100644 index 0b130296..00000000 --- a/packages/kmod-6.1-neuron/neuron-systemd-modules-load.drop-in.conf +++ /dev/null @@ -1,2 +0,0 @@ -[Service] -ExecStartPre=-/usr/bin/depmod From 85334939d50a6b9f785514664e16808487d5a810 Mon Sep 17 00:00:00 2001 From: Yutong Sun Date: Mon, 11 Nov 2024 19:19:26 +0000 Subject: [PATCH 1344/1356] kernel-6.1: fix typo for metal bootconfig loading Signed-off-by: Yutong Sun --- packages/kernel-6.1/kernel-6.1.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index ea7f8fc9..623c88dc 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -67,7 +67,7 @@ Requires: %{name}-devel = %{version}-%{release} # Pull in platform-dependent boot config snippets. Requires: (%{name}-bootconfig-aws if %{_cross_os}variant-platform(aws)) Requires: (%{name}-bootconfig-vmware if %{_cross_os}variant-platform(vmware)) -Requires: (%{name}-bootconfig-metal if %{_cross_os}variant-platform(vmware)) +Requires: (%{name}-bootconfig-metal if %{_cross_os}variant-platform(metal)) # Pull in platform-dependent modules. Requires: (%{name}-modules-metal if %{_cross_os}variant-platform(metal)) From a32f8b8076baa3112a402b310cdc13b604450043 Mon Sep 17 00:00:00 2001 From: Cezar Rata Date: Thu, 14 Nov 2024 18:50:55 +0000 Subject: [PATCH 1345/1356] kernel-5.10: update to 5.10.228 Rebase to Amazon Linux upstream version 5.10.228-219.884.amzn2. 
Signed-off-by: Cezar Rata --- packages/kernel-5.10/Cargo.toml | 4 ++-- packages/kernel-5.10/kernel-5.10.spec | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index 7b617906..e14e7574 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-kernel-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm" -sha512 = "6729b3ef34c451685d29b736a1a98a6487d158707f503ae35cb0427daf034a442d8768e3cc0db3781bb4b17f316fd53811a211892be52c7f8d2feadf7773611c" +url = "https://cdn.amazonlinux.com/blobstore/0af5f80d00a3d5a867d4959d74751bc7d24b1bcb0ab8a5de558ae301ae0fa52e/kernel-5.10.228-219.884.amzn2.src.rpm" +sha512 = "124c6d662c48dc4cb8caf035e9ee44c9c47bc5e19141c319b94abc441dce4e2afa24e30b9c0196665aa267b6ef85004153b3f5cddfe9191c2c8927ddb4175fbd" [[package.metadata.build-package.external-files]] # Use latest-neuron-srpm-url.sh to get this. diff --git a/packages/kernel-5.10/kernel-5.10.spec b/packages/kernel-5.10/kernel-5.10.spec index e613c55e..88d5ab8f 100644 --- a/packages/kernel-5.10/kernel-5.10.spec +++ b/packages/kernel-5.10/kernel-5.10.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-5.10 -Version: 5.10.227 +Version: 5.10.228 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-kernel-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/blobstore/a9b5c6b9ca0d2a84e4dc3b963a73017a055d602139d2293ff394d33b08111be7/kernel-5.10.227-219.884.amzn2.src.rpm +Source0: https://cdn.amazonlinux.com/blobstore/0af5f80d00a3d5a867d4959d74751bc7d24b1bcb0ab8a5de558ae301ae0fa52e/kernel-5.10.228-219.884.amzn2.src.rpm # Use latest-neuron-srpm-url.sh to get this. Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm Source100: config-bottlerocket From 1d7ec49f9edd1d4369714822cd9125ef06c855dd Mon Sep 17 00:00:00 2001 From: Cezar Rata Date: Thu, 14 Nov 2024 18:51:21 +0000 Subject: [PATCH 1346/1356] kernel-6.1: update to 6.1.115 Rebase to Amazon Linux upstream version 6.1.115-126.197.amzn2023. Drop patch for cgroup v1 statistics in favor of the Amazon Linux one. The z3fold module is deprecated upstream and is no longer enabled, so remove it from the list of packaged modules. 
Signed-off-by: Cezar Rata --- ...ttle-Fix-io-statistics-for-cgroup-v1.patch | 95 ------------------- packages/kernel-6.1/Cargo.toml | 4 +- packages/kernel-6.1/kernel-6.1.spec | 8 +- 3 files changed, 4 insertions(+), 103 deletions(-) delete mode 100644 packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch diff --git a/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch b/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch deleted file mode 100644 index 3bd4d39f..00000000 --- a/packages/kernel-6.1/1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 4c5b35e202a3dd2c0d0bf0715c695ef3cc6d902e Mon Sep 17 00:00:00 2001 -From: Jinke Han -Date: Mon, 8 May 2023 01:06:31 +0800 -Subject: [PATCH] blk-throttle: Fix io statistics for cgroup v1 - -After commit f382fb0bcef4 ("block: remove legacy IO schedulers"), -blkio.throttle.io_serviced and blkio.throttle.io_service_bytes become -the only stable io stats interface of cgroup v1, and these statistics -are done in the blk-throttle code. But the current code only counts the -bios that are actually throttled. When the user does not add the throttle -limit, the io stats for cgroup v1 has nothing. I fix it according to the -statistical method of v2, and made it count all ios accurately. - -Fixes: a7b36ee6ba29 ("block: move blk-throtl fast path inline") -Tested-by: Andrea Righi -Signed-off-by: Jinke Han -Acked-by: Muchun Song -Acked-by: Tejun Heo -Link: https://lore.kernel.org/r/20230507170631.89607-1-hanjinke.666@bytedance.com -Signed-off-by: Jens Axboe -[bcressey: - - backport to 6.1 - - adjust context in blk_cgroup_bio_start - - avoid changes from 3b8cc629 ("blk-cgroup: Optimize blkcg_rstat_flush()")] -Signed-off-by: Ben Cressey ---- - block/blk-cgroup.c | 6 ++++-- - block/blk-throttle.c | 6 ------ - block/blk-throttle.h | 9 +++++++++ - 3 files changed, 13 insertions(+), 8 deletions(-) - -diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c -index 1b7fd1fc2f33..435432c4a62e 100644 ---- a/block/blk-cgroup.c -+++ b/block/blk-cgroup.c -@@ -1969,6 +1969,9 @@ void blk_cgroup_bio_start(struct bio *bio) - struct blkg_iostat_set *bis; - unsigned long flags; - -+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) -+ return; -+ - cpu = get_cpu(); - bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); - flags = u64_stats_update_begin_irqsave(&bis->sync); -@@ -1984,8 +1987,7 @@ void blk_cgroup_bio_start(struct bio *bio) - bis->cur.ios[rwd]++; - - u64_stats_update_end_irqrestore(&bis->sync, flags); -- if (cgroup_subsys_on_dfl(io_cgrp_subsys)) -- cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); -+ cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); - put_cpu(); - } - -diff --git a/block/blk-throttle.c b/block/blk-throttle.c -index 62a3f62316df..ab847abe30b0 100644 ---- a/block/blk-throttle.c -+++ b/block/blk-throttle.c -@@ -2176,12 +2176,6 @@ bool __blk_throtl_bio(struct bio *bio) - - rcu_read_lock(); - -- if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { -- blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, -- bio->bi_iter.bi_size); -- blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); -- } -- - spin_lock_irq(&q->queue_lock); - - throtl_update_latency_buckets(td); -diff --git a/block/blk-throttle.h b/block/blk-throttle.h -index ef4b7a4de987..d1ccbfe9f797 100644 ---- a/block/blk-throttle.h -+++ b/block/blk-throttle.h -@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio) - struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg); - int rw = 
bio_data_dir(bio); - -+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { -+ if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { -+ bio_set_flag(bio, BIO_CGROUP_ACCT); -+ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, -+ bio->bi_iter.bi_size); -+ } -+ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); -+ } -+ - /* iops limit is always counted */ - if (tg->has_rules_iops[rw]) - return true; --- -2.45.1 - diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index baf1944b..ab37170d 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-kernel-srpm-url.sh to get this. -url = "https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm" -sha512 = "f7c78716a78d453a0eaaae45f6aaf00a466d7b65e7def0c887f5ff267726b668f725b8a359770a93dbee477313cf297fa6b641580c073aa76dcefda39c19f5c2" +url = "https://cdn.amazonlinux.com/al2023/blobstore/d6984bd6e9f17839ebf3e0b0c4d7dd72aeb4db5911bf697ed299caea93c83327/kernel-6.1.115-126.197.amzn2023.src.rpm" +sha512 = "eb1e9bdbbcc4b74cc678c894b19279437e1396c56e9db0904d1dd6898e6babf4fa3c4c53908ca189ee8558c5d249314a819b255f183fe100e4a3ed068ce2e6cf" [[package.metadata.build-package.external-files]] # Use latest-neuron-srpm-url.sh to get this. diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index 623c88dc..b6dff2ee 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.112 +Version: 6.1.115 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-kernel-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/3b0aa0d6cf05ca272d9802ccddfc28201675b2abac6abb307f5c4b8d3ca68d26/kernel-6.1.112-124.190.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/d6984bd6e9f17839ebf3e0b0c4d7dd72aeb4db5911bf697ed299caea93c83327/kernel-6.1.115-126.197.amzn2023.src.rpm # Use latest-neuron-srpm-url.sh to get this. Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm @@ -43,9 +43,6 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch -# Fix cgroup v1 I/O statistics in blk-throttle to count all I/Os, not just throttled ones, aligning with cgroup v2 methods. -Patch1100: 1100-blk-throttle-Fix-io-statistics-for-cgroup-v1.patch - BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname @@ -1015,7 +1012,6 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/lib/ts_fsm.ko.* %{_cross_kmoddir}/kernel/lib/ts_kmp.ko.* %{_cross_kmoddir}/kernel/lib/zstd/zstd_compress.ko.* -%{_cross_kmoddir}/kernel/mm/z3fold.ko.* %{_cross_kmoddir}/kernel/mm/zsmalloc.ko.* %{_cross_kmoddir}/kernel/net/8021q/8021q.ko.* %{_cross_kmoddir}/kernel/net/802/garp.ko.* From 9160d8ee81e02e0ef046c2b715ab37ad795e810e Mon Sep 17 00:00:00 2001 From: Matthew Yeazel Date: Fri, 15 Nov 2024 01:10:21 +0000 Subject: [PATCH 1347/1356] kernel-5.15: Add patch to fix IPv6 typo This patch fixes issues with ip6tables commands that fail due to a typo. 
This is the type of error that can come up: exit status 2: ip6tables-restore v1.8.4 (legacy): unknown option "--xor-mark" Signed-off-by: Matthew Yeazel --- ...s-fix-typo-causing-some-targets-not-.patch | 79 +++++++++++++++++++ packages/kernel-5.15/kernel-5.15.spec | 3 + 2 files changed, 82 insertions(+) create mode 100644 packages/kernel-5.15/1100-netfilter-xtables-fix-typo-causing-some-targets-not-.patch diff --git a/packages/kernel-5.15/1100-netfilter-xtables-fix-typo-causing-some-targets-not-.patch b/packages/kernel-5.15/1100-netfilter-xtables-fix-typo-causing-some-targets-not-.patch new file mode 100644 index 00000000..273bd93d --- /dev/null +++ b/packages/kernel-5.15/1100-netfilter-xtables-fix-typo-causing-some-targets-not-.patch @@ -0,0 +1,79 @@ +From 02d6d4a741619b0bc8f29705d0f59aac596a9bf6 Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Mon, 28 Oct 2024 07:25:38 +0100 +Subject: [PATCH 49/79] netfilter: xtables: fix typo causing some targets not + to load on IPv6 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +5.15-stable review patch. If anyone has any objections, please let me know. + +------------------ + +From: Pablo Neira Ayuso + +[ Upstream commit 306ed1728e8438caed30332e1ab46b28c25fe3d8 ] + +- There is no NFPROTO_IPV6 family for mark and NFLOG. +- TRACE is also missing module autoload with NFPROTO_IPV6. + +This results in ip6tables failing to restore a ruleset. This issue has been +reported by several users providing incomplete patches. + +Very similar to Ilya Katsnelson's patch including a missing chunk in the +TRACE extension. + +Fixes: 0bfcb7b71e73 ("netfilter: xtables: avoid NFPROTO_UNSPEC where needed") +Reported-by: Ignat Korchagin +Reported-by: Ilya Katsnelson +Reported-by: Krzysztof Olędzki +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/xt_NFLOG.c | 2 +- + net/netfilter/xt_TRACE.c | 1 + + net/netfilter/xt_mark.c | 2 +- + 3 files changed, 3 insertions(+), 2 deletions(-) + +diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c +index d80abd6cc..6dcf4bc7e 100644 +--- a/net/netfilter/xt_NFLOG.c ++++ b/net/netfilter/xt_NFLOG.c +@@ -79,7 +79,7 @@ static struct xt_target nflog_tg_reg[] __read_mostly = { + { + .name = "NFLOG", + .revision = 0, +- .family = NFPROTO_IPV4, ++ .family = NFPROTO_IPV6, + .checkentry = nflog_tg_check, + .destroy = nflog_tg_destroy, + .target = nflog_tg, +diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c +index f3fa4f113..a642ff09f 100644 +--- a/net/netfilter/xt_TRACE.c ++++ b/net/netfilter/xt_TRACE.c +@@ -49,6 +49,7 @@ static struct xt_target trace_tg_reg[] __read_mostly = { + .target = trace_tg, + .checkentry = trace_tg_check, + .destroy = trace_tg_destroy, ++ .me = THIS_MODULE, + }, + #endif + }; +diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c +index f76fe04fc..65b965ca4 100644 +--- a/net/netfilter/xt_mark.c ++++ b/net/netfilter/xt_mark.c +@@ -62,7 +62,7 @@ static struct xt_target mark_tg_reg[] __read_mostly = { + { + .name = "MARK", + .revision = 2, +- .family = NFPROTO_IPV4, ++ .family = NFPROTO_IPV6, + .target = mark_tg, + .targetsize = sizeof(struct xt_mark_tginfo2), + .me = THIS_MODULE, +-- +2.45.0 + diff --git a/packages/kernel-5.15/kernel-5.15.spec b/packages/kernel-5.15/kernel-5.15.spec index 2abe2e80..90751b59 100644 --- a/packages/kernel-5.15/kernel-5.15.spec +++ b/packages/kernel-5.15/kernel-5.15.spec @@ -25,6 +25,9 @@ Patch1003: 1003-initramfs-unlink-INITRAMFS_FORCE-from-CMDLINE_-EXTEN.patch # Increase 
default of sysctl net.unix.max_dgram_qlen to 512. Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch +# Fix typo that breaks IPv6 via ip6tables commands +Patch1100: 1100-netfilter-xtables-fix-typo-causing-some-targets-not-.patch + BuildRequires: bc BuildRequires: elfutils-devel BuildRequires: hostname From d5b1148a90081e79ed79fce28dab1c3e9f09a73f Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Fri, 15 Nov 2024 00:18:10 +0000 Subject: [PATCH 1348/1356] packages: use upstream amazon linux sources Signed-off-by: Gavin Inglis --- packages/grub/Cargo.toml | 1 + packages/kernel-5.10/Cargo.toml | 2 ++ packages/kernel-5.15/Cargo.toml | 2 ++ packages/kernel-6.1/Cargo.toml | 2 ++ packages/libkcapi/Cargo.toml | 1 + 5 files changed, 8 insertions(+) diff --git a/packages/grub/Cargo.toml b/packages/grub/Cargo.toml index c61feae9..a4134087 100644 --- a/packages/grub/Cargo.toml +++ b/packages/grub/Cargo.toml @@ -11,3 +11,4 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] url = "https://cdn.amazonlinux.com/al2023/blobstore/f4fa28cb4e1586d622925449b1e24748c6ab09ccebe0fd8ddfa20cf5e7ce182a/grub2-2.06-61.amzn2023.0.9.src.rpm" sha512 = "57886df0580f166bd741126f19109a0e464bc2408aafca38e68def077a2ab1f64c239d85015c44162b88d787da7ec55a623f4e7d2601942391f0996038393f99" +force-upstream = true diff --git a/packages/kernel-5.10/Cargo.toml b/packages/kernel-5.10/Cargo.toml index e14e7574..9cca3978 100644 --- a/packages/kernel-5.10/Cargo.toml +++ b/packages/kernel-5.10/Cargo.toml @@ -15,11 +15,13 @@ path = "../packages.rs" # Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/0af5f80d00a3d5a867d4959d74751bc7d24b1bcb0ab8a5de558ae301ae0fa52e/kernel-5.10.228-219.884.amzn2.src.rpm" sha512 = "124c6d662c48dc4cb8caf035e9ee44c9c47bc5e19141c319b94abc441dce4e2afa24e30b9c0196665aa267b6ef85004153b3f5cddfe9191c2c8927ddb4175fbd" +force-upstream = true [[package.metadata.build-package.external-files]] # Use latest-neuron-srpm-url.sh to get this. url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" +force-upstream = true [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-5.15/Cargo.toml b/packages/kernel-5.15/Cargo.toml index 386865d1..3a217744 100644 --- a/packages/kernel-5.15/Cargo.toml +++ b/packages/kernel-5.15/Cargo.toml @@ -15,11 +15,13 @@ path = "../packages.rs" # Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/blobstore/9cea3dae03703f3c4c78fcb1302eeee5fe4c07ebf53d783cf3aaf7e4f30a6d39/kernel-5.15.168-114.166.amzn2.src.rpm" sha512 = "5b0b0e2640bb04d4868b8820781029d8148c7939802c1b4edcf580533848afe70f7c6372e6e2306dfc017d2b32120a446ada15b105f7b2fe766b9382f83937d3" +force-upstream = true [[package.metadata.build-package.external-files]] # Use latest-neuron-srpm-url.sh to get this. 
url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" +force-upstream = true [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index ab37170d..09130475 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -15,11 +15,13 @@ path = "../packages.rs" # Use latest-kernel-srpm-url.sh to get this. url = "https://cdn.amazonlinux.com/al2023/blobstore/d6984bd6e9f17839ebf3e0b0c4d7dd72aeb4db5911bf697ed299caea93c83327/kernel-6.1.115-126.197.amzn2023.src.rpm" sha512 = "eb1e9bdbbcc4b74cc678c894b19279437e1396c56e9db0904d1dd6898e6babf4fa3c4c53908ca189ee8558c5d249314a819b255f183fe100e4a3ed068ce2e6cf" +force-upstream = true [[package.metadata.build-package.external-files]] # Use latest-neuron-srpm-url.sh to get this. url = "https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm" sha512 = "4ed92e661d0ba368eaf8f60e1a68c202062a26819231fcfd42a5ff05d20ad2f34b82b23359a88e80eea22ee5d0056ad769b6febd5d7e7b161da0e36434ba2579" +force-upstream = true [build-dependencies] microcode = { path = "../microcode" } diff --git a/packages/libkcapi/Cargo.toml b/packages/libkcapi/Cargo.toml index 5e14eb5d..6ca684ac 100644 --- a/packages/libkcapi/Cargo.toml +++ b/packages/libkcapi/Cargo.toml @@ -11,6 +11,7 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] url = "https://cdn.amazonlinux.com/al2023/blobstore/0eef74b3b4eb1ec321bab80f867aee89b94dc9fc95571da58ea5bba7a70e6224/libkcapi-1.4.0-105.amzn2023.0.1.src.rpm" sha512 = "6498147434059343f1ccdd7efadcd425ad7074e41b4e019fc995129d5df326b781e0a61a4324e1ce8d6771162d1612b754ce24625fb3b1458811f6bde8f638c9" +force-upstream = true [build-dependencies] glibc = { path = "../glibc" } From 15c93885e3ce981eb4c65d9c07f5c90d6efc098e Mon Sep 17 00:00:00 2001 From: Sam Berning Date: Wed, 13 Nov 2024 22:02:24 +0000 Subject: [PATCH 1349/1356] twoliter: update twoliter to v0.5.1 Signed-off-by: Sam Berning --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 58354c1b..79577074 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,9 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.5.0" -TWOLITER_SHA256_AARCH64 ?= "cec8d30377f5cb38ee1d3bc99bb8aaf3958213b38be6a75d09a8bc5fcd3da590" -TWOLITER_SHA256_X86_64 ?= "d580180969f8b34b1af5d2524ff024e90432f09f991fc044444019da20a027a8" +TWOLITER_VERSION ?= "0.5.1" +TWOLITER_SHA256_AARCH64 ?= "c72a571414db175fd1d82e96daba2a7778379a8336ffa63c42d00b65ca84b34e" +TWOLITER_SHA256_X86_64 ?= "5c3801d11b77d5414071432eed48d1888555125917b322b37a84b3a9219422a7" KIT ?= bottlerocket-core-kit UNAME_ARCH = $(shell uname -m) ARCH ?= $(UNAME_ARCH) From df1b08cf1ac24681e61a0c38978cee361cd6625e Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Tue, 12 Nov 2024 19:01:21 +0000 Subject: [PATCH 1350/1356] kernel: disable BPF preload and bpfilter helpers The kernel build will automatically enable these helpers if it finds that the target C compiler can build a userspace program that depends on the C library. However, the helpers require additional libraries such as libelf which are not guaranteed to be present. Since we don't make use of precompiled BPF programs, disable these config options rather than leaving it up to auto-detect. 
Signed-off-by: Ben Cressey --- packages/kernel-5.10/config-bottlerocket | 6 ++++++ packages/kernel-5.15/config-bottlerocket | 6 ++++++ packages/kernel-6.1/config-bottlerocket | 5 +++++ 3 files changed, 17 insertions(+) diff --git a/packages/kernel-5.10/config-bottlerocket b/packages/kernel-5.10/config-bottlerocket index b7b4baa0..09baaab0 100644 --- a/packages/kernel-5.10/config-bottlerocket +++ b/packages/kernel-5.10/config-bottlerocket @@ -119,6 +119,12 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y +# Disable user-mode helpers for BPF preload and bpfilter, since they rely on a +# more complete set of userspace libraries for the target than we want to +# depend on at kernel build time. +# CONFIG_BPF_PRELOAD_UMD is not set +# CONFIG_BPFILTER_UMH is not set + # Disable unused filesystems. # CONFIG_AFS_FS is not set # CONFIG_CRAMFS is not set diff --git a/packages/kernel-5.15/config-bottlerocket b/packages/kernel-5.15/config-bottlerocket index 454d1666..ad582803 100644 --- a/packages/kernel-5.15/config-bottlerocket +++ b/packages/kernel-5.15/config-bottlerocket @@ -132,6 +132,12 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y +# Disable user-mode helpers for BPF preload and bpfilter, since they rely on a +# more complete set of userspace libraries for the target than we want to +# depend on at kernel build time. +# CONFIG_BPF_PRELOAD_UMD is not set +# CONFIG_BPFILTER_UMH is not set + # Disable unused filesystems. # CONFIG_AFS_FS is not set # CONFIG_CRAMFS is not set diff --git a/packages/kernel-6.1/config-bottlerocket b/packages/kernel-6.1/config-bottlerocket index 375b43f5..260d6abe 100644 --- a/packages/kernel-6.1/config-bottlerocket +++ b/packages/kernel-6.1/config-bottlerocket @@ -162,6 +162,11 @@ CONFIG_BOOT_CONFIG=y # Enables support for checkpoint/restore CONFIG_CHECKPOINT_RESTORE=y +# Disable user-mode helper for bpfilter, since it relies on a more complete set +# of userspace libraries for the target than we want to depend on at kernel +# build time. +# CONFIG_BPFILTER_UMH is not set + # Disable unused filesystems. # CONFIG_AFS_FS is not set # CONFIG_CRAMFS is not set From 1b96c289af03c5301704765d45bfcc5eff336a63 Mon Sep 17 00:00:00 2001 From: Arnaldo Garcia Rincon Date: Wed, 20 Nov 2024 02:15:02 +0000 Subject: [PATCH 1351/1356] kernel-6.1: prevent io_uring calls from hanging Signed-off-by: Arnaldo Garcia Rincon --- ...ways-lock-__io_cqring_overflow_flush.patch | 60 +++++++++++++++++++ packages/kernel-6.1/kernel-6.1.spec | 2 + 2 files changed, 62 insertions(+) create mode 100644 packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch diff --git a/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch b/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch new file mode 100644 index 00000000..3c967558 --- /dev/null +++ b/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch @@ -0,0 +1,60 @@ +From 1863335f591d6a708fb5321fe10504174fddc9ee Mon Sep 17 00:00:00 2001 +From: Pavel Begunkov +Date: Wed, 10 Apr 2024 02:26:54 +0100 +Subject: [PATCH] io_uring: always lock __io_cqring_overflow_flush + +Commit 8d09a88ef9d3cb7d21d45c39b7b7c31298d23998 upstream. + +Conditional locking is never great, in case of +__io_cqring_overflow_flush(), which is a slow path, it's not justified. +Don't handle IOPOLL separately, always grab uring_lock for overflow +flushing. 
+ +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + io_uring/io_uring.c | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) + +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index f902b161f02c..92c1aa8f3501 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -593,6 +593,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) + bool all_flushed; + size_t cqe_size = sizeof(struct io_uring_cqe); + ++ lockdep_assert_held(&ctx->uring_lock); ++ + if (!force && __io_cqring_events(ctx) == ctx->cq_entries) + return false; + +@@ -647,12 +649,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx) + bool ret = true; + + if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { +- /* iopoll syncs against uring_lock, not completion_lock */ +- if (ctx->flags & IORING_SETUP_IOPOLL) +- mutex_lock(&ctx->uring_lock); ++ mutex_lock(&ctx->uring_lock); + ret = __io_cqring_overflow_flush(ctx, false); +- if (ctx->flags & IORING_SETUP_IOPOLL) +- mutex_unlock(&ctx->uring_lock); ++ mutex_unlock(&ctx->uring_lock); + } + + return ret; +@@ -1405,6 +1404,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) + int ret = 0; + unsigned long check_cq; + ++ lockdep_assert_held(&ctx->uring_lock); ++ + if (!io_allowed_run_tw(ctx)) + return -EEXIST; + +-- +2.47.0 + diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index b6dff2ee..bae9a140 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -42,6 +42,8 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # Drop AL revert of upstream patch to minimize delta. The necessary dependency # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch +# Prevent applications using io_uring from hanging +Patch1100: 1100-io_uring-always-lock-__io_cqring_overflow_flush.patch BuildRequires: bc BuildRequires: elfutils-devel From 5f69890ba3c269703a214dbe2d826a09b3c5857b Mon Sep 17 00:00:00 2001 From: Ben Cressey Date: Thu, 14 Nov 2024 21:18:16 +0000 Subject: [PATCH 1352/1356] grub: add patches for Boot Loader Interface Backport upstream patches from the series that added the Boot Loader Interface module, which implements a different part of the interface: support for LoaderInfo and LoaderDevicePartUUID. The refactoring commits added a new grub_efi_set_variable_to_string function, along with the ability to set a variable without the non- volatile flag applied. Add two patches of our own: one to implement the part of BLI that's used by `systemd-analyze` to report time spent in the firmware and in the bootloader; and another to make it so that grub_get_time_ms() on i386 doesn't treat the time the TSC was calibrated as the epoch for all timestamps, which otherwise hides the time spent between power on and when GRUB is started. 
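On a host booted with these patches, the result can be spot-checked from
userspace (a sketch for verification only; it assumes efivarfs is mounted at
its usual location and that the installed systemd is recent enough to consume
the Boot Loader Interface variables):

    # The loader timestamps appear as EFI variables named by the interface...
    ls /sys/firmware/efi/efivars/ | grep -E 'LoaderTime(Init|Exec)USec'
    # ...and systemd-analyze then includes "(firmware)" and "(loader)" spans
    # in its startup summary.
    systemd-analyze
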
Signed-off-by: Ben Cressey --- ...rub_efi_set_variable_with_attributes.patch | 88 ++++++++++++ ...lude-grub-types.h-Add-GRUB_SSIZE_MAX.patch | 40 ++++++ ...ern-efi-Extract-UTF-8-to-UTF-16-code.patch | 136 ++++++++++++++++++ ...-Add-grub_efi_set_variable_to_string.patch | 71 +++++++++ ...endor-GUID-for-Boot-Loader-Interface.patch | 32 +++++ ...rTimeInitUSec-and-LoaderTimeExecUSec.patch | 129 +++++++++++++++++ .../0055-tsc-drop-tsc_boot_time-offset.patch | 66 +++++++++ packages/grub/grub.spec | 7 + 8 files changed, 569 insertions(+) create mode 100644 packages/grub/0049-efi-Add-grub_efi_set_variable_with_attributes.patch create mode 100644 packages/grub/0050-include-grub-types.h-Add-GRUB_SSIZE_MAX.patch create mode 100644 packages/grub/0051-kern-misc-kern-efi-Extract-UTF-8-to-UTF-16-code.patch create mode 100644 packages/grub/0052-efi-Add-grub_efi_set_variable_to_string.patch create mode 100644 packages/grub/0053-efi-add-vendor-GUID-for-Boot-Loader-Interface.patch create mode 100644 packages/grub/0054-efi-set-LoaderTimeInitUSec-and-LoaderTimeExecUSec.patch create mode 100644 packages/grub/0055-tsc-drop-tsc_boot_time-offset.patch diff --git a/packages/grub/0049-efi-Add-grub_efi_set_variable_with_attributes.patch b/packages/grub/0049-efi-Add-grub_efi_set_variable_with_attributes.patch new file mode 100644 index 00000000..acf3bd56 --- /dev/null +++ b/packages/grub/0049-efi-Add-grub_efi_set_variable_with_attributes.patch @@ -0,0 +1,88 @@ +From 37cd5dfd522882e1d95c4205635f9b2763256707 Mon Sep 17 00:00:00 2001 +From: Oliver Steffen +Date: Fri, 26 May 2023 13:35:42 +0200 +Subject: [PATCH] efi: Add grub_efi_set_variable_with_attributes() + +Add a function to the EFI module that allows setting EFI variables +with specific attributes. + +This is useful for marking variables as volatile, for example. 
+ +Signed-off-by: Oliver Steffen +Reviewed-by: Daniel Kiper +[bcressey: + - backport to 2.06 + - avoid changes from bb4aa6e0 ("efi: Drop all uses of efi_call_XX() wrappers")] +Signed-off-by: Ben Cressey + +(cherry picked from commit 7e4da6fb2d03ea20f7e11efc496e2e6cf360048b) +--- + grub-core/kern/efi/efi.c | 20 +++++++++++++------- + include/grub/efi/efi.h | 6 ++++++ + 2 files changed, 19 insertions(+), 7 deletions(-) + +diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c +index 3a4475c5c..ab9a53966 100644 +--- a/grub-core/kern/efi/efi.c ++++ b/grub-core/kern/efi/efi.c +@@ -211,8 +211,8 @@ grub_efi_set_virtual_address_map (grub_efi_uintn_t memory_map_size, + } + + grub_err_t +-grub_efi_set_variable(const char *var, const grub_efi_guid_t *guid, +- void *data, grub_size_t datasize) ++grub_efi_set_variable_with_attributes (const char *var, const grub_efi_guid_t *guid, ++ void *data, grub_size_t datasize, grub_efi_uint32_t attributes) + { + grub_efi_status_t status; + grub_efi_runtime_services_t *r; +@@ -229,11 +229,7 @@ grub_efi_set_variable(const char *var, const grub_efi_guid_t *guid, + + r = grub_efi_system_table->runtime_services; + +- status = efi_call_5 (r->set_variable, var16, guid, +- (GRUB_EFI_VARIABLE_NON_VOLATILE +- | GRUB_EFI_VARIABLE_BOOTSERVICE_ACCESS +- | GRUB_EFI_VARIABLE_RUNTIME_ACCESS), +- datasize, data); ++ status = efi_call_5 (r->set_variable, var16, guid, attributes, datasize, data); + grub_free (var16); + if (status == GRUB_EFI_SUCCESS) + return GRUB_ERR_NONE; +@@ -244,6 +240,16 @@ grub_efi_set_variable(const char *var, const grub_efi_guid_t *guid, + return grub_error (GRUB_ERR_IO, "could not set EFI variable `%s'", var); + } + ++grub_err_t ++grub_efi_set_variable (const char *var, const grub_efi_guid_t *guid, ++ void *data, grub_size_t datasize) ++{ ++ return grub_efi_set_variable_with_attributes (var, guid, data, datasize, ++ GRUB_EFI_VARIABLE_NON_VOLATILE ++ | GRUB_EFI_VARIABLE_BOOTSERVICE_ACCESS ++ | GRUB_EFI_VARIABLE_RUNTIME_ACCESS); ++} ++ + grub_efi_status_t + grub_efi_get_variable_with_attributes (const char *var, + const grub_efi_guid_t *guid, +diff --git a/include/grub/efi/efi.h b/include/grub/efi/efi.h +index d580b6bd9..a08f9474d 100644 +--- a/include/grub/efi/efi.h ++++ b/include/grub/efi/efi.h +@@ -128,6 +128,12 @@ grub_efi_status_t EXPORT_FUNC (grub_efi_get_variable) (const char *variable, + grub_size_t *datasize_out, + void **data_out); + grub_err_t ++EXPORT_FUNC (grub_efi_set_variable_with_attributes) (const char *var, ++ const grub_efi_guid_t *guid, ++ void *data, ++ grub_size_t datasize, ++ grub_efi_uint32_t attributes); ++grub_err_t + EXPORT_FUNC (grub_efi_set_variable) (const char *var, + const grub_efi_guid_t *guid, + void *data, +-- +2.47.0 + diff --git a/packages/grub/0050-include-grub-types.h-Add-GRUB_SSIZE_MAX.patch b/packages/grub/0050-include-grub-types.h-Add-GRUB_SSIZE_MAX.patch new file mode 100644 index 00000000..5ec27352 --- /dev/null +++ b/packages/grub/0050-include-grub-types.h-Add-GRUB_SSIZE_MAX.patch @@ -0,0 +1,40 @@ +From abacbb46f1a73e33a6b7644a8a8081244c43c28f Mon Sep 17 00:00:00 2001 +From: Oliver Steffen +Date: Fri, 26 May 2023 13:35:46 +0200 +Subject: [PATCH] include/grub/types.h: Add GRUB_SSIZE_MAX + +In the same way as GRUB_SIZE_MAX, add GRUB_SSIZE_MAX. 
+ +Signed-off-by: Oliver Steffen +Reviewed-by: Daniel Kiper +[bcressey: backport to 2.06] +Signed-off-by: Ben Cressey + +(cherry picked from commit 389d3dc835a37c42184d2fab978ccd902a2399f7) +--- + include/grub/types.h | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/include/grub/types.h b/include/grub/types.h +index ba446d990..ebf672b46 100644 +--- a/include/grub/types.h ++++ b/include/grub/types.h +@@ -122,6 +122,7 @@ typedef grub_uint64_t grub_size_t; + typedef grub_int64_t grub_ssize_t; + + # define GRUB_SIZE_MAX 18446744073709551615UL ++# define GRUB_SSIZE_MAX 9223372036854775807L + + # if GRUB_CPU_SIZEOF_LONG == 8 + # define PRIxGRUB_SIZE "lx" +@@ -140,6 +141,7 @@ typedef grub_uint32_t grub_size_t; + typedef grub_int32_t grub_ssize_t; + + # define GRUB_SIZE_MAX 4294967295UL ++# define GRUB_SSIZE_MAX 2147483647L + + # define PRIxGRUB_SIZE "x" + # define PRIxGRUB_ADDR "x" +-- +2.47.0 + diff --git a/packages/grub/0051-kern-misc-kern-efi-Extract-UTF-8-to-UTF-16-code.patch b/packages/grub/0051-kern-misc-kern-efi-Extract-UTF-8-to-UTF-16-code.patch new file mode 100644 index 00000000..156695e3 --- /dev/null +++ b/packages/grub/0051-kern-misc-kern-efi-Extract-UTF-8-to-UTF-16-code.patch @@ -0,0 +1,136 @@ +From 55011a89968539f9d54d3425dc7ab73a0812851e Mon Sep 17 00:00:00 2001 +From: Oliver Steffen +Date: Fri, 26 May 2023 13:35:47 +0200 +Subject: [PATCH] kern/misc, kern/efi: Extract UTF-8 to UTF-16 code + +Create a new function for UTF-8 to UTF-16 conversion called +grub_utf8_to_utf16_alloc() in the grub-code/kern/misc.c and replace +charset conversion code used in some places in the EFI code. It is +modeled after the grub_utf8_to_ucs4_alloc() like functions in +include/grub/charset.h. It can't live in include/grub/charset.h, +because it needs to be reachable from the kern/efi code. + +Add a check for integer overflow and remove redundant NUL-termination. 
+ +Signed-off-by: Oliver Steffen +Reviewed-by: Daniel Kiper +[bcressey: backport to 2.06] +Signed-off-by: Ben Cressey + +(cherry picked from commit a0b16564ee2e8eb7f597926bf60c4de2d696cd66) +--- + grub-core/kern/efi/efi.c | 21 ++++++--------------- + grub-core/kern/misc.c | 32 ++++++++++++++++++++++++++++++++ + include/grub/misc.h | 3 +++ + 3 files changed, 41 insertions(+), 15 deletions(-) + +diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c +index ab9a53966..354343935 100644 +--- a/grub-core/kern/efi/efi.c ++++ b/grub-core/kern/efi/efi.c +@@ -217,15 +217,11 @@ grub_efi_set_variable_with_attributes (const char *var, const grub_efi_guid_t *g + grub_efi_status_t status; + grub_efi_runtime_services_t *r; + grub_efi_char16_t *var16; +- grub_size_t len, len16; + +- len = grub_strlen (var); +- len16 = len * GRUB_MAX_UTF16_PER_UTF8; +- var16 = grub_calloc (len16 + 1, sizeof (var16[0])); +- if (!var16) ++ grub_utf8_to_utf16_alloc (var, &var16, NULL); ++ ++ if (var16 == NULL) + return grub_errno; +- len16 = grub_utf8_to_utf16 (var16, len16, (grub_uint8_t *) var, len, NULL); +- var16[len16] = 0; + + r = grub_efi_system_table->runtime_services; + +@@ -262,18 +258,13 @@ grub_efi_get_variable_with_attributes (const char *var, + grub_efi_runtime_services_t *r; + grub_efi_char16_t *var16; + void *data; +- grub_size_t len, len16; + + *data_out = NULL; + *datasize_out = 0; + +- len = grub_strlen (var); +- len16 = len * GRUB_MAX_UTF16_PER_UTF8; +- var16 = grub_calloc (len16 + 1, sizeof (var16[0])); +- if (!var16) +- return GRUB_EFI_OUT_OF_RESOURCES; +- len16 = grub_utf8_to_utf16 (var16, len16, (grub_uint8_t *) var, len, NULL); +- var16[len16] = 0; ++ grub_utf8_to_utf16_alloc (var, &var16, NULL); ++ if (var16 == NULL) ++ return grub_errno; + + r = grub_efi_system_table->runtime_services; + +diff --git a/grub-core/kern/misc.c b/grub-core/kern/misc.c +index a95d182ba..f9be124d9 100644 +--- a/grub-core/kern/misc.c ++++ b/grub-core/kern/misc.c +@@ -28,6 +28,8 @@ + #if DEBUG_WITH_TIMESTAMPS + #include + #endif ++#include ++#include + + static void + parse_printf_args (const char *fmt0, struct grub_printf_args *args, +@@ -1280,6 +1282,36 @@ grub_fatal (const char *fmt, ...) + grub_abort (); + } + ++grub_ssize_t ++grub_utf8_to_utf16_alloc (const char *str8, grub_uint16_t **utf16_msg, grub_uint16_t **last_position) ++{ ++ grub_size_t len; ++ grub_size_t len16; ++ ++ len = grub_strlen (str8); ++ ++ /* Check for integer overflow */ ++ if (len > GRUB_SSIZE_MAX / GRUB_MAX_UTF16_PER_UTF8 - 1) ++ { ++ grub_error (GRUB_ERR_BAD_ARGUMENT, N_("string too long")); ++ *utf16_msg = NULL; ++ return -1; ++ } ++ ++ len16 = len * GRUB_MAX_UTF16_PER_UTF8; ++ ++ *utf16_msg = grub_calloc (len16 + 1, sizeof (*utf16_msg[0])); ++ if (*utf16_msg == NULL) ++ return -1; ++ ++ len16 = grub_utf8_to_utf16 (*utf16_msg, len16, (grub_uint8_t *) str8, len, NULL); ++ ++ if (last_position != NULL) ++ *last_position = *utf16_msg + len16; ++ ++ return len16; ++} ++ + #if BOOT_TIME_STATS + + #include +diff --git a/include/grub/misc.h b/include/grub/misc.h +index a359b0dee..8716e486d 100644 +--- a/include/grub/misc.h ++++ b/include/grub/misc.h +@@ -536,4 +536,7 @@ void EXPORT_FUNC(grub_real_boot_time) (const char *file, + + #define grub_log2ull(n) (GRUB_TYPE_BITS (grub_uint64_t) - __builtin_clzll (n) - 1) + ++grub_ssize_t ++EXPORT_FUNC(grub_utf8_to_utf16_alloc) (const char *str8, grub_uint16_t **utf16_msg, grub_uint16_t **last_position); ++ + #endif /* ! 
GRUB_MISC_HEADER */ +-- +2.47.0 + diff --git a/packages/grub/0052-efi-Add-grub_efi_set_variable_to_string.patch b/packages/grub/0052-efi-Add-grub_efi_set_variable_to_string.patch new file mode 100644 index 00000000..2a6d8cd1 --- /dev/null +++ b/packages/grub/0052-efi-Add-grub_efi_set_variable_to_string.patch @@ -0,0 +1,71 @@ +From 28bf9df5e7146610fd1890ea5856c0c0a86dac1c Mon Sep 17 00:00:00 2001 +From: Oliver Steffen +Date: Fri, 26 May 2023 13:35:48 +0200 +Subject: [PATCH] efi: Add grub_efi_set_variable_to_string() + +Add a function that sets an EFI variable to a string value. +The string is converted from UTF-8 to UTF-16. + +Signed-off-by: Oliver Steffen +Reviewed-by: Daniel Kiper +[bcressey: + - backport to 2.06 + - avoid changes from 06edd40d ("guid: Unify GUID types")] +Signed-off-by: Ben Cressey + +(cherry picked from commit e83a88f6ea7f97d643387681fe044f45dcd732b9) +--- + grub-core/kern/efi/efi.c | 22 ++++++++++++++++++++++ + include/grub/efi/efi.h | 3 +++ + 2 files changed, 25 insertions(+) + +diff --git a/grub-core/kern/efi/efi.c b/grub-core/kern/efi/efi.c +index 354343935..a65ef27bd 100644 +--- a/grub-core/kern/efi/efi.c ++++ b/grub-core/kern/efi/efi.c +@@ -297,6 +297,28 @@ grub_efi_get_variable_with_attributes (const char *var, + return status; + } + ++grub_err_t ++grub_efi_set_variable_to_string (const char *name, const grub_efi_guid_t *guid, ++ const char *value, grub_efi_uint32_t attributes) ++{ ++ grub_efi_char16_t *value_16; ++ grub_ssize_t len16; ++ grub_err_t status; ++ ++ len16 = grub_utf8_to_utf16_alloc (value, &value_16, NULL); ++ ++ if (len16 < 0) ++ return grub_errno; ++ ++ status = grub_efi_set_variable_with_attributes (name, guid, ++ (void *) value_16, (len16 + 1) * sizeof (value_16[0]), ++ attributes); ++ ++ grub_free (value_16); ++ ++ return status; ++} ++ + grub_efi_status_t + grub_efi_get_variable (const char *var, const grub_efi_guid_t *guid, + grub_size_t *datasize_out, void **data_out) +diff --git a/include/grub/efi/efi.h b/include/grub/efi/efi.h +index a08f9474d..a82741ef2 100644 +--- a/include/grub/efi/efi.h ++++ b/include/grub/efi/efi.h +@@ -138,6 +138,9 @@ EXPORT_FUNC (grub_efi_set_variable) (const char *var, + const grub_efi_guid_t *guid, + void *data, + grub_size_t datasize); ++grub_err_t ++EXPORT_FUNC (grub_efi_set_variable_to_string) (const char *name, const grub_efi_guid_t *guid, ++ const char *value, grub_efi_uint32_t attributes); + int + EXPORT_FUNC (grub_efi_compare_device_paths) (const grub_efi_device_path_t *dp1, + const grub_efi_device_path_t *dp2); +-- +2.47.0 + diff --git a/packages/grub/0053-efi-add-vendor-GUID-for-Boot-Loader-Interface.patch b/packages/grub/0053-efi-add-vendor-GUID-for-Boot-Loader-Interface.patch new file mode 100644 index 00000000..9b4f6ef2 --- /dev/null +++ b/packages/grub/0053-efi-add-vendor-GUID-for-Boot-Loader-Interface.patch @@ -0,0 +1,32 @@ +From ec2bbc9d87079cd080feaad6b7512624a52ee555 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Thu, 14 Nov 2024 19:24:42 +0000 +Subject: [PATCH] efi: add vendor GUID for Boot Loader Interface + +Backports the relevant part of upstream commit e0fa7dc8 ("bli: Add a +module for the Boot Loader Interface"). 
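The initializer added below encodes the GUID 4a67b082-0a4c-41cf-b6c7-440b29bb8c4f, the vendor GUID that the systemd Boot Loader Interface uses for its loader variables. A minimal sketch of how it is expected to pair with grub_efi_set_variable_to_string() from the previous patch; the wrapper function, the variable name "LoaderInfo" and the value string are illustrative and not taken from this series:

```c
/* Sketch only, not part of the patch: publishing a Boot Loader Interface
 * variable under the new vendor GUID.  publish_loader_info(), "LoaderInfo"
 * and the value are hypothetical. */
static const grub_efi_guid_t bli_guid = GRUB_EFI_VENDOR_BOOT_LOADER_INTERFACE_GUID;

static grub_err_t
publish_loader_info (void)
{
  return grub_efi_set_variable_to_string ("LoaderInfo", &bli_guid, "GRUB",
                                          GRUB_EFI_VARIABLE_BOOTSERVICE_ACCESS
                                          | GRUB_EFI_VARIABLE_RUNTIME_ACCESS);
}
```

On a booted Linux system a variable written this way surfaces through efivarfs as, for example, /sys/firmware/efi/efivars/LoaderInfo-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f, which is how `systemd-analyze` locates the timing variables set in the next patch.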
+ +Signed-off-by: Ben Cressey +--- + include/grub/efi/api.h | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/include/grub/efi/api.h b/include/grub/efi/api.h +index 464842ba3..f301913e6 100644 +--- a/include/grub/efi/api.h ++++ b/include/grub/efi/api.h +@@ -368,6 +368,11 @@ + { 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89 } \ + } + ++#define GRUB_EFI_VENDOR_BOOT_LOADER_INTERFACE_GUID \ ++ { 0x4a67b082, 0x0a4c, 0x41cf, \ ++ { 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f } \ ++ } ++ + struct grub_efi_sal_system_table + { + grub_uint32_t signature; +-- +2.47.0 + diff --git a/packages/grub/0054-efi-set-LoaderTimeInitUSec-and-LoaderTimeExecUSec.patch b/packages/grub/0054-efi-set-LoaderTimeInitUSec-and-LoaderTimeExecUSec.patch new file mode 100644 index 00000000..0ab6826c --- /dev/null +++ b/packages/grub/0054-efi-set-LoaderTimeInitUSec-and-LoaderTimeExecUSec.patch @@ -0,0 +1,129 @@ +From a76d5d1d5f425eb6128cadee06d9aaeff1b24b83 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Thu, 14 Nov 2024 20:29:13 +0000 +Subject: [PATCH] efi: set LoaderTimeInitUSec and LoaderTimeExecUSec + +This implements the part of the Boot Loader Interface [0] that's used +by `systemd-analyze` to calculate the time spent in the firmware and +bootloader, prior to kernel execution. + +[0] https://systemd.io/BOOT_LOADER_INTERFACE/ + +Signed-off-by: Ben Cressey +--- + grub-core/kern/arm64/efi/init.c | 23 +++++++++++++++++++++++ + grub-core/kern/i386/efi/init.c | 26 ++++++++++++++++++++++++++ + 2 files changed, 49 insertions(+) + +diff --git a/grub-core/kern/arm64/efi/init.c b/grub-core/kern/arm64/efi/init.c +index 5010caefd..52e87d582 100644 +--- a/grub-core/kern/arm64/efi/init.c ++++ b/grub-core/kern/arm64/efi/init.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -36,11 +37,16 @@ grub_efi_get_time_ms (void) + return tmr / timer_frequency_in_khz; + } + ++static const grub_efi_guid_t bli_vendor_guid = GRUB_EFI_VENDOR_BOOT_LOADER_INTERFACE_GUID; ++static const grub_efi_uint32_t bli_efivar_attr = ++ GRUB_EFI_VARIABLE_BOOTSERVICE_ACCESS | GRUB_EFI_VARIABLE_RUNTIME_ACCESS; + + void + grub_machine_init (void) + { + grub_uint64_t timer_frequency; ++ grub_efi_uint64_t init_ms; ++ char *init_usec; + + grub_efi_init (); + +@@ -48,11 +54,28 @@ grub_machine_init (void) + timer_frequency_in_khz = timer_frequency / 1000; + + grub_install_get_time_ms (grub_efi_get_time_ms); ++ ++ init_ms = grub_get_time_ms (); ++ init_usec = grub_xasprintf ("%lu000", init_ms); ++ grub_efi_set_variable_to_string ("LoaderTimeInitUSec", ++ &bli_vendor_guid, ++ init_usec, ++ bli_efivar_attr); + } + + void + grub_machine_fini (int flags) + { ++ grub_efi_uint64_t exec_ms; ++ char *exec_usec; ++ ++ exec_ms = grub_get_time_ms (); ++ exec_usec = grub_xasprintf ("%lu000", exec_ms); ++ grub_efi_set_variable_to_string ("LoaderTimeExecUSec", ++ &bli_vendor_guid, ++ exec_usec, ++ bli_efivar_attr); ++ + if (!(flags & GRUB_LOADER_FLAG_NORETURN)) + return; + +diff --git a/grub-core/kern/i386/efi/init.c b/grub-core/kern/i386/efi/init.c +index 46476e27e..1e237cfcb 100644 +--- a/grub-core/kern/i386/efi/init.c ++++ b/grub-core/kern/i386/efi/init.c +@@ -24,20 +24,46 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include ++ ++static const grub_efi_guid_t bli_vendor_guid = GRUB_EFI_VENDOR_BOOT_LOADER_INTERFACE_GUID; ++static const grub_efi_uint32_t bli_efivar_attr = ++ GRUB_EFI_VARIABLE_BOOTSERVICE_ACCESS | GRUB_EFI_VARIABLE_RUNTIME_ACCESS; + + void + grub_machine_init (void) + { ++ 
grub_efi_uint64_t init_ms; ++ char *init_usec; ++ + grub_efi_init (); + grub_tsc_init (); ++ ++ init_ms = grub_get_time_ms (); ++ init_usec = grub_xasprintf ("%lu000", init_ms); ++ grub_efi_set_variable_to_string ("LoaderTimeInitUSec", ++ &bli_vendor_guid, ++ init_usec, ++ bli_efivar_attr); + } + + void + grub_machine_fini (int flags) + { ++ grub_efi_uint64_t exec_ms; ++ char *exec_usec; ++ ++ exec_ms = grub_get_time_ms (); ++ exec_usec = grub_xasprintf ("%lu000", exec_ms); ++ grub_efi_set_variable_to_string ("LoaderTimeExecUSec", ++ &bli_vendor_guid, ++ exec_usec, ++ bli_efivar_attr); ++ + if (!(flags & GRUB_LOADER_FLAG_NORETURN)) + return; + +-- +2.47.0 + diff --git a/packages/grub/0055-tsc-drop-tsc_boot_time-offset.patch b/packages/grub/0055-tsc-drop-tsc_boot_time-offset.patch new file mode 100644 index 00000000..a527bcc0 --- /dev/null +++ b/packages/grub/0055-tsc-drop-tsc_boot_time-offset.patch @@ -0,0 +1,66 @@ +From 1a31957af68baeba1065a250382e4744709b80a6 Mon Sep 17 00:00:00 2001 +From: Ben Cressey +Date: Thu, 14 Nov 2024 19:55:38 +0000 +Subject: [PATCH] tsc: drop tsc_boot_time offset + +To implement the Boot Loader Interface, we need to store the time at +which GRUB was started in the `LoaderTimeInitUSec` variable. + +On the i386 architecture, GRUB stores the TSC value at the time that +TSC calibration took place, and subtracts it before returning a value +from grub_tsc_get_time_ms(), which is called by grub_get_time_ms() - +the architecture-independent function for retrieving timestamps. + +Storing the TSC value at the time of initialization, and subtracting +it from subsequent calculations, prevents callers from determining +the system time elapsed before GRUB was loaded. + +The equivalent offset is not done for arm64, and does not appear to +serve a purpose. Callers of grub_get_time_ms() all follow a pattern +where they first record a "start" time and then compute a delta in a +loop, which works the same way whether the "start" time is close to +zero or not. + +Rather than exposing another function to provide the "raw" timestamp +without the offset, just drop `tsc_boot_time` from the calculation. + +Signed-off-by: Ben Cressey +--- + grub-core/kern/i386/tsc.c | 7 +------ + 1 file changed, 1 insertion(+), 6 deletions(-) + +diff --git a/grub-core/kern/i386/tsc.c b/grub-core/kern/i386/tsc.c +index 9293b161d..ec6899812 100644 +--- a/grub-core/kern/i386/tsc.c ++++ b/grub-core/kern/i386/tsc.c +@@ -25,9 +25,6 @@ + #include + #include + +-/* This defines the value TSC had at the epoch (that is, when we calibrated it). */ +-static grub_uint64_t tsc_boot_time; +- + /* Calibrated TSC rate. (In ms per 2^32 ticks) */ + /* We assume that the tick is less than 1 ms and hence this value fits + in 32-bit. 
*/ +@@ -36,7 +33,7 @@ grub_uint32_t grub_tsc_rate; + static grub_uint64_t + grub_tsc_get_time_ms (void) + { +- grub_uint64_t a = grub_get_tsc () - tsc_boot_time; ++ grub_uint64_t a = grub_get_tsc (); + grub_uint64_t ah = a >> 32; + grub_uint64_t al = a & 0xffffffff; + +@@ -63,8 +60,6 @@ grub_tsc_init (void) + return; + } + +- tsc_boot_time = grub_get_tsc (); +- + #if defined (GRUB_MACHINE_XEN) || defined (GRUB_MACHINE_XEN_PVH) + (void) (grub_tsc_calibrate_from_xen () || calibrate_tsc_hardcode()); + #elif defined (GRUB_MACHINE_EFI) +-- +2.47.0 + diff --git a/packages/grub/grub.spec b/packages/grub/grub.spec index 9efc1c94..c5b41a3c 100644 --- a/packages/grub/grub.spec +++ b/packages/grub/grub.spec @@ -68,6 +68,13 @@ Patch0045: 0045-mkimage-pgp-move-single-public-key-into-its-own-sect.patch Patch0046: 0046-Revert-sb-Add-fallback-to-EFI-LoadImage-if-shim_lock.patch Patch0047: 0047-Revert-UBUNTU-Move-verifiers-after-decompressors.patch Patch0048: 0048-add-flag-to-only-search-root-dev.patch +Patch0049: 0049-efi-Add-grub_efi_set_variable_with_attributes.patch +Patch0050: 0050-include-grub-types.h-Add-GRUB_SSIZE_MAX.patch +Patch0051: 0051-kern-misc-kern-efi-Extract-UTF-8-to-UTF-16-code.patch +Patch0052: 0052-efi-Add-grub_efi_set_variable_to_string.patch +Patch0053: 0053-efi-add-vendor-GUID-for-Boot-Loader-Interface.patch +Patch0054: 0054-efi-set-LoaderTimeInitUSec-and-LoaderTimeExecUSec.patch +Patch0055: 0055-tsc-drop-tsc_boot_time-offset.patch BuildRequires: automake BuildRequires: bison From 199f8a3b30d20edf6bb6c34c23d5e4f4e1936313 Mon Sep 17 00:00:00 2001 From: "Sean P. Kelly" Date: Tue, 3 Dec 2024 23:37:39 +0000 Subject: [PATCH 1353/1356] twoliter: update twoliter to v0.6.0 --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 79577074..5e3d144a 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,9 @@ TWOLITER_DIR := $(TOOLS_DIR)/twoliter TWOLITER := $(TWOLITER_DIR)/twoliter CARGO_HOME := $(TOP).cargo -TWOLITER_VERSION ?= "0.5.1" -TWOLITER_SHA256_AARCH64 ?= "c72a571414db175fd1d82e96daba2a7778379a8336ffa63c42d00b65ca84b34e" -TWOLITER_SHA256_X86_64 ?= "5c3801d11b77d5414071432eed48d1888555125917b322b37a84b3a9219422a7" +TWOLITER_VERSION ?= "0.6.0" +TWOLITER_SHA256_AARCH64 ?= "73a961ff8b9e829b764a86e096b9c2630b452dadc2099f678d57b2146f6a18f9" +TWOLITER_SHA256_X86_64 ?= "739c5ed0bbd9b0f50ca641964e03b1a92ae9b2c814b1c3463e22f54bc8968e35" KIT ?= bottlerocket-core-kit UNAME_ARCH = $(shell uname -m) ARCH ?= $(UNAME_ARCH) From 53f065d380ec6addf610a811e1a776d39a8b700d Mon Sep 17 00:00:00 2001 From: Gavin Inglis Date: Tue, 10 Dec 2024 21:54:10 +0000 Subject: [PATCH 1354/1356] kernel-6.1: update to 6.1.119 Rebase to Amazon Linux upstream version 6.1.119-129.201.amzn2023. Remove io_uring patch as AL's 6.1 latest kernel release includes the patch for io_uring in tree: * https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-6.1.y&id=9f5a834715d04f32cb8a8d25fe329d67e7d16e46 * https://github.com/amazonlinux/linux/commit/9f5a834715d04f32cb8a8d25fe329d67e7d16e46 Also, add the module for z3fold back in. 
Signed-off-by: Gavin Inglis --- ...ways-lock-__io_cqring_overflow_flush.patch | 60 ------------------- packages/kernel-6.1/Cargo.toml | 4 +- packages/kernel-6.1/kernel-6.1.spec | 7 +-- 3 files changed, 5 insertions(+), 66 deletions(-) delete mode 100644 packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch diff --git a/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch b/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch deleted file mode 100644 index 3c967558..00000000 --- a/packages/kernel-6.1/1100-io_uring-always-lock-__io_cqring_overflow_flush.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 1863335f591d6a708fb5321fe10504174fddc9ee Mon Sep 17 00:00:00 2001 -From: Pavel Begunkov -Date: Wed, 10 Apr 2024 02:26:54 +0100 -Subject: [PATCH] io_uring: always lock __io_cqring_overflow_flush - -Commit 8d09a88ef9d3cb7d21d45c39b7b7c31298d23998 upstream. - -Conditional locking is never great, in case of -__io_cqring_overflow_flush(), which is a slow path, it's not justified. -Don't handle IOPOLL separately, always grab uring_lock for overflow -flushing. - -Signed-off-by: Pavel Begunkov -Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com -Signed-off-by: Jens Axboe -Signed-off-by: Greg Kroah-Hartman ---- - io_uring/io_uring.c | 11 ++++++----- - 1 file changed, 6 insertions(+), 5 deletions(-) - -diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c -index f902b161f02c..92c1aa8f3501 100644 ---- a/io_uring/io_uring.c -+++ b/io_uring/io_uring.c -@@ -593,6 +593,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) - bool all_flushed; - size_t cqe_size = sizeof(struct io_uring_cqe); - -+ lockdep_assert_held(&ctx->uring_lock); -+ - if (!force && __io_cqring_events(ctx) == ctx->cq_entries) - return false; - -@@ -647,12 +649,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx) - bool ret = true; - - if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { -- /* iopoll syncs against uring_lock, not completion_lock */ -- if (ctx->flags & IORING_SETUP_IOPOLL) -- mutex_lock(&ctx->uring_lock); -+ mutex_lock(&ctx->uring_lock); - ret = __io_cqring_overflow_flush(ctx, false); -- if (ctx->flags & IORING_SETUP_IOPOLL) -- mutex_unlock(&ctx->uring_lock); -+ mutex_unlock(&ctx->uring_lock); - } - - return ret; -@@ -1405,6 +1404,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) - int ret = 0; - unsigned long check_cq; - -+ lockdep_assert_held(&ctx->uring_lock); -+ - if (!io_allowed_run_tw(ctx)) - return -EEXIST; - --- -2.47.0 - diff --git a/packages/kernel-6.1/Cargo.toml b/packages/kernel-6.1/Cargo.toml index 09130475..140c0a31 100644 --- a/packages/kernel-6.1/Cargo.toml +++ b/packages/kernel-6.1/Cargo.toml @@ -13,8 +13,8 @@ path = "../packages.rs" [[package.metadata.build-package.external-files]] # Use latest-kernel-srpm-url.sh to get this. 
-url = "https://cdn.amazonlinux.com/al2023/blobstore/d6984bd6e9f17839ebf3e0b0c4d7dd72aeb4db5911bf697ed299caea93c83327/kernel-6.1.115-126.197.amzn2023.src.rpm" -sha512 = "eb1e9bdbbcc4b74cc678c894b19279437e1396c56e9db0904d1dd6898e6babf4fa3c4c53908ca189ee8558c5d249314a819b255f183fe100e4a3ed068ce2e6cf" +url = "https://cdn.amazonlinux.com/al2023/blobstore/c5625ba4f37a38809773fa50b769735602f1e4e50d60cb7127ed6231d0695e95/kernel-6.1.119-129.201.amzn2023.src.rpm" +sha512 = "258a96b9216c187352405f856d69d0fd7edb2e9c89066ad098c9e50bbdfc0c13c666d74fe8b919fa5ffa03dc9f802bff454b03e7f455cf2d3d92a4a66c7d4987" force-upstream = true [[package.metadata.build-package.external-files]] diff --git a/packages/kernel-6.1/kernel-6.1.spec b/packages/kernel-6.1/kernel-6.1.spec index bae9a140..7e083776 100644 --- a/packages/kernel-6.1/kernel-6.1.spec +++ b/packages/kernel-6.1/kernel-6.1.spec @@ -1,13 +1,13 @@ %global debug_package %{nil} Name: %{_cross_os}kernel-6.1 -Version: 6.1.115 +Version: 6.1.119 Release: 1%{?dist} Summary: The Linux kernel License: GPL-2.0 WITH Linux-syscall-note URL: https://www.kernel.org/ # Use latest-kernel-srpm-url.sh to get this. -Source0: https://cdn.amazonlinux.com/al2023/blobstore/d6984bd6e9f17839ebf3e0b0c4d7dd72aeb4db5911bf697ed299caea93c83327/kernel-6.1.115-126.197.amzn2023.src.rpm +Source0: https://cdn.amazonlinux.com/al2023/blobstore/c5625ba4f37a38809773fa50b769735602f1e4e50d60cb7127ed6231d0695e95/kernel-6.1.119-129.201.amzn2023.src.rpm # Use latest-neuron-srpm-url.sh to get this. Source1: https://yum.repos.neuron.amazonaws.com/aws-neuronx-dkms-2.18.12.0.noarch.rpm @@ -42,8 +42,6 @@ Patch1004: 1004-af_unix-increase-default-max_dgram_qlen-to-512.patch # Drop AL revert of upstream patch to minimize delta. The necessary dependency # options for nvidia are instead included through DRM_SIMPLE Patch1005: 1005-Revert-Revert-drm-fb_helper-improve-CONFIG_FB-depend.patch -# Prevent applications using io_uring from hanging -Patch1100: 1100-io_uring-always-lock-__io_cqring_overflow_flush.patch BuildRequires: bc BuildRequires: elfutils-devel @@ -1014,6 +1012,7 @@ install -p -m 0644 %{S:302} %{buildroot}%{_cross_bootconfigdir}/05-metal.conf %{_cross_kmoddir}/kernel/lib/ts_fsm.ko.* %{_cross_kmoddir}/kernel/lib/ts_kmp.ko.* %{_cross_kmoddir}/kernel/lib/zstd/zstd_compress.ko.* +%{_cross_kmoddir}/kernel/mm/z3fold.ko.* %{_cross_kmoddir}/kernel/mm/zsmalloc.ko.* %{_cross_kmoddir}/kernel/net/8021q/8021q.ko.* %{_cross_kmoddir}/kernel/net/802/garp.ko.* From b76bcb9ce0aeda33015c8d3f3a8fdaebaa5ae8ad Mon Sep 17 00:00:00 2001 From: Jarrett Tierney Date: Wed, 11 Dec 2024 23:08:56 +0000 Subject: [PATCH 1355/1356] libkcapi: remove glibc build dependency --- packages/libkcapi/Cargo.toml | 1 - packages/libkcapi/libkcapi.spec | 1 - 2 files changed, 2 deletions(-) diff --git a/packages/libkcapi/Cargo.toml b/packages/libkcapi/Cargo.toml index 6ca684ac..5f287ce3 100644 --- a/packages/libkcapi/Cargo.toml +++ b/packages/libkcapi/Cargo.toml @@ -14,4 +14,3 @@ sha512 = "6498147434059343f1ccdd7efadcd425ad7074e41b4e019fc995129d5df326b781e0a6 force-upstream = true [build-dependencies] -glibc = { path = "../glibc" } diff --git a/packages/libkcapi/libkcapi.spec b/packages/libkcapi/libkcapi.spec index 170a156a..eb07d200 100644 --- a/packages/libkcapi/libkcapi.spec +++ b/packages/libkcapi/libkcapi.spec @@ -24,7 +24,6 @@ Summary: Library for kernel crypto API License: BSD-3-Clause OR GPL-2.0-only URL: https://www.chronox.de/libkcapi/html/index.html Source0: 
https://cdn.amazonlinux.com/al2023/blobstore/0eef74b3b4eb1ec321bab80f867aee89b94dc9fc95571da58ea5bba7a70e6224/libkcapi-1.4.0-105.amzn2023.0.1.src.rpm -BuildRequires: %{_cross_os}glibc-devel %description %{summary}. From 1e29a94e5724d3dadcba1c6066a797303f7c6064 Mon Sep 17 00:00:00 2001 From: Jarrett Tierney Date: Wed, 11 Dec 2024 23:09:20 +0000 Subject: [PATCH 1356/1356] kernel-kit: create the new bottlerocket kernel kit --- .github/workflows/cache.yml | 2 +- BUILDING.md | 20 +++--- Cargo.lock | 82 +++++++++++++++++++++++++ Cargo.toml | 16 +++++ Makefile | 2 +- README.md | 16 ++--- Twoliter.lock | 9 +++ Twoliter.toml | 10 +++ kits/bottlerocket-kernel-kit/Cargo.toml | 26 ++++++++ tools/collect-kernel-config | 2 +- 10 files changed, 164 insertions(+), 21 deletions(-) create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 Twoliter.lock create mode 100644 Twoliter.toml create mode 100644 kits/bottlerocket-kernel-kit/Cargo.toml diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 1ecc570d..e9674bcc 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -6,7 +6,7 @@ on: branches: [develop] jobs: cache: - if: github.repository == 'bottlerocket-os/bottlerocket-core-kit' + if: github.repository == 'bottlerocket-os/bottlerocket-kernel-kit' runs-on: group: bottlerocket labels: bottlerocket_ubuntu-latest_32-core diff --git a/BUILDING.md b/BUILDING.md index 0a419d4c..7156d8b9 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -1,6 +1,6 @@ -# How to build the Bottlerocket core kit +# How to build the Bottlerocket kernel kit -If you'd like to build your own copy of the core kit for local development, follow these steps. +If you'd like to build your own copy of the kernel kit for local development, follow these steps. ## Dependencies #### System Requirements @@ -57,9 +57,9 @@ The following configuration is needed in your `/etc/docker/daemon.json` The installation instructions for [crane](https://github.com/google/go-containerregistry/tree/main/cmd/crane) should help you set it up for use with Twoliter. -## Build the core kit +## Build the kernel kit -Building the core kit can be done by using the makefile targets. +Building the kernel kit can be done by using the makefile targets. ``` make ARCH= ``` @@ -69,7 +69,7 @@ After the kit has been built you can then publish the kit image to your private ### Use a private registry for development It is recommended that you have some form of protected container registry to use for testing. -For testing purposes you can either utilize mutable tags to allow overriding of multiple versions of a core kit as you test, or you can use immutable tags and continuously bump the core kit version via the `Twoliter.toml`. +For testing purposes you can either utilize mutable tags to allow overriding of multiple versions of a kernel kit as you test, or you can use immutable tags and continuously bump the kernel kit version via the `Twoliter.toml`. ### Configure Infra.toml An `Infra.toml` file needs to be created and should have a definition of your vendor (container registry) in order to publish the kits you build. To do so make sure that the `Infra.toml` has the below. @@ -82,22 +82,22 @@ After the kit has been built locally, the kit can be published to the provided v aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ####.dkr.ecr.us-west-2.amazonaws.com ``` -Finally, publishing the core kit images can be handled by the makefile target. 
+Finally, publishing the kernel kit images can be handled by the makefile target. ``` make publish VENDOR= ``` -At this point, there should be a core kit image in your private registry which can be consumed when building a variant to test and validate. +At this point, there should be a kernel kit image in your private registry which can be consumed when building a variant to test and validate. ## Consuming the published kit image -This section will cover building a variant to test a build of the core kit as done above. Please note this section does not cover the complete complexity of testing a change to Bottlerocket. For this see the [BUILDING](https://github.com/bottlerocket-os/bottlerocket/blob/develop/BUILDING.md) section in the [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket/) repository. +This section will cover building a variant to test a build of the kernel kit as done above. Please note this section does not cover the complete complexity of testing a change to Bottlerocket. For this see the [BUILDING](https://github.com/bottlerocket-os/bottlerocket/blob/develop/BUILDING.md) section in the [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket/) repository. ### Configure Twoliter.toml -To consume a private copy of the Bottlerocket core kit with your changes built into it, you need to define the vendor that points to your container registry in `Twoliter.toml` and adjust the core kit dependency: +To consume a private copy of the Bottlerocket kernel kit with your changes built into it, you need to define the vendor that points to your container registry in `Twoliter.toml` and adjust the kernel kit dependency: ``` [vendor.my-vendor] registry = "####.dkr.ecr.us-west-2.amazonaws.com" [[kit]] -name = "bottlerocket-core-kit" # Name of your ECR repo +name = "bottlerocket-kernel-kit" # Name of your ECR repo version = "2.x.y" # your version tag you want to test vendor = "my-vendor" ``` diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 00000000..567b348b --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,82 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "bottlerocket-kernel-kit" +version = "0.1.0" +dependencies = [ + "grub", + "kernel-5_10", + "kernel-5_15", + "kernel-6_1", + "kmod-5_10-nvidia", + "kmod-5_15-nvidia", + "kmod-6_1-nvidia", + "libkcapi", + "linux-firmware", + "microcode", + "shim", +] + +[[package]] +name = "grub" +version = "0.1.0" + +[[package]] +name = "kernel-5_10" +version = "0.1.0" +dependencies = [ + "microcode", +] + +[[package]] +name = "kernel-5_15" +version = "0.1.0" +dependencies = [ + "microcode", +] + +[[package]] +name = "kernel-6_1" +version = "0.1.0" +dependencies = [ + "microcode", +] + +[[package]] +name = "kmod-5_10-nvidia" +version = "0.1.0" +dependencies = [ + "kernel-5_10", +] + +[[package]] +name = "kmod-5_15-nvidia" +version = "0.1.0" +dependencies = [ + "kernel-5_15", +] + +[[package]] +name = "kmod-6_1-nvidia" +version = "0.1.0" +dependencies = [ + "kernel-6_1", +] + +[[package]] +name = "libkcapi" +version = "0.1.0" + +[[package]] +name = "linux-firmware" +version = "0.1.0" + +[[package]] +name = "microcode" +version = "0.1.0" + +[[package]] +name = "shim" +version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..07aa5f93 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,16 @@ +[workspace] +resolver = "2" +members = [ + "kits/bottlerocket-kernel-kit", + "packages/grub", + "packages/kernel-5.10", + "packages/kernel-5.15", + "packages/kernel-6.1", + "packages/kmod-5.10-nvidia", + "packages/kmod-5.15-nvidia", + "packages/kmod-6.1-nvidia", + "packages/linux-firmware", + "packages/microcode", + "packages/libkcapi", + "packages/shim", +] diff --git a/Makefile b/Makefile index 5e3d144a..db74ad8e 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ CARGO_HOME := $(TOP).cargo TWOLITER_VERSION ?= "0.6.0" TWOLITER_SHA256_AARCH64 ?= "73a961ff8b9e829b764a86e096b9c2630b452dadc2099f678d57b2146f6a18f9" TWOLITER_SHA256_X86_64 ?= "739c5ed0bbd9b0f50ca641964e03b1a92ae9b2c814b1c3463e22f54bc8968e35" -KIT ?= bottlerocket-core-kit +KIT ?= bottlerocket-kernel-kit UNAME_ARCH = $(shell uname -m) ARCH ?= $(UNAME_ARCH) VENDOR ?= bottlerocket diff --git a/README.md b/README.md index 1f3ef5b9..0f466ed1 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,18 @@ -# Bottlerocket Core Kit -This is the core kit for [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket). +# Bottlerocket Kernel Kit +This is the kernel kit for [Bottlerocket](https://github.com/bottlerocket-os/bottlerocket). It includes many common dependencies for downstream package and variant builds. ## Contents -The core kit includes: -* Shared libraries such as glibc and libz -* Management daemons such as systemd and dbus-broker -* Agents for settings API and in-place updates +The kernel kit includes: +* multiple versions of the Linux kernel +* bootloaders +* firmware ### Availability -The [Bottlerocket core kit](https://gallery.ecr.aws/bottlerocket/bottlerocket-core-kit) is available through Amazon ECR Public. +The [Bottlerocket kernel kit](https://gallery.ecr.aws/bottlerocket/bottlerocket-kernel-kit) is available through Amazon ECR Public. ### Development -The core kit can be built on either an **x86_64** or an **aarch64** host. To do this you can use the following commands. +The kernel kit can be built on either an **x86_64** or an **aarch64** host. To do this you can use the following commands. 
```shell make ``` diff --git a/Twoliter.lock b/Twoliter.lock new file mode 100644 index 00000000..5f2ca4e7 --- /dev/null +++ b/Twoliter.lock @@ -0,0 +1,9 @@ +schema-version = 1 +kit = [] + +[sdk] +name = "bottlerocket-sdk" +version = "0.50.0" +vendor = "bottlerocket" +source = "public.ecr.aws/bottlerocket/bottlerocket-sdk:v0.50.0" +digest = "Rjpy/gVgBhU/B696xaK1Y4/drz4pNJu+fyyZSIk9oLE=" diff --git a/Twoliter.toml b/Twoliter.toml new file mode 100644 index 00000000..97a574f3 --- /dev/null +++ b/Twoliter.toml @@ -0,0 +1,10 @@ +schema-version = 1 +release-version = "1.0.0" + +[vendor.bottlerocket] +registry = "public.ecr.aws/bottlerocket" + +[sdk] +name = "bottlerocket-sdk" +version = "0.50.0" +vendor = "bottlerocket" diff --git a/kits/bottlerocket-kernel-kit/Cargo.toml b/kits/bottlerocket-kernel-kit/Cargo.toml new file mode 100644 index 00000000..1c38c8c7 --- /dev/null +++ b/kits/bottlerocket-kernel-kit/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "bottlerocket-kernel-kit" +version = "0.1.0" +edition = "2021" +publish = false +build = "../build.rs" + +[package.metadata.build-kit] +vendor = "bottlerocket" + +[lib] +path = "../kit.rs" + +[build-dependencies] +grub = { path = "../../packages/grub" } +kernel-5_10 = { path = "../../packages/kernel-5.10" } +kernel-5_15 = { path = "../../packages/kernel-5.15" } +kernel-6_1 = { path = "../../packages/kernel-6.1" } +kmod-5_10-nvidia = { path = "../../packages/kmod-5.10-nvidia" } +kmod-5_15-nvidia = { path = "../../packages/kmod-5.15-nvidia" } +kmod-6_1-nvidia = { path = "../../packages/kmod-6.1-nvidia" } +linux-firmware = { path = "../../packages/linux-firmware" } +microcode = { path = "../../packages/microcode" } +libkcapi = { path = "../../packages/libkcapi" } +shim = { path = "../../packages/shim" } + diff --git a/tools/collect-kernel-config b/tools/collect-kernel-config index a5fd83e2..f5306abe 100755 --- a/tools/collect-kernel-config +++ b/tools/collect-kernel-config @@ -5,7 +5,7 @@ output_dir=/tmp/configs usage() { cat <